Code Example #1
 def __matmul__(self, value):
     if not isinstance(value, Mat):
         raise TypeError("Must use Mat objects for @ matmul")
     elif CV_[self.dtype.name] not in (cv.CV_32F, cv.CV_64F):
         raise TypeError("Must use float32 or float64 dtype for @ matmul")
     elif self.dtype != value.dtype:
         raise TypeError("Dtypes must match for @ matmul")
     shape = (self.shape[0], value.shape[1])
     out = Mat(shape, dtype=self.dtype, UMat=self.UMat)
     cv.gemm(self._, value._, alpha=1, src3=None, beta=0, dst=out._)
     return out
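For reference, a minimal standalone sketch (independent of the Mat/CV_ wrapper above) of the call the operator delegates to: cv2.gemm(src1, src2, alpha, src3, beta) computes alpha*src1*src2 + beta*src3, so with alpha=1 and beta=0 it is a plain matrix product on float32/float64 inputs.

import cv2
import numpy as np

a = np.random.rand(3, 4).astype(np.float32)
b = np.random.rand(4, 2).astype(np.float32)

# dst = 1*a@b + 0*src3; src3 may be None when beta is 0
dst = cv2.gemm(a, b, 1, None, 0)
assert np.allclose(dst, a @ b, atol=1e-5)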
Code Example #2
def subspaceProject(eigenvectors_column, mean, source):
    source_rows = source.rows
    source_cols = source.cols

    if len(eigenvectors_column) != source_cols * source_rows:
        raise Exception("wrong shape")

    flattened_source = []
    
    for i in range(source_cols):
        for j in range(source_rows):
            flattened_source.append(source[j, i])
        
    # flatten column-major into a single 1 x (rows*cols) row vector
    flattened_source = np.array([np.asarray(flattened_source)])
    
    delta_from_mean = cv2.subtract(flattened_source, mean)
    # flatten the matrix then convert to 1 row by many columns
    delta_from_mean = np.asarray([np.hstack(delta_from_mean)])

    empty_mat = np.array(eigenvectors_column, copy=True)  # this is required for the function call but unused
    result = cv2.gemm(delta_from_mean, eigenvectors_column, 1.0, empty_mat, 0.0)
    return result
Code Example #3
    def _get_orientation(self, pts):
        # Construct a buffer used by the PCA analysis
        data_pts = np.squeeze(np.array(pts, dtype=np.float64))

        # Perform PCA analysis
        # https://stackoverflow.com/questions/22612828/python-opencv-pcacompute-eigenvalue
        covar, mean = cv2.calcCovarMatrix(
            data_pts, np.mean(data_pts, axis=0),
            cv2.COVAR_SCALE | cv2.COVAR_ROWS | cv2.COVAR_SCRAMBLED)
        eigenvalues, eigenvectors = cv2.eigen(covar)[1:]
        eigenvectors = cv2.gemm(eigenvectors, data_pts - mean, 1, None, 0)
        eigenvectors = np.apply_along_axis(
            lambda n: cv2.normalize(n, dst=None).flat, 1, eigenvectors)

        # Store the centre of the object
        cntr = np.array([int(mean[0, 0]), int(mean[0, 1])])
        self.centres.append(cntr)

        # Draw the principal components
        cv2.circle(self.img, (cntr[0], cntr[1]), 3, (255, 0, 255), 1)
        p1 = cntr + 0.02 * eigenvectors[0] * eigenvalues[0]
        p2 = cntr - 0.02 * eigenvectors[1] * eigenvalues[1]
        # self._draw_axis(copy.copy(cntr), p1, (0, 255, 0), 2)
        # self._draw_axis(copy.copy(cntr), p2, (255, 255, 0), 10)

        return np.arctan2(eigenvectors[0, 1],
                          eigenvectors[0, 0])  # orientation in radians
Code Example #4
File: sift.py  Project: solus9/opensift
def interp_step(dog, octv, intvl, r, c):
    dD = deriv_3D(dog, octv, intvl, r, c)
    H = hessian_3D(dog, octv, intvl, r, c)
    _, H_inv = cv2.invert(H, flags=cv2.DECOMP_SVD)
    gm = cv2.gemm(H_inv, dD, -1, None, 0)
    return gm
Code Example #5
 def test_gemm_big(self):
     sz = (500, 500)
     a = np.ones(sz, dtype=float)
     b = np.eye(sz[0])
     c = np.ones(sz, dtype=float)
     x = cv2.gemm(a, b, 2, c, 3)
     gold = np.full(sz, 5, dtype=float)
     self.assertTrue(np.array_equal(gold, x),
                     "Array returned by GEMM is not valid")
Code Example #6
def getPrincipleAngle(eigVecs1, eigVecs2):
    #e2t = cv2.transpose(eigVecs2)
    evm = cv2.gemm(eigVecs1, eigVecs2, 1, None, 1, flags=cv2.GEMM_2_T)
    #evm = np.outer(eigVecs1, e2t)
    w, u, vt = cv2.SVDecomp(evm)
    #print (repr(w))
    #print (repr(float(w[0])))
    return float(w[0])
Code Example #7
def getSimilarityTransform(src, dst):
    if len(src) < 2 or len(dst) < 2:
        return None

    if len(src[0]) < 2 or len(src[1]) < 2 or len(dst[0]) < 2 or len(dst[1]) < 2:
        return None

    # Separate points
    x1 = src[0][0]
    y1 = src[0][1]
    x2 = src[1][0]
    y2 = src[1][1]

    u1 = dst[0][0]
    v1 = dst[0][1]
    u2 = dst[1][0]
    v2 = dst[1][1]

    # Create matrix
    mat = np.array([
        [x1,  y1, 1, 0],
        [y1, -x1, 0, 1],
        [x2,  y2, 1, 0],
        [y2, -x2, 0, 1]
    ], np.float32)

    # Get the inverse matrix
    mat = np.linalg.inv(mat)

    # Create vector
    vec = np.array([
        [u1],
        [v1],
        [u2],
        [v2]
    ], np.float32)

    # Multiply matrices
    val = cv2.gemm(mat, vec, 1.0, None, 0.0)

    # Get elements
    a = val[0][0]
    b = val[1][0]
    c = val[2][0]
    d = val[3][0]

    # Make output transformation matrix (2 X 3)
    transform = np.array([
        [ a, b, c],
        [-b, a, d]
    ], np.float32)

    return transform
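A hedged usage sketch for the function above (the point pairs below are illustrative, not from the original project): the returned 2 x 3 matrix can be applied with cv2.transform to check that both source points land on their destinations.

import cv2
import numpy as np

src = [(10.0, 20.0), (110.0, 20.0)]
dst = [(30.0, 40.0), (30.0, 140.0)]      # src rotated and shifted

T = getSimilarityTransform(src, dst)     # function defined above
pts = np.array([src], np.float32)        # shape (1, N, 2) as cv2.transform expects
mapped = cv2.transform(pts, T)
print(np.round(mapped, 2))               # close to [[[30. 40.], [30. 140.]]]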
Code Example #8
def subspaceReconstruct(eigenvectors_column, mean, projection, image_width, image_height):
    if len(eigenvectors_column[0]) != len(projection[0]):
        raise Exception("wrong shape")

    empty_mat = np.array(eigenvectors_column, copy=True)  # this is required for the function call but unused
    # GEMM_2_T transposes the eigenvector
    result = cv2.gemm(projection, eigenvectors_column, 1.0, empty_mat, 0.0, flags=cv2.GEMM_2_T)

    flattened_array = np.array([np.asarray(result[0])])
    flattened_image = np.hstack(cv2.add(flattened_array, mean))
    flattened_image = np.asarray([np.uint8(num) for num in flattened_image])
    all_rows = []
    for row_index in range(image_height):
        row = flattened_image[row_index * image_width: (row_index + 1) * image_width]
        all_rows.append(row)
    image_matrix = np.asarray(all_rows)
    image = normalizeHist(image_matrix)
    return image
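A minimal NumPy sketch (hypothetical data, independent of the helpers above) of the project/reconstruct round-trip that subspaceProject and subspaceReconstruct implement, which is what the GEMM_2_T flag expresses: projection = (x - mean) · W and reconstruction = projection · Wᵀ + mean for a D x K eigenvector matrix W with orthonormal columns.

import numpy as np

rng = np.random.default_rng(0)
D, K = 16, 4
W, _ = np.linalg.qr(rng.standard_normal((D, K)))   # D x K, orthonormal columns
mean = rng.standard_normal((1, D))
x = rng.standard_normal((1, D))

proj = (x - mean) @ W        # what subspaceProject computes via cv2.gemm
recon = proj @ W.T + mean    # what subspaceReconstruct computes with GEMM_2_T

# projecting the reconstruction recovers the same coefficients
assert np.allclose((recon - mean) @ W, proj)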
Code Example #9
def stitch_combine_matrix(matrix_1, matrix_2):
    """
    DESCRIPTION
    This function merges two perspective transform matrices using
    multiplication.
    
    INPUT
    matrix_1 = a perspective transform matrix
    matrix_2 = a perspective transform matrix
    
    OUTPUT
    matrix_out = a perspective transform matrix
    """

    #use the cv2.gemm function to merge both matrices
    matrix_out = cv2.gemm(matrix_1, matrix_2, 1, None, 0)
    #return
    return matrix_out
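A short usage sketch (illustrative matrices, not from the original stitching pipeline): composing two 3 x 3 homographies with cv2.gemm is the ordinary matrix product, so applying matrix_out is the same as applying matrix_2 first and matrix_1 second.

import cv2
import numpy as np

theta = np.deg2rad(15)
H_rot = np.array([[np.cos(theta), -np.sin(theta), 0],
                  [np.sin(theta),  np.cos(theta), 0],
                  [0, 0, 1]], np.float64)
H_shift = np.array([[1, 0, 40],
                    [0, 1, 25],
                    [0, 0, 1]], np.float64)

H_combined = stitch_combine_matrix(H_shift, H_rot)     # function defined above
assert np.allclose(H_combined, H_shift @ H_rot)

pt = np.array([[[10.0, 20.0]]])
a = cv2.perspectiveTransform(pt, H_combined)
b = cv2.perspectiveTransform(cv2.perspectiveTransform(pt, H_rot), H_shift)
assert np.allclose(a, b)                               # rotate first, then shift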
Code Example #10
def PCA(PCAInput):
    # The following mimics PCA::operator() implementation from OpenCV's
    # matmul.cpp() which is wrapped by Python cv2.PCACompute(). We can't
    # use PCACompute() though as it discards the eigenvalues.

    # Scrambled is faster for nVariables >> nObservations. Bitmask is 0 and
    # therefore default / redundant, but included to abide by online docs.
    covar, mean = cv2.calcCovarMatrix(PCAInput, cv2.cv.CV_COVAR_SCALE |
                                                cv2.cv.CV_COVAR_ROWS  |
                                                cv2.cv.CV_COVAR_SCRAMBLED)

    eVal, eVec = cv2.eigen(covar, computeEigenvectors=True)[1:]

    # Conversion + normalisation required due to 'scrambled' mode
    eVec = cv2.gemm(eVec, PCAInput - mean, 1, None, 0)
    # apply_along_axis() slices 1D rows, but normalize() returns 4x1 vectors
    eVec = np.apply_along_axis(lambda n: cv2.normalize(n).flat, 1, eVec)

    return mean,  eVec    
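The cv2.cv constants above only exist in OpenCV 2.x. A minimal adaptation (a sketch, not the project's code) of the same scrambled-covariance PCA using the current constant names, following the pattern of Code Example #12 below:

import cv2
import numpy as np

def pca_scrambled(samples):
    # samples: one observation per row, shape (n_obs, n_vars)
    samples = np.asarray(samples, dtype=np.float64)
    covar, mean = cv2.calcCovarMatrix(
        samples, np.empty(0),
        cv2.COVAR_SCALE | cv2.COVAR_ROWS | cv2.COVAR_SCRAMBLED)
    _, e_val, e_vec = cv2.eigen(covar)
    # map the eigenvectors of the small n_obs x n_obs matrix back into the
    # original variable space, then re-normalise each row
    e_vec = cv2.gemm(e_vec, samples - mean, 1, None, 0)
    e_vec = np.apply_along_axis(lambda v: cv2.normalize(v, None).flat, 1, e_vec)
    return mean, e_val, e_vec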
Code Example #11
def PCA(PCAInput):
    # The following mimics PCA::operator() implementation from OpenCV's
    # matmul.cpp() which is wrapped by Python cv2.PCACompute(). We can't
    # use PCACompute() though as it discards the eigenvalues.

    # Scrambled is faster for nVariables >> nObservations. Bitmask is 0 and
    # therefore default / redundant, but included to abide by online docs.
    covar, mean = cv2.calcCovarMatrix(
        PCAInput, cv2.cv.CV_COVAR_SCALE | cv2.cv.CV_COVAR_ROWS
        | cv2.cv.CV_COVAR_SCRAMBLED)

    eVal, eVec = cv2.eigen(covar, computeEigenvectors=True)[1:]

    # Conversion + normalisation required due to 'scrambled' mode
    eVec = cv2.gemm(eVec, PCAInput - mean, 1, None, 0)
    # apply_along_axis() slices 1D rows, but normalize() returns 4x1 vectors
    eVec = np.apply_along_axis(lambda n: cv2.normalize(n).flat, 1, eVec)

    return mean, eVec
Code Example #12
def get_orientation(pts, img):
    sz = len(pts)
    data_pts = np.empty((sz, 2), dtype=np.float64)
    for i in range(data_pts.shape[0]):
        data_pts[i, 0] = pts[i, 0, 0]
        data_pts[i, 1] = pts[i, 0, 1]
    # Perform PCA analysis
    mean = np.empty(0)
    # mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)

    covar, mean = cv2.calcCovarMatrix(data_pts, mean, cv2.COVAR_SCALE |
                                      cv2.COVAR_ROWS |
                                      cv2.COVAR_SCRAMBLED)

    eVal, eVec = cv2.eigen(covar)[1:]

    # Conversion + normalisation required due to 'scrambled' mode
    eVec = cv2.gemm(eVec, data_pts - mean, 1, None, 0)
    # apply_along_axis() slices 1D rows, but normalize() returns 4x1 vectors
    eVec = np.apply_along_axis(lambda n: cv2.normalize(n, n).flat, 1, eVec)

    # Store the center of the object
    # cntr2 = (int(mean[0, 0]), int(mean[0, 1]))
    M = cv2.moments(pts)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cntr = (cX, cY)

    cv2.circle(img, cntr, 3, (255, 0, 255), 2)
    # p1 = (cntr[0] + 0.02 * eigenvectors[0, 0] * eigenvalues[0, 0],
    #       cntr[1] + 0.02 * eigenvectors[0, 1] * eigenvalues[0, 0])
    # p2 = (cntr[0] - 0.02 * eigenvectors[1, 0] * eigenvalues[1, 0],
    #       cntr[1] - 0.02 * eigenvectors[1, 1] * eigenvalues[1, 0])

    p1 = (cntr[0] + 0.02 * eVec[0, 0] * eVal[0, 0],
          cntr[1] + 0.02 * eVec[0, 1] * eVal[0, 0])
    p2 = (cntr[0] - 0.02 * eVec[1, 0] * eVal[1, 0],
          cntr[1] - 0.02 * eVec[1, 1] * eVal[1, 0])
    # draw_axis(img, cntr, p1, (0, 255, 0), 1)
    # draw_axis(img, cntr, p2, (255, 255, 0), 5)
    # angle = atan2(eigenvectors[0, 1], eigenvectors[0, 0])  # orientation in radians
    angle = atan2(eVec[0, 1], eVec[0, 0])
    return angle, cntr, eVec[0, 0], eVec[0, 1], eVal[0, 0]
Code Example #13
File: main.py  Project: jorreee/CompVis
def pca(X, nb_components=0):
    '''
    Do a PCA analysis on X
    @param X:                np.array containing the samples
                             shape = (nb samples, nb dimensions of each sample)
    @param nb_components:    the nb components we're interested in
    @return: the nb_components largest eigenvalues and eigenvectors of the covariance matrix, and the average sample
    '''
    [n,d] = X.shape
    if (nb_components <= 0) or (nb_components>n):
        nb_components = n

    
    #calculate scrambled covariance matrix for increased performance
    [covar, mean] = cv2.calcCovarMatrix(X, cv.CV_COVAR_SCALE |  cv.CV_COVAR_ROWS | cv.CV_COVAR_SCRAMBLED)
    #compute eigenvalues and vectors of scrambled covariance matrix
    [retval,eigenvals,eigenvects] = cv2.eigen(covar, True)
    #calculate the normal eigenvectors from the scrambled eigenvectors
    eigenvects = cv2.gemm(eigenvects, X - mean, 1, None, 0)
    eigenvects = np.apply_along_axis(lambda n: cv2.normalize(n).flat, 1, eigenvects)
    # return the nb_components largest eigenvalues and eigenvectors
    return [eigenvals[:nb_components], np.transpose(eigenvects[:nb_components]), np.transpose(mean)]
Code Example #14
import cv2
import numpy as np
import math

theta = math.radians(10)
rot_mat = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]], np.float32)

pts = np.array([(-150, -150), (150, -150), (150, 150), (-150, 150)],
               np.float32)
pts1 = pts

for i in range(1, 150):
    globals()['pts{}'.format(i + 1)] = cv2.gemm(globals()['pts{}'.format(i)],
                                                rot_mat,
                                                1,
                                                None,
                                                1,
                                                flags=cv2.GEMM_2_T)

#for i , ( pt1 , pt2 ) in enumerate ( zip ( pts1 , pts2 )):
#   print (" pts1 [%d] = %s, pst2 [%d]= %s" %( i , pt1 , i , pt2 ))

image = np.full((500, 500, 3), 0, np.uint8)  # black background

for i in range(1, 150):
    cv2.polylines(image, [np.int32(globals()['pts{}'.format(i)] + 250)], True,
                  (255, 255, 255), 2)  # white lines

cv2.imshow(" assignment3 ", image)
cv2.waitKey(0)
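A minimal sketch of the same idea without the globals() bookkeeping, keeping each rotated point set in a list instead (same rotation step and drawing as the script above):

import cv2
import numpy as np
import math

theta = math.radians(10)
rot_mat = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]], np.float32)

pts_list = [np.array([(-150, -150), (150, -150), (150, 150), (-150, 150)],
                     np.float32)]
for _ in range(149):
    # rotate the previous square by another 10 degrees (GEMM_2_T transposes rot_mat)
    pts_list.append(cv2.gemm(pts_list[-1], rot_mat, 1, None, 1, flags=cv2.GEMM_2_T))

image = np.zeros((500, 500, 3), np.uint8)       # black background
for pts in pts_list:
    cv2.polylines(image, [np.int32(pts + 250)], True, (255, 255, 255), 2)

cv2.imshow("assignment3", image)
cv2.waitKey(0)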
Code Example #15
    H2D = np.zeros((size_rows, size_cols, 2))

    rows = 0
    for j in range(0, size_cols):
        for i in range(0, size_rows):
            H2D[i, j, 0] = Hotf[rows, 0, 0]
            H2D[i, j, 1] = Hotf[rows, 0, 1]
            rows = rows + 1

    # Filtering

    H2DTr = np.zeros((size_cols, size_rows, 2))
    H2DTr[:, :, 0] = cv2.transpose(H2D[:, :, 0])
    H2DTr[:, :, 1] = cv2.transpose(H2D[:, :, 1])

    ImgFilt = cv2.gemm(H2DTr, ImgDFT, 1, 0, 0)
    # ImgFilt = cv2.gemm(Hotf, ImgDFT, 1, 0, 0)

    cv2.dft(ImgFilt, ImgFiltIDFT, cv2.DFT_INVERSE)  # ?

    #print('{}, c: {} '.format(name, ImgFilt[0, 0, 0]))

    # Plot3D

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    x = np.arange(0, 80, 1)
    y = np.arange(0, 80, 1)

    # show2 = np.zeros((, 92)) # padding with zeros
Code Example #16
for i in range(0,y):
    for j in range(0,x):
        gridx = np.append(gridx, j*phys)
        gridy = np.append(gridy, i*phys)
        gridz = np.append(gridz, 0.0)
        gridt = np.append(gridt, 1.0)

grid = np.array([gridx, gridy, gridz, gridt])

rt = np.zeros((3,4))
for i in range(0, num_planes):

    rot, jac = cv2.Rodrigues(p[i,0:3])
    rt[0:3,0:3] = rot
    for j in range(0,3):
        rt[j,3] = p[i,3+j]

    trans = cv2.gemm(rt, grid, 1.0, None, 0)   # apply [R|t] to the homogeneous grid points
    ax.plot(trans[0,:], trans[1,:], trans[2,:])

# axes
ax.plot([0,100],[0,0],[0,0])
ax.plot([0,0],[0,100],[0,0])
ax.plot([0,0],[0,0],[0,100])

# plotting size vs z
#plt.figure()
#plt.plot(zavg, size)

plt.show()
Code Example #17
File: main.py  Project: cskhw/computer-vision-kjm
import cv2
import numpy as np

theta = 18 * np.pi / 180
rot_mat = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]], np.float32)
image = np.full((500, 500, 3), 0, np.uint8)
pts1 = np.array([(0, -100), (100, -50), (100, 50), (0, 100), (-100, 50),
                 (-100, -50)], np.float32)
pts2 = cv2.gemm(pts1, rot_mat, 1, None, 1, flags=cv2.GEMM_2_T)


def move1(pts):
    for i in range(6):
        pts[i][0] = pts[i][0] + 250
        pts[i][1] = pts[i][1] + 250
    return pts


def move2(pts):
    for i in range(6):
        pts[i][0] = pts[i][0] - 250
        pts[i][1] = pts[i][1] - 250
    return pts


move1(pts1)
cv2.polylines(image, [np.int32(pts1)], True, (255, 255, 255), 2)
move2(pts1)
for _ in range(30):
    pts2 = move1(pts2)
Code Example #18
import cv2 as cv
import numpy as np

npTmp = np.random.random((1024, 1024)).astype(np.float32)

npMat1 = np.stack([npTmp,npTmp],axis=2)
npMat2 = npMat1

cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)

# Timed in the original notebook with IPython's %timeit magic;
# plain calls are shown here so the script also runs outside IPython.
cv.cuda.gemm(cuMat1, cuMat2, 1, None, 0, None, 1)
cv.gemm(npMat1, npMat2, 1, None, 0, None, 1)
Code Example #19
B = np.zeros((3, 3, 2))
B[:, :, 0] = [[3, 6, 9], [4, 7, 10], [5, 8, 11]]
B[:, :, 1] = [[3, 6, 9], [4, 7, 10], [5, 8, 11]]

S1 = np.zeros((2, 3, 2))
S2 = np.zeros((2, 3, 2))

# two-channel gemm treats the inputs as complex matrices, so S1 generally
# differs from the per-channel real products stored in S2
S1 = cv2.gemm(A, B, 1, None, 0)
S2[:, :, 0] = cv2.gemm(A[:, :, 0], B[:, :, 0], 1, None, 0)
S2[:, :, 1] = cv2.gemm(A[:, :, 1], B[:, :, 1], 1, None, 0)

print('-------- A ----------')
print(A)

print('-------- B ----------')
print(B)

print('-------- Overall ----------')
print((S1[:, :, 0]))
print('-')
print((S1[:, :, 1]))
print('\n\n')
Code Example #20
def polar_fft(im,
              nangle=None,
              radiimax=None,
              *,
              isshiftdft=False,
              truesize=None,
              logpolar=False,
              logoutput=False,
              interpolation='bilinear'):
    """Return dft in polar (or log-polar) units, the angle step 
    (and the log base)
    
    Parameters
    ----------
    im: 2d array
        The image
    nangle: number, optional
        The number of angles in the polar representation
    radiimax: number, optional
        The number of radius in the polar representation
    isshiftdft: boolean, default False
        True if the image is pre processed (DFT + fftshift)
    truesize: 2 numbers, required if isshiftdft is True
        The true size of the image
    logpolar: boolean, default False
        True if want the log polar representation instead of polar
    logoutput: boolean, default False
        True if want the log of the output
    interpolation: string, default 'bilinear'
        ('bicubic', 'bilinear', 'nearest') The interpolation 
        technique. (For now, avoid bicubic)
    
    Returns
    -------
    im: 2d array
        The (log) polar representation of the input image
    log_base: number, only if logpolar is True
        the log base if this is log polar representation
    
    Notes
    -----
    radiimax is the maximal radius (log of radius if logpolar is true).
    if not provided, it is deduced from the image size
    
    To get log-polar, set logpolar to True
    log_base is the base of the log. It is deduced from radiimax.
    Two images that will be compared should therefore have the same radiimax.
    """
    im = np.asarray(im, dtype=np.float32)
    #get dft if not already done
    if not isshiftdft:
        truesize = im.shape
        #subtract the mean to avoid having a large central value
        im = im - im.mean()
        im = centered_mag_sq_ccs(dft_optsize(im))
    #We need truesize! otherwise border effects.
    assert (truesize is not None)

    #the center is shifted from 0,0 to the ~ center
    #(eg. if 4x4 image, the center is at [2,2], as if 5x5)
    qshape = np.asarray([im.shape[0] // 2, im.shape[1]])
    center = np.asarray([qshape[0], 0])

    #if the angle Step is not given, take the number of pixel
    #on the perimeter as the target #=range/step

    if nangle is None:
        #TODO: understand why nangle needs to be exactly truesize
        nangle = np.min(truesize)  #range is pi, nbangle = 2r =~ pi r
#        nangle-=2

    #get the theta range
    theta = np.linspace(-np.pi / 2,
                        np.pi / 2,
                        nangle,
                        endpoint=False,
                        dtype=np.float32)

    #For the radii, the units are comparable if the log_base and radiimax are
    #the same. Therefore, log_base is deduced from radiimax
    #The step is assumed to be 1
    if radiimax is None:
        radiimax = qshape.min()

    #also as the circle is an ellipse in the image,
    #we want the radius to be from 0 to 1
    if logpolar:
        #The log base solves log_radii_max=log_{log_base}(linear_radii_max)
        #where we decided arbitrarily that linear_radii_max=log_radii_max
        log_base = np.exp(np.log(radiimax) / radiimax)
        radius = ((log_base**np.arange(0, radiimax, dtype=np.float32)) /
                  radiimax)
    else:
        radius = np.linspace(0, 1, radiimax, endpoint=False, dtype=np.float32)

    #get x y coordinates matrix (The units are 0 to 1, therefore a circle is
    #represented as an ellipse)
    y = cv2.gemm(np.sin(theta), radius, qshape[0], 0, 0,
                 flags=cv2.GEMM_2_T) + center[0]
    x = cv2.gemm(np.cos(theta), radius, qshape[1], 0, 0,
                 flags=cv2.GEMM_2_T) + center[1]
    interp = cv2.INTER_LINEAR
    if interpolation == 'bicubic':
        interp = cv2.INTER_CUBIC
    if interpolation == 'nearest':
        interp = cv2.INTER_NEAREST

    #get output
    output = cv2.remap(im, x, y, interp)  #LINEAR, CUBIC,LANCZOS4
    #apply log
    if logoutput:
        output = cv2.log(output)

    if logpolar:
        return output, log_base
    else:
        return output
Code Example #21
File: num_2.py  Project: cjh8746/Image_Processing
import numpy as np, cv2

data = [3, 6, 3, -5, 6, 1, 2, -3, 5]                    # create a 1-D list
m1 = np.array(data, np.float32).reshape(3,3)
m2 = np.array([2, 10, 28], np.float32)

ret, inv = cv2.invert(m1, flags=cv2.DECOMP_LU)          # compute the inverse matrix
if ret:
    dst1 = inv.dot(m2)                                  # matrix product using NumPy
    dst2 = cv2.gemm(inv, m2, 1, None, 1)                # matrix product using OpenCV
    ret, dst3 = cv2.solve(m1, m2, flags=cv2.DECOMP_LU)  # solve the linear system directly

    print("[inv] = \n%s\n" % inv)
    print("[dst1] =", dst1.flatten())                   # print the column vector on one line
    print("[dst2] =", dst2.flatten())                   # flatten the matrix to a vector
    print("[dst3] =", dst3.flatten())                   # flatten the matrix to a vector
else:
    print("The inverse matrix does not exist.")

Code Example #22
    def _multiply(self, matrix_a: np.ndarray, matrix_b: np.ndarray) -> np.ndarray:

        # matrix_c is all zeros, so beta has no effect: the result is matrix_a @ matrix_b
        matrix_c = np.zeros((matrix_a.shape[0], matrix_b.shape[1]))

        return cv2.gemm(matrix_a, matrix_b, 1, matrix_c, 1)
Code Example #23
import numpy as np, cv2

pts1 = np.array([(100, 100, 1), (400, 100, 1), (400, 250, 1), (100, 250, 1)],
                np.float32)

theta = 45 * np.pi / 180
m = np.array([[np.cos(theta), -np.sin(theta), 0],
              [np.sin(theta), np.cos(theta), 0], [0, 0, 1]], np.float32)

delta = (pts1[2] - pts1[0]) // 2
center = pts1[0] + delta

a = np.array([center, center, center, center])
t1 = pts1 - a
t2 = a

m2 = cv2.gemm(t1, m, 1, None, 1, flags=cv2.GEMM_2_T)
pts2 = m2 + a

for i, (pt1, pt2) in enumerate(zip(pts1, pts2)):
    print("pts1[%d] = %s, pts2[%d]= %s" % (i, pt1, i, pt2))

image = np.full((400, 500, 3), 255, np.uint8)
cv2.polylines(image, [np.int32(pts1[:, :2])], True, (0, 255, 0), 2)
cv2.polylines(image, [np.int32(pts2[:, :2])], True, (255, 0, 0), 3)
cv2.imshow("image", image)
cv2.waitKey(0)
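For comparison, a brief continuation sketch (not part of the original script): because the gemm call right-multiplies the row vectors by the transposed rotation matrix, the same corners can be reproduced with cv2.getRotationMatrix2D using the opposite angle sign and the same centre (250, 175).

# continues from the script above: pts1 and pts2 are still defined
M = cv2.getRotationMatrix2D((250, 175), -45, 1.0)
check = cv2.transform(pts1[:, :2].reshape(1, -1, 2), M).reshape(-1, 2)
print(np.allclose(check, pts2[:, :2], atol=1e-3))   # expected: True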
Code Example #24
    print('Shape of matrix_faces is ', np.shape(matrix_faces))
    # covar, mean_1 = cv2.calcCovarMatrix(matrix_faces, mean=None, flags=cv2.COVAR_ROWS|cv2.COVAR_SCRAMBLED)
    covar, mean = cv2.calcCovarMatrix(matrix_faces,
                                      mean=None,
                                      flags=cv2.COVAR_SCALE | cv2.COVAR_ROWS
                                      | cv2.COVAR_SCRAMBLED)
    # mean_x is the same as mean_1; cv2.COVAR_SCALE scales the covariance by 1/M (the default scale is 1)

    print("shape of mean is ", np.shape(mean))
    print("shape of covar is ", np.shape(covar))
    print('covar is', covar)

    eVal, eVec = cv2.eigen(covar)[1:]

    # Multiply eVec by (matrix_faces - mean), i.e. (u_i)^T (A)^T, then normalize; OpenCV uses row vectors, so everything is transposed relative to the paper
    eVec = cv2.gemm(eVec, matrix_faces - mean_x, 1, None, 0)
    eVec = np.apply_along_axis(lambda n: cv2.normalize(n, n).flat, 1, eVec)
    #     print("shape of eVal is ", np.shape(eVal))
    #     # print(eVal)
    #     # mean_SE = pd.DataFrame.from_dict(mean_SE, orient='index', columns=['values'])
    #     # plt.plot(eVal)
    #     # plt.show()
    #
    #     print("shape of eVec is ", np.shape(eVec))
    #
    #     #     # Compute the eigenvectors from the Matrix (modified from matrix_faces)
    # #     # we need to make each data(face images) as row vectors
    # #     print("Calculating PCA ", end="...")
    #     mean_2, eigenVectors = cv2.PCACompute(matrix_faces, mean=None)
    #     print('mean_1 is', mean_1)
    #     print('mean_x is',mean_x)
Code Example #25
    # print("--- X ---  size: {} ".format(matX.shape))
    # print matX[:, :, 1]

    # X'-----------------------------------------------------------

    matXtr[:, :, 0] = cv2.transpose(matX[:, :, 0])
    matXtr[:, :, 1] = cv2.transpose(matX[:, :, 1])

    # print("\n\n--- X' ---  size: {} ".format(matXtr.shape))
    # print matXtr

    # Z = X'*Y ----------------------------------------------------

    matZ = np.zeros((total_img, total_img, 2))
    matZ = cv2.gemm(matXtr, matY, 1, 0, 0)

    # print("\n--  Z = X'*Y  --- size: {} ".format(matZ.shape))

    # Z = inv(Z) --------------------------------------------------

    invZ1 = np.zeros((total_img, total_img))
    invZ2 = np.zeros((total_img, total_img))

    cv2.invert(matZ[:, :, 0], invZ1, cv2.DECOMP_LU)
    # cv2.invert(matZ[:, :, 1], invZ2, cv2.DECOMP_LU)                # :1300 Ze's code

    matZ[:, :, 0] = invZ1
    matZ[:, :, 1] = invZ2

    # W = Y*Z -----------------------------------------------------
Code Example #26
import numpy as np
import cv2 as cv
import time

npTmp = np.random.random((1024, 1024)).astype(np.float32)

npMat1 = np.stack([npTmp, npTmp], axis=2)
npMat2 = npMat1

cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)

start = time.time()
cv.cuda.gemm(cuMat1, cuMat2, 1, None, 0, None, 1)
print("CUDA --- %s seconds ---" % (time.time() - start))

start = time.time()  # reset the timer so the CPU figure does not include the CUDA run
cv.gemm(npMat1, npMat2, 1, None, 0, None, 1)
print("CPU --- %s seconds ---" % (time.time() - start))
Code Example #27
    # rows = 0
    # for j in xrange(0, size_cols):
    # 	for i in xrange(0, size_rows):
    # 		H2D[i, j, 0] = Hotf[rows, 0, 0]
    # 		H2D[i, j, 1] = Hotf[rows, 0, 1]
    # 		rows = rows + 1

    # # Filtering

    # H2DTr = np.zeros((size_cols, size_rows, 2))
    # H2DTr[:, :, 0] = cv2.transpose(H2D[:, :, 0])
    # H2DTr[:, :, 1] = cv2.transpose(H2D[:, :, 1])

    # ImgFilt = cv2.gemm(H2DTr, ImgDFT, 1, 0, 0)

    ImgFilt = cv2.gemm(Hotf, ImgDFT, 1, 0, 0)

    cv2.dft(ImgFilt, ImgFiltIDFT, cv2.DFT_INVERSE)  # ?

    print(('{}, c: {} '.format(name, ImgFilt[0, 0, 0])))
    # print '\n\n'
    # # print ImgFiltIDFT[:,:,0]
    # # print '---------------------------'
    # print ImgFilt[:,:,0]

    # Plot3D

    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')

    # x = np.arange(0, 80, 1)
Code Example #28
    rotate_mat[0][0] = 1
    rotate_mat[1][1] = 1
    rotate_mat[2][2] = 1

    angle = math.radians(args.rotate_angle)
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)

    rotate_mat[0][0] = cos_a
    rotate_mat[0][1] = sin_a
    rotate_mat[1][0] = -sin_a
    rotate_mat[1][1] = cos_a

    shift_mat2 = np.zeros((2, 3), np.float32)
    shift_mat2[0][0] = 1
    shift_mat2[1][1] = 1

    shift_mat2[0][2] = dst_pt[0]
    shift_mat2[1][2] = dst_pt[1]

    tran_mat = cv2.gemm(shift_mat2, rotate_mat, 1, None, 0)
    tran_mat = cv2.gemm(tran_mat, shift_mat1, 1, None, 0)
    tran_mat = cv2.gemm(tran_mat, crop_mat, 1, None, 0)

    out = cv2.warpAffine(img, tran_mat, (out_wh[0], out_wh[1]))

    cv2.imshow('original image', img)
    cv2.imshow('cropped image', out)
    cv2.imwrite(args.out_path, out)
    cv2.waitKey()
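The three gemm calls above chain a 2 x 3 matrix with what appear to be 3 x 3 matrices (each right-hand operand keeps its homogeneous third row). A standalone sketch of that composition pattern, with illustrative values rather than the script's own data:

import cv2
import numpy as np

def to3x3(m2x3):
    # append the [0, 0, 1] row so an affine matrix can sit on the right of a product
    return np.vstack([m2x3, [0.0, 0.0, 1.0]])

shift = np.array([[1, 0, 50],
                  [0, 1, 20]], np.float64)        # translate by (50, 20)
rot = cv2.getRotationMatrix2D((0, 0), 30, 1.0)    # 2 x 3 rotation about the origin

# "rotate, then shift" collapsed into one 2 x 3 matrix, like the chained gemm calls above
combined = cv2.gemm(shift, to3x3(rot), 1, None, 0)
assert np.allclose(combined, shift @ to3x3(rot))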