Example 1
def fast_template_matching(img, tmpl, max_level):
    pyr_img = build_pyramid(img, max_level)
    pyr_tmpl = build_pyramid(tmpl, max_level)

    results = []
    for level in range(max_level,-1,-1):
        ref = pyr_img[level]
        tpl = pyr_tmpl[level]
        
        if level == max_level:
            results.append(cv2.matchTemplate(ref, tpl, cv2.TM_CCOEFF_NORMED))
        else:
            mask = cv2.pyrUp(results[-1])
            (_, maxval, _, maxloc) = cv2.minMaxLoc(mask)
            if maxval < 0.5:
                break
            #print maxloc
            mask_h, mask_w = mask.shape
            mask_w = mask_w / 50
            mask_h = mask_h / 50
            tpl_h, tpl_w = tpl.shape
            y = maxloc[1] - mask_h/2
            x = maxloc[0] - mask_w/2
            w = mask_w + tpl_w
            h = mask_h + tpl_h
            res = np.zeros(ref.shape, np.float32)
            if x+w > ref.shape[1] or y+h > ref.shape[0] or x < 0 or y < 0:
                # Out of bounds
                return (0,(0,0))
            
            res[y:y+mask_h+1,x:x+mask_w+1] = cv2.matchTemplate(ref[y:y+h,x:x+w], tpl, cv2.TM_CCOEFF_NORMED)
            results.append(res)

    (_, maxval, _, maxloc) = cv2.minMaxLoc(results[-1])
    return maxval, maxloc
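
The build_pyramid helper used above is not shown in this example. A minimal sketch, assuming it simply returns a Gaussian pyramid with the original image at index 0 and max_level progressively downsampled copies after it:

import cv2

def build_pyramid(image, max_level):
    # Assumed helper: index 0 is the original image, index k is the image
    # downsampled k times with cv2.pyrDown.
    pyramid = [image]
    for _ in range(max_level):
        image = cv2.pyrDown(image)
        pyramid.append(image)
    return pyramid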
def get_LaplacePyramid(name1,name2,size):
    """
    Function that creates Laplacian pyramids of two images
    name1, name2: names of files containing the images
    size: number of levels in the Laplacian pyramid
    """
    # We first get the gaussian pyramids of each image. Notice 
    # the function written before is invoked here
    gaussPy1,gaussPy2 = get_GaussPyrimids(name1,name2,size)
    # We create a list of Laplacian pyramids; we initialize each list 
    # with the deepest level (smallest image) of each Gaussian pyramid
    Lapl1 = [gaussPy1[size]]; Lapl2 = [gaussPy2[size]]
    # We loop over each element of the Gaussian pyramid. Notice the 
    # looping begins with the smallest image in the Gaussian pyramid 
    # (deepest level)
    # For each element of the Gaussian pyramid...
    for k in range(size,0,-1):
        # Increase size of images in turn...
        G1 = cv2.pyrUp(gaussPy1[k]); G2 = cv2.pyrUp(gaussPy2[k])
        #print G1.shape, G2.shape
        # ... take respective differences with images in turn ...
        L1 = cv2.subtract(gaussPy1[k-1],G1); L2 = cv2.subtract(gaussPy2[k-1],G2)
        # ... and append to the list of Laplacian pyramids
        #print k , L1.shape, L2.shape
        Lapl1.append(L1); Lapl2.append(L2)
    return Lapl1,Lapl2
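
The get_GaussPyrimids helper mentioned in the docstring ("the function written before") is not included here. A minimal sketch, assuming it reads both files and builds Gaussian pyramids with `size` downsampling steps and the originals at index 0:

import cv2

def get_GaussPyrimids(name1, name2, size):
    # Assumed helper: read both images and build their Gaussian pyramids,
    # with the original images at index 0 and the smallest level at index `size`.
    img1 = cv2.imread(name1)
    img2 = cv2.imread(name2)
    gaussPy1, gaussPy2 = [img1], [img2]
    for _ in range(size):
        img1 = cv2.pyrDown(img1)
        img2 = cv2.pyrDown(img2)
        gaussPy1.append(img1)
        gaussPy2.append(img2)
    return gaussPy1, gaussPy2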
def debugSingleFeature(eyeData, singleFeature, targets):
    index3 = np.nonzero(targets == 3)[0]
    index4 = np.nonzero(targets == 4)[0]

    direction3 = eyeData[index3]
    direction4 = eyeData[index4]

    feature3 = singleFeature[index3]
    feature4 = singleFeature[index4]

    worst3 = np.argmin(feature3)
    worst4 = np.argmax(feature4)

    image3 = direction3[worst3].reshape((28, 42))  # .transpose()
    image4 = direction4[worst4].reshape((28, 42))  # .transpose()

    image3 = cv2.pyrUp(image3)
    image4 = cv2.pyrUp(image4)

    cv2.namedWindow("singlefeature")

    cv2.imshow("singlefeature", image3)
    cv2.waitKey(0)
    cv2.imshow("singlefeature", image4)
    cv2.waitKey(0)
Example 4
def buildUpsampledLaplacianPyramid(I, nLevels= -1, minSize = 16):
    if nLevels == -1:
        nLevels = getNlevels(I,minSize)

    pyramid = nLevels*[None]
    pyramid[0] = I
    if len(pyramid[0].shape) < 3:
        pyramid[0].shape += (1,)

    for i in range(nLevels-1):
        srcSz = pyramid[i].shape[0:2]
        newSz = tuple([a/2 for a in pyramid[i].shape[0:2]])
        newSz = (newSz[1],newSz[0])
        pyramid[i+1] = cv2.pyrDown(pyramid[i])
        if len(pyramid[i+1].shape) < 3:
            pyramid[i+1].shape += (1,)

    for i in range(nLevels-1):
        newSz = pyramid[i].shape[0:2]
        up = cv2.pyrUp(pyramid[i+1],dstsize=(newSz[1],newSz[0])).astype(np.float32)
        if len(up.shape) < 3:
            up.shape += (1,)
        pyramid[i] = pyramid[i].astype(np.float32) - up

    # Make a stack
    for lvl in range(0,nLevels-1):
        for i in range(nLevels-1,lvl,-1):
            newSz = pyramid[i-1].shape[0:2]
            up = cv2.pyrUp(pyramid[i],dstsize=(newSz[1],newSz[0]))
            if len(up.shape) < 3:
                up.shape += (1,)
            pyramid[i] = np.array(up)

    return pyramid
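
This example (and Example 11 below) relies on a getNlevels helper that is not shown. A minimal sketch, assuming it counts how many times the smaller image dimension can be halved before falling below minSize:

def getNlevels(I, minSize=16):
    # Assumed helper: number of pyramid levels, including the original image.
    nLevels = 1
    side = min(I.shape[0], I.shape[1])
    while side // 2 >= minSize:
        side //= 2
        nLevels += 1
    return nLevels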
Example 5
def YUV_stream2RGB_frame(stream):
	w=1280
	h=720
	size=w*h
	print stream
	# stream=np.fromstring(data,np.uint8) #convert data form string to numpy array

	#Y bytes  will start form 0 and end in size-1 
	y=stream[0:size].reshape(h,w) # create the y channel same size as the image

	#U bytes will start from size and end at size+size/4 as its size = framesize/4 

	u=stream[size:(size+(size/4))].reshape((h/2),(w/2))# create the u channel its size=framesize/4

	#up-sample the u channel to be the same size as the y channel and frame using pyrUp func in opencv2
	u_upsize=cv2.pyrUp(u)

	#do the same for v channel 
	v=stream[(size+(size/4)):].reshape((h/2),(w/2))
	v_upsize=cv2.pyrUp(v)
	#create the 3-channel frame using cv2.merge func watch for the order
	yuv=cv2.merge((y,u_upsize,v_upsize))
	# print yuv.shape
	print yuv
	#Convert TO RGB format

	# rgb=cv2.cvtColor(yuv,cv2.COLOR_YUV2BGR)
	rgb=cv2.cvtColor(yuv,cv2.cv.CV_YCrCb2RGB)

	#show frame
	cv2.imshow("preview",rgb)
	cv2.waitKey(5)
Example 6
def get_image_diff (img1, img2):
	"""
		Function: get_image_diff
		------------------------
		given two images, this finds the eroded/dilated difference 
		between them on a coarse grain.
		NOTE: assumes both are full-size, color
	"""
	#=====[ Step 1: convert to gray	]=====
	img1_gray = cv2.cvtColor (img1, cv2.COLOR_BGR2GRAY)
	img2_gray = cv2.cvtColor (img2, cv2.COLOR_BGR2GRAY)	

	#=====[ Step 2: downsample 	]=====
	img1_small = cv2.pyrDown(cv2.pyrDown(img1_gray))
	img2_small = cv2.pyrDown(cv2.pyrDown(img2_gray))	

	#=====[ Step 3: find difference	]=====
	difference = img2_small - img1_small

	#=====[ Step 4: erode -> dilate	]=====
	kernel = np.ones ((4, 4), np.uint8)
	difference_ed = cv2.dilate(cv2.erode (difference, kernel), kernel)

	#=====[ Step 5: blow back up	]=====
	return cv2.pyrUp (cv2.pyrUp (difference_ed))
Example 7
def process_scale(a_lods, lod):
    a1 = a_lods[lod]
    a2 = cv2.pyrUp(a_lods[lod+1])
    d = a1-a2
    for i in xrange(lod):
        d = cv2.pyrUp(d)
    v = cv2.GaussianBlur(d*d, (3, 3), 0)
    return np.sign(d), v
Example 8
def drawAndSaveCv2(img, shapes, outImgName):
    img = np.array(img[:, :, ::-1])  # reverse channel order to BGR (cv2 format)
    for shape in shapes:
        for p in shape:
            p = map(int, p)
            cv2.circle(img, (p[0], p[1]), 3, (0, 255, 0))
    img = cv2.pyrUp(cv2.pyrUp(img))
    cv2.imwrite(outImgName, img)
def main():
    displayer = Displayer()

    A = cv2.cvtColor(fetch_image('apple.jpg'), cv2.COLOR_BGR2RGB)
    B = cv2.cvtColor(fetch_image('orange.jpg'), cv2.COLOR_BGR2RGB)

    # generate Gaussian pyramid for A
    G = A.copy()
    gpA = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpA.append(G)

    # generate Gaussian pyramid for B
    G = B.copy()
    gpB = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpB.append(G)

    # generate Laplacian Pyramid for A
    lpA = [gpA[5]]
    for i in xrange(5,0,-1):
        GE = cv2.pyrUp(gpA[i])
        L = cv2.subtract(gpA[i-1],GE)
        lpA.append(L)

    # generate Laplacian Pyramid for B
    lpB = [gpB[5]]
    for i in xrange(5,0,-1):
        GE = cv2.pyrUp(gpB[i])
        L = cv2.subtract(gpB[i-1],GE)
        lpB.append(L)

    # Now add left and right halves of images in each level
    LS = []
    for la,lb in zip(lpA,lpB):
        rows,cols,dpt = la.shape
        ls = np.hstack((la[:,0:cols/2], lb[:,cols/2:]))
        LS.append(ls)

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1,6):
        ls_ = cv2.pyrUp(ls_)
        displayer.add_image(ls_, i)
        ls_ = cv2.add(ls_, LS[i])

    # image with direct connecting each half
    real = np.hstack((A[:,:cols/2],B[:,cols/2:]))

    displayer.add_image(ls_, "pyramid")
    #displayer.add_image(real, "stacked")

    #for i in range(len(lpA)):
    #    displayer.add_image(LS[i], i)

    displayer.display()
Example 10
def Laplacian_Pyramid_Blending_with_mask(A, B, m, num_levels=6):
    # assume mask is float32 [0,1]

    # generate Gaussian pyramid for A,B and mask
    GA = A.copy()
    GB = B.copy()
    GM = m.copy()
    gpA = [GA]
    gpB = [GB]
    gpM = [GM]
    for i in xrange(num_levels):
        GA = cv2.pyrDown(GA)
        GB = cv2.pyrDown(GB)
        GM = cv2.pyrDown(GM)
        gpA.append(np.float32(GA))
        gpB.append(np.float32(GB))
        gpM.append(np.float32(GM))

    # generate Laplacian Pyramids for A,B and masks
    lpA = [gpA[num_levels - 1]
          ]  # the bottom of the Lap-pyr holds the last (smallest) Gauss level
    lpB = [gpB[num_levels - 1]]
    gpMr = [gpM[num_levels - 1]]
    for i in xrange(num_levels - 1, 0, -1):
        # Laplacian: subtract upscaled version of lower level from current level
        # to get the high frequencies
        LA = np.subtract(gpA[i - 1],
                         cv2.pyrUp(
                             gpA[i],
                             dstsize=(gpA[i - 1].shape[1],
                                      gpA[i - 1].shape[0])))

        LB = np.subtract(gpB[i - 1],
                         cv2.pyrUp(
                             gpB[i],
                             dstsize=(gpB[i - 1].shape[1],
                                      gpB[i - 1].shape[0])))
        lpA.append(LA)
        lpB.append(LB)
        gpMr.append(gpM[i - 1])  # also reverse the masks

    # Now blend images according to mask in each level
    LS = []
    for la, lb, gm in zip(lpA, lpB, gpMr):
        ls = la * gm + lb * (1.0 - gm)
        LS.append(ls)

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1, num_levels):
        ls_ = cv2.resize(cv2.pyrUp(ls_), dsize=(LS[i].shape[1], LS[i].shape[0]))
        ls_ = cv2.add(ls_, LS[i].astype('float32'))

    return ls_
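
A possible way to call this function; the file names and the left/right mask below are illustrative assumptions, and the two images are assumed to have the same size:

import cv2
import numpy as np

A = cv2.imread("left.jpg").astype(np.float32)
B = cv2.imread("right.jpg").astype(np.float32)
m = np.zeros_like(A, dtype=np.float32)   # float32 mask in [0, 1], as the function assumes
m[:, :m.shape[1] // 2] = 1.0             # take the left half from A, the right half from B
blended = Laplacian_Pyramid_Blending_with_mask(A, B, m, num_levels=6)
cv2.imwrite("blended.jpg", np.clip(blended, 0, 255).astype(np.uint8))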
Example 11
def buildLaplacianPyramid(I, nLevels= -1, minSize = 16, useStack = False):
    if nLevels == -1:
        nLevels = getNlevels(I,minSize)

    pyramid = nLevels*[None]
    pyramid[0] = I
    if len(pyramid[0].shape) < 3:
        pyramid[0].shape += (1,)
    # All levels have the same resolution
    if useStack:
        # Gaussian pyramid
        for i in range(nLevels-1):
            srcSz = pyramid[i].shape[0:2]
            newSz = tuple([a/2 for a in pyramid[i].shape[0:2]])
            newSz = (newSz[1],newSz[0])
            pyramid[i+1] = cv2.pyrDown(pyramid[i])
            if len(pyramid[i+1].shape) < 3:
                pyramid[i+1].shape += (1,)

        # Make a stack
        for lvl in range(0,nLevels-1):
            for i in range(nLevels-1,lvl,-1):
                newSz = pyramid[i-1].shape[0:2]
                up = cv2.pyrUp(pyramid[i],dstsize=(newSz[1],newSz[0]))
                if len(up.shape) < 3:
                    up.shape += (1,)
                pyramid[i] = np.array(up)

        lapl = nLevels*[None]
        lapl[nLevels-1] = np.copy(pyramid[nLevels-1])
        for i in range(0,nLevels-1):
            lapl[i] = pyramid[i].astype(np.float32) - pyramid[i+1].astype(np.float32)
        pyramid = lapl

    else:
        for i in range(nLevels-1):
            srcSz = pyramid[i].shape[0:2]
            newSz = tuple([a/2 for a in pyramid[i].shape[0:2]])
            newSz = (newSz[1],newSz[0])
            pyramid[i+1] = cv2.pyrDown(pyramid[i])
            if len(pyramid[i+1].shape) < 3:
                pyramid[i+1].shape += (1,)

        for i in range(nLevels-1):
            newSz = pyramid[i].shape[0:2]
            up = cv2.pyrUp(pyramid[i+1],dstsize=(newSz[1],newSz[0])).astype(np.float32)
            if len(up.shape) < 3:
                up.shape += (1,)
            pyramid[i] = pyramid[i].astype(np.float32) - up



    return pyramid
Example 12
	def show_edges (self):

		# diff = (self.image_region_normalized - self.last_image_region_normalized)
		# gray = cv2.cvtColor ()
		# cv2.imshow ('NORMALIZED', cv2.pyrUp(cv2.pyrUp(np.abs(self.image_region_normalized - self.last_image_region_normalized))))

		cv2.imshow ('EDGES', cv2.pyrUp(cv2.pyrUp(self.edges)))
		cv2.imshow ('REGION', cv2.pyrUp(cv2.pyrUp(self.image_region)))

		key = 0
		while key != 27:
			key = cv2.waitKey (30)
def problem_7(imgA,imgB):
    #Another solution using image pyramids
    #For Gaussian Pyramids
    diffLevels_A=imgA.copy()
    GaussPyr_A=[]
    GaussPyr_A.append(diffLevels_A)         # Gaussian Pyramids for imgA
    for itr in range(4):
        diffLevels_A=cv2.pyrDown(diffLevels_A)
        GaussPyr_A.append(diffLevels_A)
    diffLevels_B=imgB.copy()                # Gaussian Pyramids for imgB
    GaussPyr_B=[];GaussPyr_B.append(diffLevels_B)
    for itr in range(4):
        diffLevels_B=cv2.pyrDown(diffLevels_B)
        GaussPyr_B.append(diffLevels_B)

    #For Laplacian Pyramids   (Laplacian pyramids will be appended from lowest resolution to highest resolution)
    LaplacePyr_A=[GaussPyr_A[3]]   #Since we start building the Laplacian pyramids from the bottom
    for itr in range(3,0,-1):
        temp_A=cv2.pyrUp(GaussPyr_A[itr])
        d=(GaussPyr_A[itr-1].shape[0],GaussPyr_A[itr-1].shape[1],3)
        temp_A=np.resize(temp_A,d)
        LDiff=cv2.subtract(GaussPyr_A[itr-1],temp_A)      #Because "GaussPyr_A[itr-1]" has a higher resolution than "GaussPyr_A[itr]"
        LaplacePyr_A.append(LDiff)

    LaplacePyr_B=[GaussPyr_B[3]]
    for itr in range(3,0,-1):
        temp_B=cv2.pyrUp(GaussPyr_B[itr])
        d=(GaussPyr_B[itr-1].shape[0],GaussPyr_B[itr-1].shape[1],3)
        temp_B=np.resize(temp_B,d)
        LDiff=cv2.subtract(GaussPyr_B[itr-1],temp_B)
        LaplacePyr_B.append(LDiff)

    #Blending the two Laplacian Pyramids (all resolution levels)
    Blend=[]
    #Note: Blend will have pyramids blended from lower to higher resolution
    for LapA,LapB in zip(LaplacePyr_A,LaplacePyr_B):
        Lr,Lc,dimension=LapA.shape
        temp=np.hstack((LapA[:,0:Lc/2],LapB[:,Lc/2:]))
        # Laplacian pyramid at each level is blended. This will help reconstruction of image
        Blend.append(temp)

    #Reconstructing the Image from the pyramids (Laplacian to Gaussian)
    final_temp=Blend[0]
    for itr in range(1,4):
        final_temp=cv2.pyrUp(final_temp)
        d=(Blend[itr].shape[0],Blend[itr].shape[1],3)
        final_temp=np.resize(final_temp,d)
        final_temp=cv2.add(final_temp,Blend[itr])       #L[i]=G[i]-G[i-1]..diff of gaussian..So, G[i]=L[i]+G[i-1]

    final_img=np.hstack((imgA[:,0:Lc/2],imgB[:,Lc/2:]))
    cv2.imshow("Final Blended Image",final_temp)
    cv2.imwrite("P_7.jpg",final_temp)
    cv2.waitKey(0)
Example 14
def blend(image1, image2, mask):
    # generate Gaussian pyramid for image 1
    G = image1.astype(np.float32)
    gpA = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpA.append(G.astype(np.float32))

    # generate Gaussian pyramid for image 2
    G = image2.astype(np.float32)
    gpB = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpB.append(G.astype(np.float32))

    # generate Gaussian pyramid for mask
    G = mask.astype(np.float32)
    gpM = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpM.append(G.astype(np.float32))

    # generate Laplacian Pyramid for image 1
    lpA = [gpA[5]]
    for i in xrange(5,0,-1):
        rows,cols = gpA[i-1].shape[:2]
        GE = cv2.pyrUp(gpA[i])[:rows,:cols]
        L = cv2.subtract(gpA[i-1],GE)
        lpA.append(L)

    # generate Laplacian Pyramid for image 2
    lpB = [gpB[5]]
    for i in xrange(5,0,-1):
        rows,cols = gpB[i-1].shape[:2]
        GE = cv2.pyrUp(gpB[i])[:rows,:cols]
        L = cv2.subtract(gpB[i-1],GE)
        lpB.append(L)

    # Now add the images with mask
    LS = []
    length = len(lpA)
    for i in range(length):
        LS.append(lpB[i]*gpM[length-i-1] + lpA[i]*(1-gpM[length-i-1]))

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1,6):
        rows,cols = LS[i].shape[:2]
        ls_ = cv2.pyrUp(ls_)[:rows,:cols]
        ls_ = cv2.add(ls_, LS[i])
    ls_ = np.clip(ls_, 0, 255)
    return ls_.astype(np.uint8)
Example 15
File: main.py Project: Daiver/jff
def main_view_set():
    torch_fuze.utils.manual_seed(42)
    train_set, test_set = mk_quad_points_dataset(20, 20)
    print("TRAIN SET:")
    for img, target in train_set:
        print(target)
        cv2.imshow('', cv2.pyrUp(cv2.pyrUp(img)))
        cv2.waitKey()
    print("TEST SET:")
    for img, target in test_set:
        print(target)
        cv2.imshow('', cv2.pyrUp(cv2.pyrUp(img)))
        cv2.waitKey()
Example 16
def laplacian_pyramid_blending(img_in1, img_in2):

    # Write laplacian pyramid blending codes here

    img_in1 = img_in1[:, :img_in1.shape[0]]
    img_in2 = img_in2[:img_in1.shape[0], :img_in1.shape[0]]

    # generate Gaussian pyramid for A
    G = img_in1.copy()
    gpA = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpA.append(G)

    # generate Gaussian pyramid for B
    G = img_in2.copy()
    gpB = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpB.append(G)

    # generate Laplacian Pyramid for A
    lpA = [gpA[5]]
    for i in xrange(5, 0, -1):
        GE = cv2.pyrUp(gpA[i])
        L = cv2.subtract(gpA[i - 1], GE)
        lpA.append(L)

    # generate Laplacian Pyramid for B
    lpB = [gpB[5]]
    for i in xrange(5, 0, -1):
        GE = cv2.pyrUp(gpB[i])
        L = cv2.subtract(gpB[i - 1], GE)
        lpB.append(L)

    # Now add left and right halves of images in each level
    LS = []
    for la, lb in zip(lpA, lpB):
        rows, cols, dpt = la.shape
        ls = np.hstack((la[:, 0:cols / 2], lb[:, cols / 2:]))
        LS.append(ls)

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1, 6):
        ls_ = cv2.pyrUp(ls_)
        ls_ = cv2.add(ls_, LS[i])

    img_out = ls_  # Blending result
    return True, img_out
def blend(image, tileSize):
    b=1
    a = len(image)
    l = len(image[0])
    p= image[0:tileSize,30:tileSize]
    for j in range(0,len(image)-tileSize,tileSize):
        for k in range(0,len(image[0])-tileSize,tileSize):

            A = image[j+tileSize-3:j+tileSize,k+tileSize-3:k+tileSize]
            G = A.copy()
            gpA = [G]
            for i in xrange(3):
                G = cv2.pyrDown(G)
                gpA.append(G)
            B = image[j:j+tileSize,k+tileSize:k+tileSize+tileSize]
            G = B.copy()
            gpB = [G]
            for i in xrange(3):
                G = cv2.pyrDown(G)
                gpB.append(G)
            lpA = [gpA[2]]
            for i in xrange(2,0,-1):
                GE = cv2.pyrUp(gpA[i])
                b = GE[0:len(gpA[i-1]),0:len(gpA[i-1])]
                L = cv2.subtract(gpA[i-1],b)
                lpA.append(L)
            lpB = [gpB[2]]
            for i in xrange(2,0,-1):
                GE = cv2.pyrUp(gpB[i])
                b = GE[0:len(gpB[i-1]),0:len(gpB[i-1])]
                L = cv2.subtract(gpB[i-1],b)
                lpB.append(L)
             # Now add left and right halves of images in each level
            LS = []
            for la,lb in zip(lpA,lpB):
                rows,cols,dpt = la.shape
                p =la[:,0:2]
                pp= lb[:,cols/2:]
                ls = np.hstack((la[:,0:2], lb[:,cols/2:]))
                LS.append(ls)
            ls_ = LS[0]
            # now reconstruct
            for i in xrange(1,3):
                ls_ = cv2.pyrUp(ls_)
                b = ls_[0:len(LS[i]),0:len(LS[i])]
                ls_ = cv2.add(b, LS[i])
             #cv2.imwrite('Pyramid_blending2.jpg',ls_)
            image[j+tileSize-2:j+tileSize-2+tileSize,k+tileSize-2:k+tileSize-2+tileSize] = ls_
    return image
Example 18
 def up(self, im):
     "assume `im' is at self.level & restore"
     for level in reversed(range(self.level)):
         # make sure we get back to the original size exactly
         divisor = float(pow(2, level))
         w = int(numpy.ceil(self.size[0] / divisor))
         h = int(numpy.ceil(self.size[1] / divisor))
         if len(im.shape)>2:
             shape = (h,w,im.shape[2])
         else:
             shape = (h,w)
         z = numpy.zeros(shape, dtype=numpy.uint8)
         cv2.pyrUp(im, z, (w,h))
         im = z
     return im
Example 19
def pyr_build(img):
	#lp = []
	G = [img.astype(numpy.float32)]
	#first build the array images to build from 
	for i in range(pyrSize-1):
		gi1 = cv2.pyrDown(G[i])
		G.append(gi1)

	lp = []

	#now build the actual pyramid from that array
	for i in range(0, pyrSize-1):
		h, w, d = G[i].shape
		gi1_up = cv2.pyrUp(G[i+1], None, (w, h))  # dstsize is (width, height)
		L = G[i] - gi1_up
		lp.append(L)
	# used the 2 lines below to verify theory about the limits of
	# when the depth continued to change things - comment out because
	# the print is otherwise unnecessary 
	# print "smallest height was", h
	# print "smallest width was", w

	#L[N] = G[N]
	lp.append(G[-1]) 
	return lp
Example 20
    def handle(self):
        # self.greyImg = cv2.GaussianBlur(self.sourceImg, (5, 5), 2)

        img = self.sourceImg
        self.shape = img.shape
        width = self.shape[1]
        height = self.shape[0]

        # TODO: shrink an oversized image or enlarge an undersized one
        if width > 400 or height > 300:
            self.sourceImg = cv2.pyrDown(self.sourceImg, (400, 400 * width / height))
        else:
            if width < 400 or height < 300:
                self.sourceImg = cv2.pyrUp(self.sourceImg, (400, 400 * width / height))

        # show the original image
        cv2.namedWindow("image")
        cv2.imshow("image", self.sourceImg)
        cv2.waitKey(0)

        self.shape = self.sourceImg.shape
        width = self.shape[1]
        height = self.shape[0]

        self.destination = np.float32([[0, 0], [width, 0], [width, height], [0, height]])

        self.greyImg = cv2.cvtColor(self.sourceImg, cv2.COLOR_BGR2GRAY)  # convert the image to grayscale
        print self.greyImg.shape[1]
        print self.greyImg.shape[0]
Example 21
def pyr_build(Gi):

  lp = []

  for i in range (NUM_PYR):
    #scale it down
    h = Gi.shape[0]
    w = Gi.shape[1]
    Gi1 = cv2.pyrDown(Gi)
    print 'Gi1 has shape', Gi1.shape

    #just specifying size as (w, h) works
    uGi1 = cv2.pyrUp(Gi1, dstsize=(w, h))
    print 'unsmall has shape', uGi1.shape

    Gi = Gi.astype(numpy.float32)
    uGi1 = uGi1.astype(numpy.float32)
    Li = Gi - uGi1
    lp.append(Li)

    cv2.imshow(win, 0.5 + 0.5 * (Li / numpy.abs(Li).max()))

    while cv2.waitKey(15) < 0: pass

    Gi = Gi1

  Gi = Gi.astype(numpy.float32)
  lp.append(Gi)
  cv2.imshow(win, Gi/255.0)
  while cv2.waitKey(15) < 0: pass

  return lp
Example 22
def pyr_reconstruct(lp):

    Ri = lp[-1]

    for i in range(len(lp)-1,0,-1):

        h = Ri.shape[0]
        w = Ri.shape[1]

        print "h = ", h
        print "w = ", w

        uRi = cv2.pyrUp(Ri, dstsize=(2*w, 2*h))

        print "uRi h = ", uRi.shape[0]
        print "uRi w = ", uRi.shape[1]

        print "lp[i-1] h = ", lp[i-1].shape[0]
        print "lp[i-1] w = ", lp[i-1].shape[1]

        Ri1 = uRi + lp[i-1]

        Ri = Ri1

        print "iteration = ", i

    Ri = Ri.astype(numpy.uint8)
    cv2.imshow(win, Ri)
    while cv2.waitKey(15) < 0: pass
Example 23
	def cartoonizer(self, imgRGB):
		numDownSamples = 2		# number of downscaling steps
		numBilateralFilters = 7 # number of bilateral filtering steps

		# -- STEP 1 --
		# downsample image using Gaussian pyramid
		imgColor = imgRGB
		for i in xrange(numDownSamples):
			imgColor = cv3.pyrDown(imgColor)
			
		# repeatedly apply small bilateral filter instead of applying
		# one large filter
		for i in xrange(numBilateralFilters):
			imgColor = cv3.bilateralFilter(imgColor, 9, 9, 7)
			
		# upsample image to original size
		for i in xrange(numDownSamples):
			imgColor = cv3.pyrUp(imgColor)

		# -- STEPS 2 and 3 --
		# convert to grayscale and apply median blur
		imgGray = cv3.cvtColor(imgRGB, cv3.COLOR_RGB2GRAY)
		imgBlur = cv3.medianBlur(imgGray, 7)

		# -- STEP 4 --
		# detect and enhance edges
		imgEdge = cv3.adaptiveThreshold(imgBlur, 255, cv3.ADAPTIVE_THRESH_MEAN_C, cv3.THRESH_BINARY, 9, 2)

		# -- STEP 5 --
		# convert back to color so that it can be bit-ANDed with color image
		imgEdge = cv3.cvtColor(imgEdge, cv3.COLOR_GRAY2RGB)
		return cv3.bitwise_and(imgColor, imgEdge)
def processFrame( model, writer, frame, height, width, params ):
    # Run ViBe on the current frame to update the model.
    frameStartTime = time.time()
    model.update(frame)
    frameEndTime = time.time()
    
    if not params.no_out:
        print "seconds for ViBe processing: %f" % (
            frameEndTime - frameStartTime)

    # Overlay the current frame with the results.
    # channels = cv2.split(frame)
    # blank_image = numpy.zeros((height, width), numpy.uint8)
    # combined = model.foreGround

    channel = np.zeros((height, width, 1), np.uint8)
    # fullSized = cv2.pyrUp(cv2.pyrUp(cv2.pyrUp(model.foreGround)))
    fullSized = model.foreGround
    for pyr in range(model.runnerParams.pyrNum):
        fullSized = cv2.pyrUp(fullSized)
    fullSized = postProcessing(fullSized)

    resultOneChannel = cv2.bitwise_or(channel, fullSized)
    combined = cv2.merge((
        resultOneChannel,
        resultOneChannel,
        resultOneChannel
    ))
    
    return combined, combined
    def label_face(self, image):
        self.image = image
        image_temp = cv2.pyrUp(image)
        name = 'Current image -- see terminal window for instructions.'
        msgs = ['Click the top of the forehead!',
                'Click the bottom of the chin!',
                'Click the leftmost part of the head! (usually an ear)',
                'Click the rightmost part of the head! (usually an ear)',
                'How does this look?']

        self.clicked = []
        self.face = [image_temp.shape[1], image_temp.shape[0], -1, -1]
        for msg in msgs:
            cv2.namedWindow(name)
            cv2.rectangle(image_temp, tuple(self.face[0:2]), tuple(self.face[2:4]), (0,0,255), 1)
            cv2.imshow(name, image_temp)
            cv2.setMouseCallback(name, self.mouse_callback)
            self.need_click = True
            rospy.loginfo(msg)
            key = cv2.waitKey(0)
            if key == 1048696:  # the letter "x", lowercase
                self.keep_going = False
                rospy.loginfo('Quitting!')
                break
            else: 
                self.face[0] = min(self.face[0], self.clicked[-1][0])  # left boundary
                self.face[1] = min(self.face[1], self.clicked[-1][1])  # top boundary
                self.face[2] = max(self.face[2], self.clicked[-1][0])  # right boundary
                self.face[3] = max(self.face[3], self.clicked[-1][1])  # bottom boundary

        mask_temp = self.make_mask(image_temp)
        self.mask = cv2.pyrDown(mask_temp)
Example 26
def get_tle_vid_goos(vid_name,divisor):
    cap = cv2.VideoCapture(vid_name)
    time = 0
    tot_time=int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    energies=np.zeros(tot_time,dtype=np.int64)
    cap.set(cv2.CAP_PROP_POS_FRAMES,time)
    ret,frame = cap.read()
    in_frame=np.zeros(frame.shape,dtype=np.int64)
    small_frame=cv2.pyrDown(frame)
    blur_frame=np.zeros(frame.shape,dtype=np.int64)
    goose=cv2.getGaussianKernel(frame.shape[0],frame.shape[0]/divisor)
    goose1=cv2.getGaussianKernel(frame.shape[1],frame.shape[1]/divisor)
    goosq=np.dot(goose,np.transpose(goose1))
    m=1/np.max(goosq)
    center_frame=np.zeros(frame.shape,dtype=np.float64)
    for color in range(3):
        center_frame[:,:,color]=goosq*m
    print tot_time
    while(time<tot_time):
        in_frame[:,:,:]=frame
        small_frame[:,:,:]=cv2.pyrDown(frame)
        blur_frame[:,:,:]=cv2.pyrUp(small_frame)[:frame.shape[0],:frame.shape[1],:]
        tle=np.sum(center_frame*np.abs(np.subtract(in_frame,blur_frame)))
        energies[time]=tle
        if time%50==0:
            print time
        time=time+1
        cap.set(cv2.CAP_PROP_POS_FRAMES,time)
        ret,frame = cap.read()
        
    return energies
Example 27
	def render(self,frame):
		canvas = cv2.imread("pen.jpg", cv2.CV_8UC1)
		numDownSamples = 2
		img_rgb = frame
		# number of downscaling steps
		numBilateralFilters = 3
		# number of bilateral filtering steps
		# -- STEP 1 --
		# downsample image using Gaussian pyramid
		img_color = img_rgb
		for _ in xrange(numDownSamples):
			img_color = cv2.pyrDown(img_color)
		# repeatedly apply small bilateral filter instead of applying
		# one large filter
		for _ in xrange(numBilateralFilters):
			img_color = cv2.bilateralFilter(img_color, 9, 9, 3)

		# upsample image to original size
		for _ in xrange(numDownSamples):
			img_color = cv2.pyrUp(img_color)
		# convert to grayscale and apply median blur
		img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
		img_blur = cv2.medianBlur(img_gray, 3)

		# detect and enhance edges
		img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
		return  cv2.multiply(cv2.medianBlur(img_edge,7), canvas, scale=1./256)
Example 28
	def render(self,frame):
		numDownSamples = 2
		img_rgb = frame
		# number of downscaling steps
		numBilateralFilters = 7
		# number of bilateral filtering steps
		# -- STEP 1 --
		# downsample image using Gaussian pyramid
		img_color = img_rgb
		for _ in xrange(numDownSamples):
			img_color = cv2.pyrDown(img_color)
		# repeatedly apply small bilateral filter instead of applying
		# one large filter
		for _ in xrange(numBilateralFilters):
			img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

		# upsample image to original size
		for _ in xrange(numDownSamples):
			img_color = cv2.pyrUp(img_color)
		# convert to grayscale and apply median blur
		img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
		img_blur = cv2.medianBlur(img_gray, 7)

		# detect and enhance edges
		img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
		# -- STEP 5 --
		# convert back to color so that it can be bit-ANDed with color image
		img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
		final = cv2.bitwise_and(img_color, img_edge)
		return cv2.medianBlur(final,7)
Example 29
    def threshold_gradient_strength(self, gradient_mag):
        """ thresholds the gradient strength such that features are emphasized
        """
        lo, hi = gradient_mag.min(), gradient_mag.max()
        threshold = lo + self.params['gradient/threshold']*(hi - lo)
        bw = (gradient_mag > threshold).astype(np.uint8)
        
        for _ in xrange(2):
            bw = cv2.pyrDown(bw)

        # do morphological opening to remove noise
        w = 2#0
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (w, w))
        bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, kernel)
    
        # do morphological closing to locate objects
        w = 2#0
        bw = cv2.copyMakeBorder(bw, w, w, w, w, cv2.BORDER_CONSTANT, 0)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*w + 1, 2*w + 1))
        bw = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
        bw = bw[w:-w, w:-w].copy()

        for _ in xrange(2):
            bw = cv2.pyrUp(bw)
        
        return bw
Example 30
    apple_copy = cv2.pyrDown(apple_copy)
    gp_apple.append(apple_copy)

orange_copy = orange.copy()
gp_orange = [orange_copy]

#Gaussian pyramid for orange
for i in range(6):
    orange_copy = cv2.pyrDown(orange_copy)
    gp_orange.append(orange_copy)

#generating laplacian pyramid for apple
apple_copy = gp_apple[5]
lp_apple = [apple_copy]
for i in range(5, 0, -1):
    gaussian_expanded = cv2.pyrUp(gp_apple[i])
    laplacian = cv2.subtract(gp_apple[i - 1], gaussian_expanded)
    lp_apple.append(laplacian)

#generating laplacian pyramid for orange
orange_copy = gp_orange[5]
lp_orange = [orange_copy]
for i in range(5, 0, -1):
    gaussian_expanded = cv2.pyrUp(gp_orange[i])
    laplacian = cv2.subtract(gp_orange[i - 1], gaussian_expanded)
    lp_orange.append(laplacian)

Example 31
def softmix(A, B):
    ra, ca, dpt = A.shape
    rb, cb, dpt = B.shape
    #rrate=float(ra)/float(rb)
    #crate=float(ca)/float(cb)
    #B = cv2.resize(B,None,fx=rrate,fy=crate,interpolation = cv2.INTER_AREA)
    B = cv2.resize(B, (ca, ra), interpolation=cv2.INTER_AREA)
    #cv2.imshow('frame2',B)
    level = 3
    GA = A.copy()
    gpA = [GA]
    GB = B.copy()
    gpB = [GB]
    for i in xrange(level):
        # generate Gaussian pyramid for A
        #size=np.array(GA.shape[0:2],dtype=int)/2+1
        GA = cv2.pyrDown(GA)  #, dstsize=tuple(size))
        gpA.append(GA)
        # generate Gaussian pyramid for B
        GB = cv2.pyrDown(GB)  #,dstsize=tuple(size))
        gpB.append(GB)

    LA = gpA[level]
    LB = gpB[level]
    LS = []
    # Now add left and right halves of images in each level
    rows, cols, dpt = LA.shape
    ls = np.concatenate((LA[:, 0:cols / 8], LB[:, cols / 8:7 * cols / 8],
                         LA[:, 7 * cols / 8:cols]),
                        axis=1)
    #ls=np.concatenate((LA[0:rows/8,:],ls[rows/8:7*rows/8,:],LA[7*rows/8:rows,:]), axis=0)
    ls = np.concatenate((ls[0:7 * rows / 8, :], LA[7 * rows / 8:rows, :]),
                        axis=0)

    LS.append(ls)
    for i in xrange(level, 0, -1):
        size = np.array(gpA[i - 1].shape[0:2], dtype=int)
        # generate Laplacian Pyramid for A
        GE = cv2.pyrUp(gpA[i], dstsize=(size[1], size[0]))
        LA = cv2.subtract(gpA[i - 1], GE)
        #lpA.append(LA)

        # generate Laplacian Pyramid for B
        GE = cv2.pyrUp(gpB[i], dstsize=(size[1], size[0]))
        LB = cv2.subtract(gpB[i - 1], GE)
        #lpB.append(LB)
        # Now add left and right halves of images in each level
        rows, cols, dpt = LA.shape
        ls = np.concatenate((LA[:, 0:cols / 8], LB[:, cols / 8:7 * cols / 8],
                             LA[:, 7 * cols / 8:cols]),
                            axis=1)
        #ls=np.concatenate((LA[0:rows/8,:],ls[rows/8:7*rows/8,:],LA[7*rows/8:rows,:]), axis=0)
        ls = np.concatenate((ls[0:7 * rows / 8, :], LA[7 * rows / 8:rows, :]),
                            axis=0)
        LS.append(ls)

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1, level + 1):
        size = np.array(LS[i].shape[0:2], dtype=int)
        ls_ = cv2.pyrUp(ls_, dstsize=(size[1], size[0]))
        ls_ = cv2.add(ls_, LS[i])

    return ls_
Example 32
    apple_copy = cv2.pyrDown(apple_copy)
    gp_apple.append(apple_copy)

#generate gaussian pyramid fororange
orange_copy = orange.copy()
gp_orange = [orange_copy]

for i in range(6):
    orange_copy = cv2.pyrDown(orange_copy)
    gp_orange.append(orange_copy)

# generate laplacian pyramid for apple
apple_copy = gp_apple[5]
lp_apple = [apple_copy]
for i in range(5, 0, -1):
    gaussian_expanded = cv2.pyrUp(gp_apple[i])
    laplacian = cv2.subtract(gp_apple[i - 1], gaussian_expanded)
    lp_apple.append(laplacian)

# generate laplacian pyramid for orange
orange_copy = gp_orange[5]
lp_orange = [orange_copy]
for i in range(5, 0, -1):
    gaussian_expanded = cv2.pyrUp(gp_orange[i])
    laplacian = cv2.subtract(gp_orange[i - 1], gaussian_expanded)
    lp_orange.append(laplacian)

# Now add left and right halves of images in each level
apple_orange_pyramid = []
n = 0
for apple_lap, orange_lap in zip(lp_apple, lp_orange):
Example 33
def Blur(imagem):
    imagem = cv2.pyrDown(imagem)
    imagem = cv2.pyrUp(imagem)
    return imagem
Example 34
        img1 = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
        num_down = 2 # number of downsampling steps
        num_bilateral = 7 # number of bilateral filtering steps


        # downsample image using Gaussian pyramid
        img_color = img1
        for _ in range(num_down):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of
        # applying one large filter
        for _ in range(num_bilateral):
            img_color = cv2.bilateralFilter(img_color, d=9, sigmaColor=9, sigmaSpace=7)
        # upsample image to original size
        for _ in range(num_down):
            img_color = cv2.pyrUp(img_color)

        #STEP 2 & 3
        #Use median filter to reduce noise
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)

        #STEP 4
        #Use adaptive thresholding to create an edge mask
        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,
                                         cv2.ADAPTIVE_THRESH_MEAN_C,
                                         cv2.THRESH_BINARY,
                                         blockSize=9,
                                         C=2)
Example 35
    gpA.append(G)

#%%

# generate Gaussian pyramid for B - from base to tip.
G = B.copy()
gpB = [G]
for i in xrange(n):
    G = cv2.pyrDown(G)
    gpB.append(G)

#%%
# generate Laplacian Pyramid for A - from tip to base
lpA = [gpA[n]]
for i in xrange(n, 0, -1):
    GE = cv2.pyrUp(gpA[i])
    L = cv2.subtract(gpA[i - 1], GE)
    lpA.append(L)
#%%
# generate Laplacian Pyramid for B - from tip to base
lpB = [gpB[n]]
for i in xrange(n, 0, -1):
    GE = cv2.pyrUp(gpB[i])
    L = cv2.subtract(gpB[i - 1], GE)
    lpB.append(L)

#%%
# Now add left and right halves of images in each level - from tip to base.
LS = []
for la, lb in zip(lpA, lpB):
    rows, cols, dpt = la.shape
if __name__ == '__main__':

    #this experiment relies upon a single input argument
    if len(sys.argv) < 2:
        print 'USAGE connected_component_test.py <input image name>'
        sys.exit(-1)

    img = cv2.imread(sys.argv[1])
    (h, w, d) = img.shape

    #convert to single channel grayscale, and form scaled and unscaled binary images
    #we scale the binary image to have a copy with tones (zip-a-tones) removed
    #and we form a binary image that's unscaled for use in final masking
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    scaled = cv2.pyrUp(cv2.pyrDown(gray, dstsize=(w / 2, h / 2)),
                       dstsize=(w, h))
    (binthresh, binary) = cv2.threshold(scaled, 190, 255,
                                        cv2.THRESH_BINARY_INV)
    (binthresh_gray, binary_unscaled) = cv2.threshold(gray, 190, 255,
                                                      cv2.THRESH_BINARY_INV)

    #scale = estimate_scale(binary)
    #Draw out statistics on average connected component size in the rescaled, binary image
    components = get_connected_components(binary)
    sorted_components = sorted(components, key=area_bb)
    #sorted_components = sorted(components,key=lambda x:area_nz(x,binary))
    areas = zeros(binary.shape)
    for component in sorted_components:
        if amax(areas[component]) > 0: continue
        areas[component] = area_bb(component)**0.5
        #areas[component]=area_nz(component,binary)
Example 37
T = np.float32([[1, 0, w / 4], [0, 1, h / 4]])
transl = cv2.warpAffine(swars, T, (w, h))

#2.  Rotation
# Rotation matrix:  [[cos x -sin x][sin x cos x]]
R = cv2.getRotationMatrix2D((w / 2, h / 2), 45, 1)
rot = cv2.warpAffine(swars, R, (w, h))

#3.  Scaling
scal1 = cv2.resize(swars, None, fx=0.6, fy=0.6)
scal2 = cv2.resize(swars, None, fx=0.6, fy=0.6, interpolation=cv2.INTER_CUBIC)
scal3 = cv2.resize(swars, (900, 400), interpolation=cv2.INTER_AREA)

#4.  Pyramiding
smaller = cv2.pyrDown(swars)
larger = cv2.pyrUp(swars)

#5.  Cropping
sr, sc = int(h * .25), int(w * .25)
er, ec = int(h * .5), int(w * .5)

cropped = swars[sr:er, sc:ec]

#6.  Arithmetic operations with images
# Addition
M = np.ones(swars.shape, dtype="uint8") * 58
adicao = cv2.add(swars, M)
subtracao = cv2.subtract(swars, M)
#cv2.imshow('Translação', transl)
#cv2.imshow('Rotação', rot)
#cv2.imshow('INTER_LINEAR', scal1)

# helper function to display an image
def show_img(img_name, img_src):
    cv2.imshow(img_name, img_src)
    cv2.waitKey(2000)
    cv2.destroyAllWindows()


# Gaussian pyramid, Laplacian pyramid

img = cv2.imread("AM.png")
show_img("AM", img)

# upsample, i.e. enlarge
up = cv2.pyrUp(img)
show_img("up_img", up)

# downsample, i.e. shrink
down = cv2.pyrDown(img)
show_img("down_img", down)

# upsample first, then downsample; both steps lose detail, so the result is clearly worse than the original
up_down = cv2.pyrDown(up)
res = np.hstack((img, up_down))
show_img("img-up_down", res)

# image is the input picture for cv2.findContours; note that it must be a binary image.
# If the input is a color image, it must first be converted to grayscale and thresholded.
# mode is the contour retrieval mode; there are 4 options:
# cv2.RETR_EXTERNAL
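
The comment above breaks off while listing the retrieval modes. A minimal sketch of the cv2.findContours call it is describing, reusing img and show_img from this snippet, assuming OpenCV 4.x (where findContours returns two values) and an arbitrary threshold of 127:

# findContours expects a binary image, so grayscale and threshold first.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

# cv2.RETR_EXTERNAL keeps only the outermost contours.
contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
drawn = cv2.drawContours(img.copy(), contours, -1, (0, 255, 0), 2)
show_img("contours", drawn)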
    def LaplacianBlend(self,images, masks, n=5):
        """
        Blending the input the input images using Laplacian Blending. Essentially, we reduce the 
        the image to smaller sizes using OpenCV()'s pyrDown() and obtaining a gaussian pyramid.
        Then upsample the images using pyrUp() and finding the difference of Gaussian and hence 
        the laplacian pyramid. Image mask are used to find the partition and the Laplacian pyramid 
        images are joined to get the desired result's laplacian pyramid. Next, the pyramid is upsampled 
        again and add the Gaussians at each level to get the desired result.

        """

        # Ensure the input image dimensions are a multiple of 2^n, where n is the
        # number of levels of the Laplacian pyramid.
        print(images[0].shape)
        assert(images[0].shape[0] % pow(2, n) ==
            0 and images[0].shape[1] % pow(2, n) == 0)

        # Empty list of Gaussian pyramids and laplacian pyramids for each of the input images
        g_pyramids = [None]*len(images)
        l_pyramids = [None]*len(images)

        _, W, _ = images[0].shape

        # Calculating pyramids of the images
        for i in range(len(images)):

            # Gaussian Pyramids
            G = images[i].copy()
            g_pyramids[i] = [G] #Storing the pyramids in a list to the corresponding image index
            for _ in range(n):
                G = cv2.pyrDown(G)
                g_pyramids[i].append(np.float32(G))

            # Laplacian Pyramids
            l_pyramids[i] = [G]  
            for j in range(len(g_pyramids[i])-2, -1, -1):
                G_up = cv2.pyrUp(G)
                G = g_pyramids[i][j]
                L = G - G_up # Difference of Gaussian (DOG)
                l_pyramids[i].append(L) #Storing the pyramids in a list to the corresponding image index 
        
        # Making the masks boolean for further operations
        for i in range(len(masks)):
            masks[i] = masks[i].astype('bool')
        
        common_mask = masks[0].copy() #All masks will be iterated and will be combined to the common mask
        common_image = images[0].copy() #All images will be iterated and will be combined to the common image
        common_pyramids = [l_pyramids[0][i].copy()
                            for i in range(len(l_pyramids[0]))] #All pyramids will be iterated and will be combined to the common pyr

        final_image = None
        # Iterating on the images.
        for i in range(1, len(images)):

            _, x1 = np.where(common_mask == 1)
            _, x2 = np.where(masks[i] == 1)

            #  Sorting the common image and the image to be added to left and right
            if np.max(x1) > np.max(x2):
                left_py = l_pyramids[i]
                right_py = common_pyramids

            else:
                left_py = common_pyramids
                right_py = l_pyramids[i]

            # Finding the region of intersection between the common image and the current image
            mask_intersection = np.bitwise_and(common_mask, masks[i])
            if True in mask_intersection:
                _, x = np.where(mask_intersection == 1)
                # finding the coordinate of the vertical line which helps in overlapping the left and the right images.
                x_min, x_max = np.min(x), np.max(x)
                midPt = ((x_max-x_min)/2 + x_min)/W
                # Finally we add the pyramids
                LS = []
                for lap_L, lap_R in zip(left_py, right_py):
                    _, cols, _ = lap_L.shape
                    ls = np.hstack((lap_L[:, 0:int(midPt*cols)], lap_R[:, int(midPt*cols):]))

                    LS.append(ls)
            # If there is no intersection, simply add the images
            else:
                LS = []
                for lap_L, lap_R in zip(left_py, right_py):
                    _, cols, _ = lap_L.shape
                    ls = lap_L + lap_R
                    LS.append(ls)

            # Reconstruct the image
            final_image = LS[0]
            for j in range(1, n+1):
                final_image = cv2.pyrUp(final_image)
                final_image = final_image + LS[j]
                final_image[final_image>255] = 255; final_image[final_image<0] = 0 
                
            common_image = final_image

            common_mask = np.sum(common_image.astype(bool), axis=2).astype(bool)
            common_pyramids = LS

        return np.uint8(final_image)
Example 40
                    rimg = cv2.flip(img, 1)
                    #Erosion
                    kernel = np.ones((5, 5), np.uint8)
                    erosion = cv2.erode(img_GRAY, kernel, iterations=1)
                    #Dilation
                    dilation = cv2.dilate(img_GRAY, kernel, iterations=1)
                    #Opening
                    opening = cv2.morphologyEx(img_GRAY, cv2.MORPH_OPEN,
                                               kernel)
                    #Closing
                    closing = cv2.morphologyEx(img_GRAY, cv2.MORPH_CLOSE,
                                               kernel)
                    #Lower resolution
                    lower_reso = cv2.pyrDown(img)
                    #Higher resolution
                    higher_reso = cv2.pyrUp(img)
                    #iterate over the image columns with a step of cols/5
                    for j in range(0,
                                   cols - math.floor(cols / 5) + cols % 5 + 1,
                                   math.floor(cols / 10)):
                        #iterate over the image rows with a step of rows/5
                        for i in range(
                                0, rows - math.floor(rows / 5) + cols % 5 + 1,
                                math.floor(rows / 10)):
                            img_crop_1 = img[i:i + math.ceil(rows / 5),
                                             j:j + math.ceil(cols / 5)]
                            chemin_img_crop_1 = chemin_dossier_traite + '/' + dossier + '_' + nom_fichier + '_' + str(
                                i) + '_' + str(j) + ext_fichier

                            cv2.imwrite(chemin_img_crop_1,
                                        cv2.resize(img_crop_1, (299, 299)))
# create Gaussian pyramid 1
# We can find Gaussian pyramids using cv.pyrDown() and cv.pyrUp() functions.
layer = img1.copy()
gaussian_pyramid = [layer]

for i in range(6):
    layer = cv2.pyrDown(layer)
    gaussian_pyramid.append(layer)

# create Laplacian pyramid 1
layer = gaussian_pyramid[5]
laplacian_pyramid = [layer]
# start =5, stop = 0, -1 = reverse order
for i in range(5, 0, -1):
    size = (gaussian_pyramid[i-1].shape[1], gaussian_pyramid[i-1].shape[0])
    gaussian_expanded = cv2.pyrUp(gaussian_pyramid[i], dstsize = size)
    laplacian = cv2.subtract(gaussian_pyramid[i-1], gaussian_expanded)
    laplacian_pyramid.append(laplacian)

###################

# create Gaussian pyramid 2
# We can find Gaussian pyramids using cv.pyrDown() and cv.pyrUp() functions.
layer = img2.copy()
gaussian_pyramid2 = [layer]

for i in range(6):
    layer = cv2.pyrDown(layer)
    gaussian_pyramid2.append(layer)

# create Laplacian pyramid 2
Example 42
# -*- coding: utf-8 -*-
# @author   :   eko.zhan
# @time     :   2021/9/2 19:56
import cv2
import numpy as np

img = cv2.imread("../data/dog-1.jpg")

# Laplacian pyramid: L = G - PyrUp(PyrDown(G))
pyr_img = img - cv2.pyrUp(cv2.pyrDown(img))

print(pyr_img.shape)

cv2.imshow("res0", img)
cv2.imshow("res1", pyr_img)
cv2.waitKey()
cv2.destroyAllWindows()
Example 43
def amplifyGauss(pyramid, index, levels):
    filteredFrame = pyramid[index]
    for level in range(levels):
        filteredFrame = cv2.pyrUp(filteredFrame)
    filteredFrame = filteredFrame[:videoHeight, :videoWidth]
    return filteredFrame
Example 44
    closing = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, kernel)
    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
    #closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    edges = cv2.Canny(closing,100,200)

    #edges = cv2.cornerHarris(gray, 5)
    #ret,thresh = cv2.threshold(gray,50,255,cv2.THRESH_BINARY)

    empty = np.zeros(frame.shape, dtype=np.uint8)


    #kernel = np.ones((5,5),np.uint8)
    #edges = cv2.dilate(edges,kernel,iterations = 2) # really chunks it up

    #contours,hierarchy= cv2.findContours(closing,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    #cv2.drawContours(empty, contours, -1, (0,0,255), 1)
    #cv2.imshow('res',cv2.pyrDown(empty))
    show = closing
    for i in range(N-1):
        show = cv2.pyrUp(show)
    cv2.imshow('gray',gray)
    cv2.imshow('gld',show)
    #cv2.imshow('contours',empty)
    #cv2.imshow('res',cv2.pyrDown(edges))

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
Example 45
import cv2
import numpy as np
img = cv2.imread('lena.jpg')
lr = cv2.pyrDown(img)
hr = cv2.pyrUp(img)

cv2.imshow('original', img)
cv2.imshow('down', lr)
cv2.imshow('up', hr)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example 46
    async def recv(self):
        frame = await self.track.recv()

        if self.transform == "cartoon":
            img = frame.to_ndarray(format="bgr24")

            # prepare color
            img_color = cv2.pyrDown(cv2.pyrDown(img))
            for _ in range(6):
                img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
            img_color = cv2.pyrUp(cv2.pyrUp(img_color))

            # prepare edges
            img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img_edges = cv2.adaptiveThreshold(
                cv2.medianBlur(img_edges, 7),
                255,
                cv2.ADAPTIVE_THRESH_MEAN_C,
                cv2.THRESH_BINARY,
                9,
                2,
            )
            img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)

            # combine color and edges
            img = cv2.bitwise_and(img_color, img_edges)

            # rebuild a VideoFrame, preserving timing information
            new_frame = VideoFrame.from_ndarray(img, format="bgr24")
            new_frame.pts = frame.pts
            new_frame.time_base = frame.time_base
            return new_frame
        elif self.transform == "edges":
            # perform edge detection
            img = frame.to_ndarray(format="bgr24")
            img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)

            # rebuild a VideoFrame, preserving timing information
            new_frame = VideoFrame.from_ndarray(img, format="bgr24")
            new_frame.pts = frame.pts
            new_frame.time_base = frame.time_base
            return new_frame
        elif self.transform == "rotate":
            # rotate image
            img = frame.to_ndarray(format="bgr24")
            rows, cols, _ = img.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
            img = cv2.warpAffine(img, M, (cols, rows))

            # rebuild a VideoFrame, preserving timing information
            print(img[0])
            new_frame = VideoFrame.from_ndarray(img, format="bgr24")
            new_frame.pts = frame.pts
            new_frame.time_base = frame.time_base
            return new_frame
        else:
            # return frame
            # print(frame)
            # if not self.ws:
            #     self.ws = await self.wsSession.ws_connect("http://192.168.5.10:5000/endpoint")

            img, warped, needsUpdate = processFrame(frame.to_ndarray(format="bgr24"))
            new_frame = VideoFrame.from_ndarray(img, format="bgr24")
            new_frame.pts = frame.pts
            new_frame.time_base = frame.time_base

            if needsUpdate:
                imencoded = cv2.imencode(".jpg", warped)[1]
                async with ClientSession() as session:
                    async with session.post('http://127.0.0.1:5000/request-update', data=imencoded.tobytes()) as resp:
                    # async with session.get('http://127.0.0.1:5000/request-update') as resp:
                        # print(resp.status)
                        print(await resp.text())
                        # await resp.text()
                        session.close()
                # self.ws.send_str("update requested")

            return new_frame
])
rasters1, rasters2 = reg4(rasters1, rasters2, N)
imgg1 = cv.merge((rasters1[2], rasters1[1], rasters1[0]))
imgg2 = np.uint8(cv.merge((rasters2[2], rasters2[1], rasters2[0])))

img1h = cv.merge((rasters1[0], rasters1[1], rasters1[2]))
img2h = np.uint8(cv.merge((rasters2[0], rasters2[1], rasters2[2])))

sp = img1h.shape
img1h = img1h[sp[0] % (1 << T):, sp[1] % (1 << T):, :]
img2h = img2h[sp[0] % (1 << T):, sp[1] % (1 << T):, :]
change = []
for i in range(T):
    img1d = cv.pyrDown(img1h)
    img2d = cv.pyrDown(img2h)
    img1l = cv.subtract(img1h, cv.pyrUp(img1d))
    img2l = cv.subtract(img2h, cv.pyrUp(img2d))
    # plt.subplot(1, 3, 1), plt.imshow(cv.equalizeHist(cv.cvtColor(img1l, cv.COLOR_BGR2GRAY)), 'gray')
    # plt.subplot(1, 3, 2), plt.imshow(cv.equalizeHist(cv.cvtColor(img2l, cv.COLOR_BGR2GRAY)), 'gray')
    img1l = np.array([img1h[:, :, j] for j in range(N)])
    img2l = np.array([img2h[:, :, j] for j in range(N)])

    change.append(CDetect4(img1l, img2l, 4, N, 0))

    img1h = img1d.copy()
    img2h = img2d.copy()

    # plt.subplot(1, 3, 3), plt.imshow(change[-1], 'gray')
    # plt.show()

img1h = np.array([img1h[:, :, j] for j in range(N)])
Example 48
import cv2


img = cv2.imread('lena.jpg')

layer = img.copy()
gp = [layer]

for i in range(6):
	layer = cv2.pyrDown(layer)
	gp.append(layer)
	#cv2.imshow(str(i),layer)

layer = gp[5]
cv2.imshow("Upper level of GP",layer)
lp= [layer]
for i in range(5,0,-1):
	gaussian = cv2.pyrUp(gp[i])
	lapa = cv2.subtract(gp[i-1],gaussian)
	cv2.imshow(str(i),lapa)



cv2.imshow('Original',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example 49
print("Number of loop iterations: " + str(numberOfIterations))

dstSW = np.ones((height * 2, width * 2), np.uint8)

xFimgY = mem_manager.cma_array(
    (height, width), np.uint8)  #allocated physically contiguous numpy array
xFimgY[:] = imgY[:]  # copy source data

xFdst = mem_manager.cma_array(
    (height * 2, width * 2),
    np.uint8)  #allocated physically contiguous numpy array

print("Start SW loop")
startSW = time.time()
for i in range(numberOfIterations):
    cv2.pyrUp(imgY, dst=dstSW)  #pyrUp on ARM
stopSW = time.time()

print("Start HW loop")
startPL = time.time()
for i in range(numberOfIterations):
    xv2.pyrUp(
        xFimgY, dst=xFdst
    )  #pyrUp offloaded to PL, working on physically contiguous numpy arrays
stopPL = time.time()

print("SW frames per second: ", ((numberOfIterations) / (stopSW - startSW)))
print("PL frames per second: ", ((numberOfIterations) / (stopPL - startPL)))

print("Checking SW and HW results match")
numberOfDifferences, errorPerPixel = cvu.imageCompare(xFdst, dstSW, True,
Example 50
 def __blur_image(self):
     for i in range(0, self.__blur_times):
         self.__img = cv2.pyrDown(self.__img)
         self.__img = cv2.pyrUp(self.__img)
 def apply_blur_and_downsampling(self):
     result = cv2.pyrUp(self.__image)
     self.__show_result(result, "blur and downsample")
Example 52
        phi[idx] = vectorize(PATH + filename)
        labels.append(1)
    PATH = "../data/neutral/"
    offset = idx + 1
    for idx, filename in enumerate(neutralfiles):
        phi[idx + offset] = vectorize(PATH + filename)
        labels.append(0)

    lr = logistic.Logistic(dim)
    lr.train(phi, labels)

    d_red = cv2.cv.RGB(150, 55, 65)
    l_red = cv2.cv.RGB(250, 200, 200)

    orig = cv2.imread("braillerstuv.jpg")
    orig = cv2.pyrUp(orig)
    img = orig.copy()
    img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    detector = cv2.FeatureDetector_create('MSER')
    fs = detector.detect(img2)
    fs.sort(key=lambda x: -x.size)

    def supress(x):
        for f in fs:
            distx = f.pt[0] - x.pt[0]
            disty = f.pt[1] - x.pt[1]
            dist = math.sqrt(distx * distx + disty * disty)
            if (f.size > x.size) and (dist < f.size / 2):
                return True
Esempio n. 53
'''
Image pyramid: Gaussian upsampling with pyrUp.
'''

import cv2

o = cv2.imread('C:\\D\\testImgs\\chapter12\\image' + '\\p.bmp')
r1 = cv2.pyrUp(o)
r2 = cv2.pyrUp(r1)
r3 = cv2.pyrUp(r2)
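
# A quick sanity check (not part of the original snippet): by default
# cv2.pyrUp exactly doubles both dimensions, so r3 is eight times the size
# of o along each axis.
print(o.shape, r1.shape, r2.shape, r3.shape)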

cv2.imshow('o', o)
cv2.imshow('r1', r1)
cv2.imshow('r2', r2)
cv2.imshow('r3', r3)

cv2.waitKey()
cv2.destroyAllWindows()
Esempio n. 54
def pyr_up(image):

    pyr_up_img = cv2.pyrUp(image)

    return pyr_up_img
Esempio n. 55
apple_gaussian_p = [apple_pyramid_level]
for i in range(NUM_OF_LEVELS):
    apple_pyramid_level = cv2.pyrDown(apple_pyramid_level)
    apple_gaussian_p.append(apple_pyramid_level)

# generate Gaussian pyramid for orange
orange_pyramid_level = orange.copy()
orange_gaussian_p = [orange_pyramid_level]
for i in range(NUM_OF_LEVELS):
    orange_pyramid_level = cv2.pyrDown(orange_pyramid_level)
    orange_gaussian_p.append(orange_pyramid_level)

# generate Diff Pyramid for apple
apple_diff_p = [apple_gaussian_p[NUM_OF_LEVELS - 1]]
for i in range(NUM_OF_LEVELS - 1, 0, -1):
    apple_diff_level = cv2.pyrUp(apple_gaussian_p[i])
    apple_diff_level = cv2.subtract(apple_gaussian_p[i - 1], apple_diff_level)
    apple_diff_p.append(apple_diff_level)

# generate Diff Pyramid for orange
orange_diff_p = [orange_gaussian_p[NUM_OF_LEVELS - 1]]
for i in range(NUM_OF_LEVELS - 1, 0, -1):
    orange_diff_level = cv2.pyrUp(orange_gaussian_p[i])
    L = cv2.subtract(orange_gaussian_p[i - 1], orange_diff_level)
    orange_diff_p.append(L)

# Now add left and right halves of images in each level
blend_diff_p = []
for la, lb in zip(apple_diff_p, orange_diff_p):
    rows, cols, dpt = la.shape
    blend_diff_level = np.hstack((la[:, :cols // 2], lb[:, cols // 2:]))
    blend_diff_p.append(blend_diff_level)
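
# The original snippet is cut off here. A minimal sketch of the usual final
# step (an assumption, not part of the source): collapse the blended pyramid
# back into a single image by repeatedly upsampling and adding the next
# detail level, assuming the dimensions halve exactly at every level.
blended = blend_diff_p[0]
for i in range(1, len(blend_diff_p)):
    blended = cv2.pyrUp(blended)
    blended = cv2.add(blended, blend_diff_p[i])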
numpySecond = cv2.imread(filename='./samples/multiband-orange.png', flags=cv2.IMREAD_COLOR).astype(numpy.float32) / 255.0

# blend the apple and the orange using multiband blending with laplacian pyramids

# creating a laplacian pyramid with seven levels for each of the two images

numpyFirst = [ numpyFirst ]
numpySecond = [ numpySecond ]


for intLevel in range(6):
	numpyFirst.append(cv2.pyrDown(numpyFirst[-1]))
	numpySecond.append(cv2.pyrDown(numpySecond[-1]))

	
	numpyFirst[-2] -= cv2.pyrUp(numpyFirst[-1])
	numpySecond[-2] -= cv2.pyrUp(numpySecond[-1])

# end



#add left and right halves of images in each level
halfImg = []
for m,n in zip(numpyFirst,numpySecond):
    r1,c1,d1 = m.shape
    r2,c2,d2 = n.shape
    #temp = numpy.concatenate((m[:, :c1 // 2], n[:, c2 // 2:]))
    temp = numpy.hstack((m[:, :c1 // 2], n[:, c2 // 2:]))
    halfImg.append(temp)
def apply_makeup(subject, warped_target):
    zeros = np.zeros(warped_target.shape, dtype=warped_target.dtype)
    ones = np.ones(warped_target.shape, dtype=warped_target.dtype)
    face_mask = np.where(warped_target==[0,0,0], zeros, ones*255)
    cv2.imshow('mask', face_mask)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    sub_lab = cv2.cvtColor(subject, cv2.COLOR_BGR2LAB)
    tar_lab = cv2.cvtColor(warped_target, cv2.COLOR_BGR2LAB)

    sl, sa, sb = cv2.split(sub_lab)
    tl, ta, tb = cv2.split(tar_lab)

    face_struct_s, skin_detail_s = decomposition(sl)
    face_struct_t, skin_detail_t = decomposition(tl)

    #color transfer
    gamma = .8
    '''
    type = sa.dtype
    sa.dtype = float
    ta.dtype = float
    sb.dtype = float
    tb.dtype = float
    '''
    dtype = sa.dtype
    ra = (sa*(1-gamma) + ta*gamma).astype(dtype)
    rb = (sb*(1-gamma) + tb*gamma).astype(dtype)
    #print(ra.shape)
    ra = cv2.bitwise_and(ra,ra,mask = face_mask[:,:,0])
    rb = cv2.bitwise_and(rb,rb,mask = face_mask[:,:,0])



    #skin-detail transfer
    gammaI = 0
    gammaE = 1
    skin_detail_r = (skin_detail_s*gammaI + skin_detail_t*gammaE).astype(dtype)


    #Work on the base layer
    fp_mask = find_mask(subject, True)
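    # The structure layer is split into a coarse base (pyrDown) and the
    # Laplacian detail left over after upsampling it again; inside the face
    # mask the target's detail is laid on top of the subject's coarse base.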
    src_gauss = cv2.pyrDown(face_struct_s)
    src_lapla = face_struct_s - cv2.pyrUp(src_gauss)
    dst_gauss = cv2.pyrDown(face_struct_t)
    dst_lapla = face_struct_t - cv2.pyrUp(dst_gauss)
    face_struct_r = np.where(face_mask[:,:,0]==0, face_struct_s, dst_lapla + cv2.pyrUp(src_gauss))
    cv2.imshow('transfering target', face_struct_r)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    face_struct_r = np.where(fp_mask[:,:,0]==255, face_struct_s, face_struct_r)

    #cv2.imshow('mask', fp_mask)
    #cv2.imshow('transfering target', face_struct_r)
    #cv2.imshow('keeping src', face_struct_s)
    #cv2.imshow('diff', face_struct_s - face_struct_r)
    cv2.waitKey(0)

    rl = face_struct_r+skin_detail_r
    rl = cv2.bitwise_and(rl,rl,mask = face_mask[:,:,0])

    res_lab = cv2.merge((rl, ra, rb))
    res = cv2.cvtColor(res_lab, cv2.COLOR_LAB2BGR)

    fp_mask = find_mask(subject, False)
    res = cv2.bitwise_and(res,res,mask = face_mask[:,:,0])
    res = np.where(face_mask==[0,0,0], subject, res)
    res = np.where(fp_mask==[255,255,255], subject, res)


    #apply lip makeup
    M, lip_map = lip_makeup(subject, warped_target)
    res = np.where(lip_map==[255,255,255], M, res)

    # cv2.imshow('old', res)
    # cv2.waitKey(0)

    res = overlay(subject, res, face_mask[:,:,0])
    
    cv2.imshow('res', res)
    # cv2.imwrite('res.jpg', res)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Esempio n. 58
import cv2 as cv
import numpy as np

src = cv.imread("/home/gyh/opencv/lena.jpg")
cv.imshow("src",src)

img1 = cv.pyrDown(src)
cv.imshow("img1",img1)
# (The image gets smaller, the resolution drops and information is lost.)

img2 = cv.pyrUp(img1)
cv.imshow("img2",img2)
# (The image gets larger, but the resolution does not increase.) Applying pyrUp after pyrDown does not restore the detail lost by pyrDown.
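
# A quick check of that loss (not in the original snippet): reconstructing
# from the downsampled image and comparing with the source shows the detail
# removed by pyrDown (crop to the common region in case the sizes differ by
# one pixel for odd dimensions).
h = min(src.shape[0], img2.shape[0])
w = min(src.shape[1], img2.shape[1])
cv.imshow("lost detail", cv.absdiff(src[:h, :w], img2[:h, :w]))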


# The Laplacian pyramid can be computed from the Gaussian pyramid with the formula below:
# L_i = G_i - pyrUp(G_{i+1})

# Gaussian pyramid
def demo(src1):
    level=3
    temp = src1.copy()
    G=[]
    for i in range(level):
        dst = cv.pyrDown(temp)
        G.append(dst)
        cv.imshow("G"+str(i),dst)
        temp = dst.copy()
    return G

# Laplacian pyramid
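# The snippet is cut off here; a minimal sketch of the missing function, based
# on the formula quoted above (L_i = G_i - pyrUp(G_{i+1})). It reuses demo()
# to build the Gaussian pyramid; the name and structure are assumptions.
def laplacian_demo(src1):
    G = [src1] + demo(src1)          # G_0 is the original image
    L = []
    for i in range(len(G) - 1):
        up = cv.pyrUp(G[i + 1], dstsize=(G[i].shape[1], G[i].shape[0]))
        L.append(cv.subtract(G[i], up))
        cv.imshow("L" + str(i), L[-1])
    return L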
Esempio n. 59
gp_apple_r = [apple_r_copy]
for i in range(6):
    apple_r_copy = cv.pyrDown(apple_r_copy)
    gp_apple_r.append(apple_r_copy)

apple_g_copy = apple_g.copy()
gp_apple_g = [apple_g_copy]
for i in range(6):
    apple_g_copy = cv.pyrDown(apple_g_copy)
    gp_apple_g.append(apple_g_copy)

# step 3: generate laplacian pyramids
apple_r_copy = gp_apple_r[5]
lp_apple_r = [apple_r_copy]
for i in range(5, 0, -1):
    gaussian_expanded = cv.pyrUp(gp_apple_r[i])
    laplacian = cv.subtract(gp_apple_r[i - 1], gaussian_expanded)
    lp_apple_r.append(laplacian)

apple_g_copy = gp_apple_g[5]
lp_apple_g = [apple_g_copy]
for i in range(5, 0, -1):
    gaussian_expanded = cv.pyrUp(gp_apple_g[i])
    laplacian = cv.subtract(gp_apple_g[i - 1], gaussian_expanded)
    lp_apple_g.append(laplacian)

# Step 4: Add both halves
apple_r_g_pyramid = []
n = 0
for apple_r_lap, apple_g_lap in zip(lp_apple_r, lp_apple_g):
    n += 1
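    # The snippet is cut off here; a minimal sketch of the usual step (an
    # assumption, matching the "Add both halves" comment above, and assuming
    # numpy is imported as np): stitch the left half of one Laplacian level
    # to the right half of the other and collect the result.
    cols = apple_r_lap.shape[1]
    apple_r_g_pyramid.append(
        np.hstack((apple_r_lap[:, :cols // 2], apple_g_lap[:, cols // 2:])))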
def WaveletTransform(image, wavelet):
    coeffs = pywt.dwt2(image, wavelet)
    cA, (cH, cV, cD) = coeffs
    
    #####------For db2 trimming down the image to maintain size consistency------#####
    if wavelet=='db2':
        cA=cA[1:len(cA),1:len(cA)]
        cH=cH[1:len(cH),1:len(cH)]
        cV=cV[1:len(cV),1:len(cV)]
        cD=cD[1:len(cD),1:len(cD)]
    print('len(cA) ' + str(len(cA)))
    print('len(cH) ' + str(len(cH)))
    print('len(cV) ' + str(len(cV)))
    print('len(cD) ' + str(len(cD)))
    
    #####------Scaling the transformed image by 2------#####
    cA=cv2.pyrUp(cA)
    cH=cv2.pyrUp(cH)
    cV=cv2.pyrUp(cV)
    cD=cv2.pyrUp(cD)
   
    print('len(cA)up ' + str(len(cA)))
    print('len(cH)up ' + str(len(cH)))
    print('len(cV)up ' + str(len(cV)))
    print('len(cD)up ' + str(len(cD)))
   
    return cA,cH,cV,cD
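
# A hypothetical usage sketch (the file name is a placeholder; the function
# above assumes pywt and cv2 are imported, and the db2 trimming branch
# assumes a square input image):
import cv2
import numpy as np
img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE).astype(np.float32)
cA, cH, cV, cD = WaveletTransform(img, 'db2')
print(cA.shape, cH.shape, cV.shape, cD.shape)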