Example #1
def getQuantizedDiff(fullPath,frameId,queryFrame,width,height,numOfBits):

  queryDiffFrames=[]
  #queryFrame=getVideoFrameById(fullPath,QueryFrameId)
  prevFrame=getVideoFrameById(fullPath,frameId)
  nextFrame=getVideoFrameById(fullPath,frameId+2)
  if queryFrame is None or prevFrame is None or nextFrame is None:
    return None

  y=queryFrame
  frameHeight=y.shape[0]
  frameWidth=y.shape[1]
  for i in range(0,frameWidth,width):
    for j in range(0,frameHeight,height):
      yChannel=y[j:j+height,i:i+width]
      blockCoordinates = (i, j)

      prevFrameYChannel=prevFrame[j:j+height,i:i+width]
      diffYChannel=cv2.subtract(yChannel.astype(np.int16),prevFrameYChannel.astype(np.int16))
      flatDiffChannel = np.reshape(diffYChannel, (1, width*height))
      quantize(flatDiffChannel, numOfBits, frameId, blockCoordinates,255,-255,queryDiffFrames)

  y=nextFrame
  prevFrame=queryFrame
  for i in range(0,frameWidth,width):
    for j in range(0,frameHeight,height):
      yChannel=y[j:j+height,i:i+width]
      blockCoordinates = (i, j)

      prevFrameYChannel=prevFrame[j:j+height,i:i+width]
      diffYChannel=cv2.subtract(yChannel.astype(np.int16),prevFrameYChannel.astype(np.int16))
      flatDiffChannel = np.reshape(diffYChannel, (1, width*height))
      quantize(flatDiffChannel, numOfBits, frameId+1, blockCoordinates,255,-255,queryDiffFrames)

  return queryDiffFrames
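
getVideoFrameById and quantize are project-specific helpers that the listing does not show. A minimal sketch of what quantize might look like, assuming a plain uniform quantizer over [minVal, maxVal] that appends its output to the result list:

def quantize(block, numOfBits, frameId, blockCoordinates, maxVal, minVal, out):
    # hypothetical: map values in [minVal, maxVal] onto 2**numOfBits levels
    levels = 2 ** numOfBits
    step = float(maxVal - minVal) / levels
    codes = np.clip((block - minVal) / step, 0, levels - 1).astype(np.int32)
    out.append((frameId, blockCoordinates, codes))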
def ColormapBoundry(image, mean, std, low, high):
    # Linearly rescale [mean - 0.5*(8-low)*std, mean + 0.5*high*std] onto [0, 255]
    newMin = mean - 0.5*(8-low)*std
    newMax = mean + 0.5*high*std
    newSlope = 255.0/(newMax-newMin)
    cv2.subtract(image, newMin, image)
    cv2.multiply(image, newSlope, image)
    return image.astype("uint8", copy=False)
def read_images(fpath):
    lines = utils.read_image_list(fpath)

    logger.info('loading data: {}'.format(fpath))
    X_data, y_data = [], []
    for inst_path, truth_path in lines:
        inst, truth = [cv2.imread(p, cv2.IMREAD_GRAYSCALE)
                for p in (inst_path, truth_path)]
        assert inst is not None and truth is not None, (inst_path, truth_path)

        pad_h, pad_w = [x // 2 for x in MODEL_INPUT_SHAPE]
        padded = cv2.copyMakeBorder(inst, pad_h, pad_h, pad_w, pad_w,
                               cv2.BORDER_REFLECT)

        m7  = cv2.medianBlur(padded, 7)
        m15 = cv2.medianBlur(padded, 15)

        c7 = 255 - cv2.subtract(m7, padded)
        c15 = 255 - cv2.subtract(m15, padded)

        # (c, h, w) layout
        input = np.array((padded, m7, m15, c7, c15))
        truth = truth.reshape((1,) + truth.shape)

        # collect the stacked input and its ground truth
        X_data.append(input)
        y_data.append(truth)

    return X_data, y_data
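
utils.read_image_list, logger and MODEL_INPUT_SHAPE are module-level names not shown here. A hedged stand-in for read_image_list, assuming each line of the list file holds a whitespace-separated (instance, ground-truth) path pair:

def read_image_list(fpath):
    # hypothetical format: "inst_path truth_path" on each non-empty line
    with open(fpath) as f:
        return [tuple(line.split()) for line in f if line.strip()]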
def online_variance(new_data, curr_var, curr_iter, curr_mean):
    # Welford-style online update of a running per-pixel mean and variance
    if curr_iter == 1:
        new_mean = new_data
        new_var = 0
        return new_mean, new_var
    else:
        # new_mean = curr_mean + (new_data - curr_mean) / curr_iter
        pa = cv2.subtract(new_data, curr_mean)
        pa = cv2.divide(pa, curr_iter)
        new_mean = cv2.add(pa, curr_mean)

        prev_S = curr_var * (curr_iter - 2)

        # new_S = prev_S + (new_data - curr_mean) .* (new_data - new_mean)
        pd1 = cv2.subtract(new_data, curr_mean)
        pd2 = cv2.subtract(new_data, new_mean)
        pd = cv2.multiply(pd1, pd2)
        new_S = cv2.add(pd, prev_S)

        # new_var = new_S / (curr_iter - 1)
        new_var = cv2.divide(new_S, curr_iter - 1)

        return new_mean, new_var
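
A usage sketch for the running statistics above; frames is a hypothetical iterable of same-sized float32 images:

mean, var = None, None
for i, frame in enumerate(frames, start=1):  # frames: hypothetical input
    f = frame.astype(np.float32)
    mean, var = online_variance(f, var, i, mean)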
def findStartingPoint(video, numberOfFrames):
    # the function will go numberOfFrames frames ahead in the video and do a subtraction
    # the hope is that the fish will have moved at some point during this time
    count = 0
    # I don't know a better way to jump ahead so many frames
    while count <= numberOfFrames:
        # read in the frame for each tick of the loop
        ret, frame = video.read()
        if count == 0:
            # grab the very first frame as the baseline, before the screens turn on
            hsv_initial = convertToHSV(frame)
        # grab a frame from the middle. Assume the screens have turned on by then
        if count == numberOfFrames / 2:
            hsv_middle = convertToHSV(frame)
        if count == numberOfFrames:
            # grab the last frame in the numberOfFrames window
            hsv_end = convertToHSV(frame)
        count += 1
        if count == 1:
            print "initializing"
        print "." * (count % 20)

    # now we have masked HSV photos from the beginning, middle, and end of the initialization period
    # now we have to do some funky subtraction to get rid of the signal that results from the screen being turned on
    difference2 = cv2.subtract(hsv_end, hsv_initial)
    difference1 = cv2.subtract(hsv_end, hsv_middle)
    difference3 = cv2.subtract(difference1, difference2)

    # now we have what we want
    thresh = cv2.inRange(difference3, np.array([0, 0, 0]), np.array([255, 255, 25]))
    # need to invert the colors or else we run into trouble on our call to cv2.findContours
    invert = cv2.bitwise_not(thresh)
    startingPoint = returnLargeContour(invert)
    return startingPoint
def morphological_skeleton(img, maxIter=1024):
    """Summary

    Args:
        img (TYPE): Description
        maxIter (int, optional): Description

    Returns:
        TYPE: Description
    """
    # url: http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    height, width = img.shape[:2]
    skel = np.zeros((height, width, 1), np.uint8)
    temp = np.zeros((height, width, 1), np.uint8)

    done = False
    nbIteration = 0
    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        cv2.subtract(img, temp, temp)
        cv2.bitwise_or(skel, temp, skel)
        img = eroded
        nbIteration += 1
        done = (cv2.countNonZero(img) == 0) or (nbIteration >= maxIter)

    return skel, nbIteration
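
A minimal usage sketch, assuming a binarized single-channel input; the file name is hypothetical:

mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
_, mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
skel, iterations = morphological_skeleton(mask)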
def get_LaplacePyramid(name1,name2,size):
    """
    Function that creates Laplacian pyramids of two images
    name1, name2: names of files containing the images
    size: number of levels in the Laplacian pyramid
    """
    # We first get the gaussian pyramids of each image. Notice 
    # the function written before is invoked here
    gaussPy1,gaussPy2 = get_GaussPyrimids(name1,name2,size)
    # We create a list of Laplacian pyramids; we initialize each list 
    # with the deeper level (smallest image) of each Gaussian pyramid
    Lapl1 = [gaussPy1[size]]; Lapl2 = [gaussPy2[size]]
    # We loop over each element of the Gaussian pyramid. Notice the 
    # looping begins with the smallest image in the Gaussian pyramid 
    # (deepest level)
    # For each element of the Gaussian pyramid...
    for k in range(size,0,-1):
        # Increase size of images in turn...
        G1 = cv2.pyrUp(gaussPy1[k]); G2 = cv2.pyrUp(gaussPy2[k])
        #print G1.shape, G2.shape
        # ... take respective differences with images in turn ...
        L1 = cv2.subtract(gaussPy1[k-1],G1); L2 = cv2.subtract(gaussPy2[k-1],G2)
        # ... and append to the list of Laplacian pyramids
        #print k , L1.shape, L2.shape
        Lapl1.append(L1); Lapl2.append(L2)
    return Lapl1,Lapl2
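
Since each level satisfies L[k] = G[k] - pyrUp(G[k+1]), the image can be rebuilt by running the loop in reverse; a minimal sketch, assuming dimensions that survive pyrDown/pyrUp round-trips and ignoring uint8 saturation losses:

recon = Lapl1[0]          # Lapl1[0] is the deepest (smallest) level
for L in Lapl1[1:]:
    recon = cv2.add(cv2.pyrUp(recon), L)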
Example #8
def mask_thinning(img):
    """
    returns the skeleton (thinned image) of a mask.
    This uses `thinning.guo_hall_thinning` if available and otherwise falls back
    to a slow python implementation taken from 
    http://opencvpython.blogspot.com/2012/05/skeletonization-using-opencv-python.html
    """
    try:
        import thinning
    except ImportError:
        # thinning module was not available and we use a python implementation
        size = np.size(img)
        skel = np.zeros(img.shape, np.uint8)
         
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        while True:
            eroded = cv2.erode(img, kernel)
            temp = cv2.dilate(eroded, kernel)
            cv2.subtract(img, temp, temp)
            cv2.bitwise_or(skel, temp, skel)
            img = eroded
         
            zeros = size - cv2.countNonZero(img)
            if zeros==size:
                break
    else:
        # use the imported thinning algorithm
        skel = thinning.guo_hall_thinning(img)
        
    return skel
def main():
    displayer = Displayer()

    A = cv2.cvtColor(fetch_image('apple.jpg'), cv2.COLOR_BGR2RGB)
    B = cv2.cvtColor(fetch_image('orange.jpg'), cv2.COLOR_BGR2RGB)

    # generate Gaussian pyramid for A
    G = A.copy()
    gpA = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpA.append(G)

    # generate Gaussian pyramid for B
    G = B.copy()
    gpB = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpB.append(G)

    # generate Laplacian Pyramid for A
    lpA = [gpA[5]]
    for i in xrange(5,0,-1):
        GE = cv2.pyrUp(gpA[i])
        L = cv2.subtract(gpA[i-1],GE)
        lpA.append(L)

    # generate Laplacian Pyramid for B
    lpB = [gpB[5]]
    for i in xrange(5,0,-1):
        GE = cv2.pyrUp(gpB[i])
        L = cv2.subtract(gpB[i-1],GE)
        lpB.append(L)

    # Now add left and right halves of images in each level
    LS = []
    for la,lb in zip(lpA,lpB):
        rows,cols,dpt = la.shape
        ls = np.hstack((la[:,0:cols/2], lb[:,cols/2:]))
        LS.append(ls)

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1,6):
        ls_ = cv2.pyrUp(ls_)
        displayer.add_image(ls_, i)
        ls_ = cv2.add(ls_, LS[i])

    # image with direct connecting each half
    real = np.hstack((A[:,:cols/2],B[:,cols/2:]))

    displayer.add_image(ls_, "pyramid")
    #displayer.add_image(real, "stacked")

    #for i in range(len(lpA)):
    #    displayer.add_image(LS[i], i)

    displayer.display()
def get_object_grass(image):

#     outputx = cv2.resize(output,(649,486))
#     cv2.imshow('image',outputx)
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
    
    B = image[:,:,0]
    G = image[:,:,1]
    R = image[:,:,2]
    R_ = cv2.multiply(R, 0.25)
    G = cv2.multiply(G, 0.25)
    B = cv2.multiply(B, 0.5)
    R = cv2.subtract(R, R_)
    R = cv2.subtract(R, G)
    new_image = cv2.subtract(R, B)
    
    
    #gray = cv2.add(cv2.add(image[:,:,2],-image[:,:,1]/2),-image[:,:,0]/2)
   
#     output = cv2.resize(new_image,(649,486))
#     cv2.imshow('image',output)
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
    
    gray = cv2.medianBlur(new_image,3)
    
    #gray = cv2.bilateralFilter(gray,5,75,75)

    # find the contours in the edged image and keep the largest one;
    _, cnts, _ = cv2.findContours(gray.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]

    for c in cnts:
        # approximate the contour
        marker = cv2.minAreaRect(c)
        x,y,w,h = cv2.boundingRect(c)
        coords = (x,y,w,h)
        M = cv2.moments(c)
        cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
        center = (cx,cy)
        colors = get_colors(image,coords)

        #print(marker)
        area = marker[1][0]*marker[1][1]
        if ((abs(marker[2]) > 60 or abs(marker[2]) < 40) and 7000 < area < 250000
                and marker[1][0] > 50 and marker[1][1] > 50 and colors['blue'] < 200):
            return (marker, coords, center)
        else:
            continue
        # if our approximated contour has four points, then
        # we can assume that we have found our screen

    return (((0, 0), (0, 0), 0),(0,0,0,0),(0,0))
Example #11
def rowStitch(imageA,imageB,fx,switch):
        ##################################################
        # finding information between stiched row images # 
        ##################################################

        # print " rowStitch function : finding homography"
        res_max=-1
        xA1=-1
        yA1=-1
        xB1=-1
        yB1=-1
        intervalx=16
        intervaly=16
        temp=imageB[:,:int(imageB.shape[1]*0.35)] #temp                                
        if switch==1:
            temp = cv2.Laplacian(temp,cv2.CV_32F)
        if switch==0:    
            sobelx = cv2.Sobel(temp,cv2.CV_32F,1,0,ksize=11) 
            sobely = cv2.Sobel(temp,cv2.CV_32F,0,1,ksize=11)  
            temp=sobelx+sobely # to get gradient of image in both direction
        temp=cv2.subtract(temp,cv2.mean(temp))
        score=[]
        coor=[]
        steps=16
        intervaly=imageA.shape[0]-100
        for i in range(imageA.shape[1]-int(0.35*imageB.shape[1]),imageA.shape[1],steps):
            for j in range(0,100,steps):
                template=imageA[j:j+intervaly,i:i+intervalx] #template
                if switch==1:
                    template = cv2.Laplacian(template,cv2.CV_32F)
                if switch==0:
                    sobelx = cv2.Sobel(template,cv2.CV_32F,1,0,ksize=11)
                    sobely = cv2.Sobel(template,cv2.CV_32F,0,1,ksize=11)
                    template=sobelx+sobely # to get gradient of image
                template=cv2.subtract(template,cv2.mean(template))

                res=cv2.matchTemplate(temp,template,cv2.TM_CCORR_NORMED)
                _, val, _, loc = cv2.minMaxLoc(res) # val stores the highest correlation in temp, loc the corresponding starting location
                if val > res_max:
                    res_max=val
                    xA1=i
                    yA1=j
                    xB1=loc[0]
                    yB1=loc[1]
                    # print(val)
        # print res_max,"res_max"
        xB1=xB1*(1/fx)
        yB1=yB1*(1/fx)
        xA1=xA1*(1/fx)
        yA1=yA1*(1/fx)
        pointsA=[[xA1,yA1],[xA1+intervalx,yA1],[xA1,yA1+intervaly],[xA1+intervalx,yA1+intervaly]]
        pointsB=[[xB1,yB1],[xB1+intervalx,yB1],[xB1,yB1+intervaly],[xB1+intervalx,yB1+intervaly]]
        H,mask=cv2.findHomography(np.asarray(pointsB,float),np.asarray(pointsA,float),cv2.RANSAC,3)
        return H,res_max
def gen_multi_channel(padded):
    m7  = cv2.medianBlur(padded, 7)
    m15 = cv2.medianBlur(padded, 15)

    c7 = 255 - cv2.subtract(m7, padded)
    c15 = 255 - cv2.subtract(m15, padded)


    # (c, h, w) layout
    input = np.array((padded, m7, m15, c7, c15))
    return input
def removeNonRed(img):
    img = cv2.cvtColor(img,cv2.COLOR_HSV2BGR)
    b,g,r = cv2.split(img)
    deltaB = cv2.subtract(r,b)
    deltaG = cv2.subtract(r,g)
    r2 = cv2.bitwise_and(r,r,mask=deltaB)
    g2 = cv2.bitwise_and(r2,r2,mask=deltaG)
    img = cv2.merge((r2,r2,r2))
    #cv2.imshow('test',img)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    return img
def problem_7(imgA,imgB):
    #Another solution using image pyramids
    #For Gaussian Pyramids
    diffLevels_A=imgA.copy()
    GaussPyr_A=[]
    GaussPyr_A.append(diffLevels_A)         # Gaussian Pyramids for imgA
    for itr in range(4):
        diffLevels_A=cv2.pyrDown(diffLevels_A)
        GaussPyr_A.append(diffLevels_A)
    diffLevels_B=imgB.copy()                # Gaussian Pyramids for imgB
    GaussPyr_B=[];GaussPyr_B.append(diffLevels_B)
    for itr in range(4):
        diffLevels_B=cv2.pyrDown(diffLevels_B)
        GaussPyr_B.append(diffLevels_B)

    #For Laplacian Pyramids   (Laplacian pyramids will be appended from lowest resolution to highest resolution)
    LaplacePyr_A=[GaussPyr_A[3]]   #Since we start building the Laplacian pyramids from the bottom
    for itr in range(3,0,-1):
        temp_A=cv2.pyrUp(GaussPyr_A[itr])
        d=(GaussPyr_A[itr-1].shape[0],GaussPyr_A[itr-1].shape[1],3)
        temp_A=np.resize(temp_A,d)
        LDiff=cv2.subtract(GaussPyr_A[itr-1],temp_A)      # because GaussPyr_A[itr-1] has a higher resolution than GaussPyr_A[itr]
        LaplacePyr_A.append(LDiff)

    LaplacePyr_B=[GaussPyr_B[3]]
    for itr in range(3,0,-1):
        temp_B=cv2.pyrUp(GaussPyr_B[itr])
        d=(GaussPyr_B[itr-1].shape[0],GaussPyr_B[itr-1].shape[1],3)
        temp_B=np.resize(temp_B,d)
        LDiff=cv2.subtract(GaussPyr_B[itr-1],temp_B)
        LaplacePyr_B.append(LDiff)

    #Blending the two Laplacian Pyramids (all resolution levels)
    Blend=[]
    #Note: Blend will have pyramids blended from lower to higher resolution
    for LapA,LapB in zip(LaplacePyr_A,LaplacePyr_B):
        Lr,Lc,dimension=LapA.shape
        temp=np.hstack((LapA[:,0:Lc/2],LapB[:,Lc/2:]))
        # Laplacian pyramid at each level is blended. This will help reconstruction of image
        Blend.append(temp)

    #Reconstructing the Image from the pyramids (Laplacian to Gaussian)
    final_temp=Blend[0]
    for itr in range(1,4):
        final_temp=cv2.pyrUp(final_temp)
        d=(Blend[itr].shape[0],Blend[itr].shape[1],3)
        final_temp=np.resize(final_temp,d)
        final_temp=cv2.add(final_temp,Blend[itr])       #L[i]=G[i]-G[i-1]..diff of gaussian..So, G[i]=L[i]+G[i-1]

    final_img=np.hstack((imgA[:,0:Lc/2],imgB[:,Lc/2:]))
    cv2.imshow("Final Blended Image",final_temp)
    cv2.imwrite("P_7.jpg",final_temp)
    cv2.waitKey(0)
Example #15
def blend(image1, image2, mask):
    # generate Gaussian pyramid for image 1
    G = image1.astype(np.float32)
    gpA = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpA.append(G.astype(np.float32))

    # generate Gaussian pyramid for image 2
    G = image2.astype(np.float32)
    gpB = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpB.append(G.astype(np.float32))

    # generate Gaussian pyramid for mask
    G = mask.astype(np.float32)
    gpM = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpM.append(G.astype(np.float32))

    # generate Laplacian Pyramid for image 1
    lpA = [gpA[5]]
    for i in xrange(5,0,-1):
        rows,cols = gpA[i-1].shape[:2]
        GE = cv2.pyrUp(gpA[i])[:rows,:cols]
        L = cv2.subtract(gpA[i-1],GE)
        lpA.append(L)

    # generate Laplacian Pyramid for image 2
    lpB = [gpB[5]]
    for i in xrange(5,0,-1):
        rows,cols = gpB[i-1].shape[:2]
        GE = cv2.pyrUp(gpB[i])[:rows,:cols]
        L = cv2.subtract(gpB[i-1],GE)
        lpB.append(L)

    # Now add the images with mask
    LS = []
    length = len(lpA)
    for i in range(length):
        LS.append(lpB[i]*gpM[length-i-1] + lpA[i]*(1-gpM[length-i-1]))

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1,6):
        rows,cols = LS[i].shape[:2]
        ls_ = cv2.pyrUp(ls_)[:rows,:cols]
        ls_ = cv2.add(ls_, LS[i])
    ls_ = np.clip(ls_, 0, 255)
    return ls_.astype(np.uint8)
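
A usage sketch for blend(), assuming two equally sized images and a float mask of the same shape with values in [0, 1] (1 selects image2); the file names are hypothetical:

img1 = cv2.imread('left.jpg')      # hypothetical inputs
img2 = cv2.imread('right.jpg')
mask = np.zeros(img1.shape, np.float32)
mask[:, img1.shape[1] // 2:] = 1.0   # take the right half from img2
result = blend(img1, img2, mask)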
Example #16
def test_on_video(net, vid_fn, out_fn):
    # capture
    # cap = cv.VideoCapture(0)
    cap = cv.VideoCapture(vid_fn)
    fps = cap.get(cv.cv.CV_CAP_PROP_FPS)
    ret, img = cap.read()

    # writer
    orig_shape = (227, 227)  # img.shape
    fourcc = cv.cv.CV_FOURCC(*'XVID')
    wri = cv.VideoWriter(out_fn, fourcc, fps, orig_shape)
    frame = 0
    print('before read')

##    print(cap)
##    print(fps)
##    print(vid_fn)
##    print(cap.isOpened())
    while cap.isOpened():
        ret, img = cap.read()
        if not ret:
            print 'finished'
            break

        img_mean, img_std = cv.meanStdDev(img)

        # normalize each channel to zero mean and unit variance
        r = cv.subtract(img[:,:,0], img_mean[0])
        g = cv.subtract(img[:,:,1], img_mean[1])
        b = cv.subtract(img[:,:,2], img_mean[2])

        r = cv.divide(r, img_std[0])
        g = cv.divide(g, img_std[1])
        b = cv.divide(b, img_std[2])

        test_img = np.zeros(img.shape)
        test_img[:,:,0] = r
        test_img[:,:,1] = g
        test_img[:,:,2] = b

        img = draw_joints_all(img, test_img, net)
        img = cv.resize(img, (orig_shape[1], orig_shape[0]))
        wri.write(img)
        cv.imshow("frame", img)
        frame += 1
        print frame
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    wri.release()
    cv.destroyAllWindows()
Example #17
def mask_thinning(img, method='auto'):
    """
    returns the skeleton (thinned image) of a mask.
    This uses `thinning.guo_hall_thinning` if available and otherwise falls back
    to a slow python implementation taken from 
    http://opencvpython.blogspot.com/2012/05/skeletonization-using-opencv-python.html
    Note that this implementation is not equivalent to guo_hall implementation
    """
    # try importing the thinning module
    try:
        import thinning
    except ImportError:
        thinning = None
    
    # determine the method to use if automatic method is requested
    if method == 'auto':
        if thinning is None:
            method = 'python'
        else:
            method = 'guo-hall'
    
    # do the thinning with the requested method
    if method == 'guo-hall':
        if thinning is None:
            raise ImportError('Using the `guo-hall` method for thinning '
                              'requires the `thinning` module, which could not '
                              'be imported.')
        skel = thinning.guo_hall_thinning(img)
    
    elif method =='python':
        # thinning module was not available and we use a python implementation
        size = np.size(img)
        skel = np.zeros(img.shape, np.uint8)
         
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        while True:
            eroded = cv2.erode(img, kernel)
            temp = cv2.dilate(eroded, kernel)
            cv2.subtract(img, temp, temp)
            cv2.bitwise_or(skel, temp, skel)
            img = eroded
         
            zeros = size - cv2.countNonZero(img)
            if zeros==size:
                break
            
    else:
        raise ValueError('Unknown thinning method `%s`' % method)
        
    return skel
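
For example, the pure-python fallback can be selected explicitly even when the thinning module is installed:

skel = mask_thinning(mask, method='python')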
Example #18
def laplacian_pyramid_blending(img_in1, img_in2):

    # Write laplacian pyramid blending codes here

    img_in1 = img_in1[:, :img_in1.shape[0]]
    img_in2 = img_in2[:img_in1.shape[0], :img_in1.shape[0]]

    # generate Gaussian pyramid for A
    G = img_in1.copy()
    gpA = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpA.append(G)

    # generate Gaussian pyramid for B
    G = img_in2.copy()
    gpB = [G]
    for i in xrange(6):
        G = cv2.pyrDown(G)
        gpB.append(G)

    # generate Laplacian Pyramid for A
    lpA = [gpA[5]]
    for i in xrange(5, 0, -1):
        GE = cv2.pyrUp(gpA[i])
        L = cv2.subtract(gpA[i - 1], GE)
        lpA.append(L)

    # generate Laplacian Pyramid for B
    lpB = [gpB[5]]
    for i in xrange(5, 0, -1):
        GE = cv2.pyrUp(gpB[i])
        L = cv2.subtract(gpB[i - 1], GE)
        lpB.append(L)

    # Now add left and right halves of images in each level
    LS = []
    for la, lb in zip(lpA, lpB):
        rows, cols, dpt = la.shape
        ls = np.hstack((la[:, 0:cols / 2], lb[:, cols / 2:]))
        LS.append(ls)

    # now reconstruct
    ls_ = LS[0]
    for i in xrange(1, 6):
        ls_ = cv2.pyrUp(ls_)
        ls_ = cv2.add(ls_, LS[i])

    img_out = ls_  # Blending result
    return True, img_out
def blend(image, tileSize):
    b=1
    a = len(image)
    l = len(image[0])
    p= image[0:tileSize,30:tileSize]
    for j in range(0,len(image)-tileSize,tileSize):
        for k in range(0,len(image[0])-tileSize,tileSize):

            A = image[j+tileSize-3:j+tileSize,k+tileSize-3:k+tileSize]
            G = A.copy()
            gpA = [G]
            for i in xrange(3):
                G = cv2.pyrDown(G)
                gpA.append(G)
            B = image[j:j+tileSize,k+tileSize:k+tileSize+tileSize]
            G = B.copy()
            gpB = [G]
            for i in xrange(3):
                G = cv2.pyrDown(G)
                gpB.append(G)
            lpA = [gpA[2]]
            for i in xrange(2,0,-1):
                GE = cv2.pyrUp(gpA[i])
                b = GE[0:len(gpA[i-1]),0:len(gpA[i-1])]
                L = cv2.subtract(gpA[i-1],b)
                lpA.append(L)
            lpB = [gpB[2]]
            for i in xrange(2,0,-1):
                GE = cv2.pyrUp(gpB[i])
                b = GE[0:len(gpB[i-1]),0:len(gpB[i-1])]
                L = cv2.subtract(gpB[i-1],b)
                lpB.append(L)
            # Now add left and right halves of images in each level
            LS = []
            for la,lb in zip(lpA,lpB):
                rows,cols,dpt = la.shape
                p =la[:,0:2]
                pp= lb[:,cols/2:]
                ls = np.hstack((la[:,0:2], lb[:,cols/2:]))
                LS.append(ls)
            ls_ = LS[0]
            # now reconstruct
            for i in xrange(1,3):
                ls_ = cv2.pyrUp(ls_)
                b = ls_[0:len(LS[i]),0:len(LS[i])]
                ls_ = cv2.add(b, LS[i])
            # cv2.imwrite('Pyramid_blending2.jpg', ls_)
            image[j+tileSize-2:j+tileSize-2+tileSize,k+tileSize-2:k+tileSize-2+tileSize] = ls_
    return image
Example #20
def findLaserImage(image, background, threshold, mask=None):
  diff = cv2.subtract(image, background)
  #cv2.imwrite('diff.png', diff)
  channels = cv2.split(diff)
  red = channels[2]
  #red = cv2.addWeighted(channels[0], 1/3.0, channels[1], 1/3.0, 0)
  #red = cv2.addWeighted(red, 1.0, channels[2], 1.0, 0)
  cv2.imwrite('red.png', red)
  if mask is not None:
    cv2.subtract(red, mask, red)
  retval, mask = cv2.threshold(red, threshold, 255, cv2.THRESH_BINARY)
  result = cv2.medianBlur(mask, 7)
  #cv2.imwrite('laser-mask.png', result)
  return result
Example #21
def shift_hueCV2(frame, shift, mode="normal"):
    #shift hue by desired amount
    frame += shift
    
    if mode=="normal":
        pass
        
    elif mode=="absolute":
        frame = cv2.subtract(127 , cv2.absdiff(frame,127))
        frame = cv2.multiply(2,frame)
        
    elif mode=="inverted":
        frame = cv2.subtract(255-frame)
        
    return frame
Example #22
    def process_frame(self, frame):
        if self.background is None:
            self.background = np.zeros(frame.shape, dtype=np.int16)
        if self.frame is not None:
            mframe = (frame + self.frame) >> 1
        else:
            mframe = frame
        self.frame = frame
        frame = mframe
        mean = int(cv2.mean(self.background)[0])
        frame = cv2.subtract(frame, self.background - mean)
        frame[frame < 0] = 0
        frame = cv2.subtract(np.uint16(frame), mean)
        #frame = cv2.subtract(np.uint16(frame), np.uint16(self.background))
        return np.int16(frame)
Example #23
def process_frame(frame):
    """ Process frame based on user input """
    hue = cv2.getTrackbarPos(tbar_channel_select_name, win_debug_name)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    frame = frame[:,:,0]
    
    #shift hue by desired amount
    frame += hue
    mode = 'normal'
    
    if mode=="normal":
        pass
        
    elif mode=="absolute":
        frame = cv2.subtract(127 , cv2.absdiff(frame,127))
        frame = cv2.multiply(2,frame)
        
    elif mode=="inverted":
        frame = cv2.subtract(255-frame)
        

    block_size = cv2.getTrackbarPos(tbar_block_size_name, win_debug_name)
    threshold = cv2.getTrackbarPos(tbar_thresh_name, win_debug_name)

    if not block_size % 2 == 1:
        block_size += 1
        cv2.setTrackbarPos(tbar_block_size_name, win_debug_name, block_size)

    if block_size <= 1:
        block_size = 3
        cv2.setTrackbarPos(tbar_block_size_name, win_debug_name, block_size)

    adaptive = cv2.adaptiveThreshold(frame, 255,
                                     cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     block_size,
                                     threshold)

    if draw_contours:
        cframe = np.zeros((frame.shape[0], frame.shape[1], 3), np.uint8)
        contours, hierarchy = cv2.findContours(adaptive,
                                               cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        cv2.drawContours(cframe, contours, -1, (255, 255, 255), 3)
        return cframe
    else:
        return adaptive
def skeletonization(img):
    '''
    http://opencvpython.blogspot.ru/2012/05/skeletonization-using-opencv-python.html
    '''
    img = img.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)

    # ret, img = cv2.threshold(img, 127, 255, 0)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 7, 2)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    while True:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()

        zeros = size - cv2.countNonZero(img)
        if zeros == size:
            break

    cv2.imwrite("skel.png", skel)
    return skel
Example #25
    def centroMasa(self,color):
        lower = color
        upper = color

        lower = cv2.subtract(lower,Captura.errorColor)
        upper = cv2.add(upper,Captura.errorColor)


        img = cv2.blur(self.img,(10,10))

        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        thres = cv2.inRange(hsv_img, lower, upper)

        moments = cv2.moments(thres, 0)

        area = moments['m00']

        vector = None
        if(area > 1000):
            x = np.uint32(moments['m10'] / area)
            y = np.uint32(moments['m01'] / area)
            vector = [x, y]

        return vector
  def describe(self, image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    features = []

    (h, w) = image.shape[:2]
    (cX, cY) = (int(w * 0.5), int(h * 0.5))

    segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h), (0, cX, cY, h)]

    (xL, yL) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
    elli = np.zeros(image.shape[:2], dtype = 'uint8')
    cv2.ellipse(elli, (cX, cY), (xL, yL), 0, 0, 360, 255, -1)

    for (x0, x1, y0, y1) in segments:
      rect = np.zeros(image.shape[:2], dtype = 'uint8')
      cv2.rectangle(rect, (x0, y0), (x1, y1), 255, -1)
      rect = cv2.subtract(rect, elli)

      hist = self.histogram(image, rect)
      features.extend(hist)

    hist = self.histogram(image, elli)
    features.extend(hist)

    return features
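
self.histogram is not shown in the listing; a plausible sketch of such a method, assuming a flattened, normalized 3-D HSV histogram over the masked region (the bin counts are hypothetical):

  def histogram(self, image, mask):
    # hypothetical bins; image is expected in HSV
    hist = cv2.calcHist([image], [0, 1, 2], mask, [8, 12, 3],
                        [0, 180, 0, 256, 0, 256])
    return cv2.normalize(hist, hist).flatten()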
Example #27
    def rgbfilter_white(self, image, image_bg):
        # keep pixels whose grayscale difference from the background lies in (rd, rd2)
        rd = 2
        rd2 = 100
        diff = cv2.subtract(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), image_bg)
        res = cv2.compare(diff, rd, cv2.CMP_GT)
        res1 = cv2.compare(diff, rd2, cv2.CMP_LT)
        return cv2.bitwise_and(res, res1)
Example #28
def water(img, thresh):
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)

    # sure background area
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening,2,5)
    ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers += 1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0

    markers = cv2.watershed(img,markers)
    img[markers == -1] = [255,0,0]
    return sure_fg, sure_bg, markers, img
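
A minimal usage sketch for water(), assuming a BGR image and an inverted Otsu threshold as the binary input; the file name is hypothetical:

img = cv2.imread('coins.png')     # hypothetical input
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255,
                          cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
sure_fg, sure_bg, markers, segmented = water(img, thresh)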
Example #29
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    # determine the area (i.e. total number of pixels in the image),
    # initialize the output skeletonized image, and construct the
    # morphological structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # keep looping until the erosions remove all pixels from the
    # image
    while True:
        # erode and dilate the image using the structuring element
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)

        # subtract the temporary image from the original, eroded
        # image, then take the bitwise 'or' between the skeleton
        # and the temporary image
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # if there are no more 'white' pixels in the image, then
        # break from the loop
        if area == area - cv2.countNonZero(image):
            break

    # return the skeletonized image
    return skeleton
Example #30
File: bomba.py Project: ncos/checkq
    def get(self):
        r1 = cv2.getTrackbarPos('r1', self.windowname)
        r2 = cv2.getTrackbarPos('r2', self.windowname)
        r3 = cv2.getTrackbarPos('r3', self.windowname)
        l1 = cv2.getTrackbarPos('l1', self.windowname)
        l2 = cv2.getTrackbarPos('l2', self.windowname)
        l3 = cv2.getTrackbarPos('l3', self.windowname)


        # Remove light     
        h, s, v = cv2.split(self.orig_hsv)
        kernel = np.ones((9*2+1, 9*2+1), np.uint8)
        v_dilated = cv2.dilate(v, kernel, iterations = 1)
        v_out = cv2.subtract(v_dilated, v)

        #ret, v_t = cv2.threshold(v, l3, r3, cv2.THRESH_TRUNC)
        
        # Binarization
        #ret, ots = cv2.threshold(v_out, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        #et, ots2 = cv2.threshold(v, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)


        #self.hist(v_out)
        #for i in xrange(l1):
        #    ret, mask = cv2.threshold(v_out, l2, 255, cv2.THRESH_TOZERO)
        #    v_out = cv2.bitwise_and(v_out, mask)
        #    v_out = cv2.add(v_out, (v_out/l3))

        v_out = cv2.bitwise_not(v_out)

        th3 = cv2.adaptiveThreshold(v, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,l1*2+1,l2)
        th4 = cv2.adaptiveThreshold(v_out, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,l1*2+1,l2)

        return [v_out, th3, th4]
Example #31
dilation2 = cv2.dilate(g, kernel2)

# plot all the images and their histograms
gradient_image = np.array(dilation1) - np.array(dilation2)
cv2.imshow('gradient_image', gradient_image)
#blur = cv2.GaussianBlur(gradient_image,(5,5),0)
#cv2.imshow('blur',blur)
#ret3,gradient_image_th = cv2.threshold(gradient_image,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#cv2.imshow('image',gradient_image_th)
#gradient_image_th_eroded=cv2.erode(gradient_image_th,kernel3,iterations = 1)
#res = cv2.bitwise_and(gradient_image_th,gradient_image_th, mask= gradient_image)
#cv2.imshow('final',res);
#gradient_image_th_eroded_erode=cv2.erode(gradient_image_th_eroded,kernel3,iterations = 1)
#cv2.imshow('gradient_image_th_eroded',gradient_image_th_eroded)
#cv2.imshow('gradient_image_th_eroded_erode',gradient_image_th_eroded_erode)
#opening = cv2.morphologyEx(gradient_image_th_eroded, cv2.MORPH_OPEN, kernel1)
#closing = cv2.morphologyEx(gradient_image_th_eroded, cv2.MORPH_CLOSE, kernel1)
#cv2.imshow('Opening',opening)
#cv2.imshow('Closing',closing)

#print(np.array(gradient_image_th).shape)
#print(np.array(bv002).shape)
gradient_image_th_wbv = cv2.subtract(gradient_image, g1)
cv2.imshow('gradient_image_th_wbv', gradient_image_th_wbv)
gradient_image_th_wbvod = cv2.subtract(gradient_image_th_wbv, g2)
#im = cv2.imfill(gradient_image_th,'holes');
cv2.imshow('gradient_image_th_wbvod', gradient_image_th_wbvod)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #32
import cv2
import numpy as np
# Image Arithmetic

img = cv2.imread("download.jpg")
cv2.imshow("Original", img)

cv2.waitKey(0)
M=np.ones(img.shape, dtype="uint8") *150

# Alternate method for matrix
# M1=np.zeros(img.shape, dtype="uint8") + 150

added=cv2.add(img, M)
cv2.imshow("Added", added)

subtracted = cv2.subtract(img, M)
cv2.imshow("Subtracted", subtracted)

mul=cv2.multiply(img, M)
cv2.imshow("Mul", mul)

cv2.waitKey(0)
cv2.destroyAllWindows()
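
The reason to prefer cv2.subtract over plain numpy subtraction: cv2 saturates at the uint8 limits, while numpy wraps around modulo 256. A quick sketch of the difference:

a = np.uint8([10])
b = np.uint8([20])
print(cv2.subtract(a, b))   # [[0]]  -> saturates at 0
print(a - b)                # [246]  -> wraps around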

img = cv2.imread(path)
img, scale = resize_im(img, scale=1000, max_scale=1000)
#cv2.imshow('original' , img)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#cv2.imshow('gray', gray)

# clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(100, 100))
# cl1 = clahe.apply(gray)

# cv2.imshow('clahe', cl1)

gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)
#cv2.imshow("g", gradient)

blurred = cv2.blur(gradient, (3, 3)) # smooth the gradient with a box blur
(_, thresh) = cv2.threshold(blurred, 150, 255, cv2.THRESH_BINARY)
#cv2.imshow("binary", thresh)

closed = cv2.erode(thresh, None, iterations=3)
closed = cv2.dilate(closed, None, iterations=2)
#cv2.imshow("f", closed)

thresh = closed
# compute per-row and per-column mean pixel values
h, w = thresh.shape
hor_mean = [np.array(thresh[i, :]).mean() for i in range(h)]
Example #34
thr = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)
plt.figure(figsize=(20, 20))

# dilation - erode with / without blur
kernel = np.ones((3, 3), np.uint8)
dil = cv2.dilate(blur, kernel, iterations=1)
ero = cv2.erode(blur, kernel, iterations=1)
morph = dil - ero

kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

topHat = cv2.morphologyEx(imgray, cv2.MORPH_TOPHAT, kernel2)
blackHat = cv2.morphologyEx(imgray, cv2.MORPH_BLACKHAT, kernel2)

imgGrayscalePlusTopHat = cv2.add(imgray, topHat)
subtract = cv2.subtract(imgGrayscalePlusTopHat, blackHat)
thr2 = cv2.adaptiveThreshold(subtract, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                             cv2.THRESH_BINARY, 11, 2)
plt.figure(figsize=(12, 8))
plt.subplot(221), plt.imshow(blur, 'gray')
plt.title("blurred")
plt.subplot(222), plt.imshow(thr, 'gray')
plt.title("after Adaptive Threshold")
plt.subplot(223), plt.imshow(morph, 'gray')
plt.title("Dilation - Erode (with blur)")
plt.subplot(224), plt.imshow(thr2, 'gray')
plt.title("top-black AT")
plt.savefig("Preprocess")
#plt.show()

# without applying Canny
Example #35
    def get_license_plate_char(self, image):
        # Read image
        img_ori = image

        if type(image) == str:
            if platform.system().lower() == 'windows' and image[0] == '~':
                image = os.environ['USERPROFILE'] + image[1:]
            image = os.path.abspath(image)
            img_ori = cv2.imread(image)

        if type(img_ori) is not np.ndarray:
            raise ValueError('ERROR: invalid image!')

        height, width, channel = img_ori.shape

        # Convert image to grayscale
        gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

        # Maximize contrast
        structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT,
                                     structuringElement)
        imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT,
                                       structuringElement)
        imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
        gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

        # Thresholding
        img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)
        usingAdaptive = True
        if usingAdaptive:
            img_thresh = cv2.adaptiveThreshold(
                img_blurred,
                maxValue=255.0,
                adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                thresholdType=cv2.THRESH_BINARY_INV,
                blockSize=19,
                C=12)
        else:
            _, img_thresh = cv2.threshold(img_blurred,
                                          thresh=0,
                                          maxval=255,
                                          type=cv2.THRESH_BINARY_INV
                                          | cv2.THRESH_OTSU)

        # Find contours
        contours, _ = cv2.findContours(img_thresh,
                                       mode=cv2.RETR_LIST,
                                       method=cv2.CHAIN_APPROX_SIMPLE)

        # Prepare data
        contours_dict = []
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            # insert into dict
            contours_dict.append({
                'contour': contour,
                'x': x,
                'y': y,
                'w': w,
                'h': h,
                'cx': x + (w / 2),
                'cy': y + (h / 2)
            })

        # Select candidates by char size
        MIN_AREA = 80
        MIN_WIDTH, MIN_HEIGHT = 2, 8
        MIN_RATIO, MAX_RATIO = 0.25, 1.0

        possible_contours = []
        cnt = 0
        for d in contours_dict:
            area = d['w'] * d['h']
            ratio = d['w'] / d['h']
            if area > MIN_AREA \
            and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT \
            and MIN_RATIO < ratio < MAX_RATIO:
                d['idx'] = cnt
                cnt += 1
                possible_contours.append(d)

        # Select candidates by arrangement of contours
        MAX_DIAG_MULTIPLYER = 5  # 5
        MAX_ANGLE_DIFF = 12.0  # 12.0
        MAX_AREA_DIFF = 0.5  # 0.5
        MAX_WIDTH_DIFF = 0.8  # 0.8
        MAX_HEIGHT_DIFF = 0.2  # 0.2
        MIN_N_MATCHED = 5  # 3

        def find_chars(contour_list):
            matched_result_idx = []
            for d1 in contour_list:
                matched_contours_idx = []
                for d2 in contour_list:
                    if d1['idx'] == d2['idx']:
                        continue
                    dx = abs(d1['cx'] - d2['cx'])
                    dy = abs(d1['cy'] - d2['cy'])
                    diagonal_length = np.sqrt(d1['w']**2 + d1['h']**2)
                    distance = np.linalg.norm(
                        np.array((d1['cx'], d1['cy'])) -
                        np.array((d2['cx'], d2['cy'])))
                    if dx == 0:
                        angle_diff = 90
                    else:
                        angle_diff = np.degrees(np.arctan(dy / dx))
                    area_diff = abs(d1['w'] * d1['h'] -
                                    d2['w'] * d2['h']) / (d1['w'] * d1['h'])
                    width_diff = abs(d1['w'] - d2['w']) / d1['w']
                    height_diff = abs(d1['h'] - d2['h']) / d1['h']
                    if distance < diagonal_length * MAX_DIAG_MULTIPLYER \
                    and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
                    and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
                        matched_contours_idx.append(d2['idx'])
                # append this contour
                matched_contours_idx.append(d1['idx'])

                if len(matched_contours_idx) < MIN_N_MATCHED:
                    continue

                matched_result_idx.append(matched_contours_idx)

                unmatched_contour_idx = []
                for d4 in contour_list:
                    if d4['idx'] not in matched_contours_idx:
                        unmatched_contour_idx.append(d4['idx'])
                unmatched_contour = np.take(possible_contours,
                                            unmatched_contour_idx)

                # recursive
                recursive_contour_list = find_chars(unmatched_contour)
                for idx in recursive_contour_list:
                    matched_result_idx.append(idx)

                break

            # optimizing
            ret = []
            for idx_list in matched_result_idx:
                matched_contour = np.take(possible_contours, idx_list)
                sorted_contour = sorted(matched_contour, key=lambda x: x['x'])
                matched = []
                for i in range(len(sorted_contour) - 1):
                    d1 = sorted_contour[i]
                    d2 = sorted_contour[i + 1]
                    if len(matched) == 0:
                        matched.append(d1['idx'])
                    diagonal_length = np.sqrt(d1['w']**2 + d1['h']**2)
                    distance = np.linalg.norm(
                        np.array((d1['cx'], d1['cy'])) -
                        np.array((d2['cx'], d2['cy'])))
                    if distance > diagonal_length * 3:
                        sorted_contour = sorted_contour[:i + 1]
                        break
                    matched.append(d2['idx'])
                if len(matched) > 0:
                    ret.append(matched)

            # return matched_result_idx
            return ret

        result_idx = find_chars(possible_contours)

        matched_result = []
        for idx_list in result_idx:
            matched_result.append(np.take(possible_contours, idx_list))

        # Rotate plate image
        PLATE_WIDTH_PADDING = 1.1
        # PLATE_HEIGHT_PADDING = 1.1
        MIN_PLATE_RATIO = 3
        MAX_PLATE_RATIO = 12

        plate_imgs = []
        plate_infos = []
        for matched_chars in matched_result:
            sorted_chars = sorted(matched_chars, key=lambda x: x['x'])
            plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2
            plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2
            plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] -
                           sorted_chars[0]['x']) * PLATE_WIDTH_PADDING
            sum_height = 0
            for d in sorted_chars:
                sum_height += d['h']
            plate_height = int(sum_height / len(sorted_chars) *
                               PLATE_WIDTH_PADDING)

            triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy']
            triangle_hypotenus = np.linalg.norm(
                np.array((sorted_chars[0]['cx'], sorted_chars[0]['cy'])) -
                np.array((sorted_chars[-1]['cx'], sorted_chars[-1]['cy'])))
            angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))
            rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx,
                                                              plate_cy),
                                                      angle=angle,
                                                      scale=1.0)

            img_rotated = cv2.warpAffine(img_thresh,
                                         M=rotation_matrix,
                                         dsize=(width, height))
            img_cropped = cv2.getRectSubPix(img_rotated,
                                            patchSize=(int(plate_width),
                                                       int(plate_height)),
                                            center=(int(plate_cx),
                                                    int(plate_cy)))

            ratio = img_cropped.shape[1] / img_cropped.shape[0]
            if ratio < MIN_PLATE_RATIO or ratio > MAX_PLATE_RATIO:
                continue

            plate_imgs.append(img_cropped)
            plate_infos.append({
                'x': int(plate_cx - plate_width / 2),
                'y': int(plate_cy - plate_height / 2),
                'w': int(plate_width),
                'h': int(plate_height)
            })

        # Another thresholding to find chars
        longest_idx, longest_text = -1, 0
        plate_chars = []

        for i, plate_img in enumerate(plate_imgs):
            plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
            _, plate_img = cv2.threshold(plate_img,
                                         thresh=0.0,
                                         maxval=255.0,
                                         type=cv2.THRESH_BINARY
                                         | cv2.THRESH_OTSU)

            # find contours again (same as above)
            contours, _ = cv2.findContours(plate_img,
                                           mode=cv2.RETR_LIST,
                                           method=cv2.CHAIN_APPROX_SIMPLE)
            plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]
            plate_max_x, plate_max_y = 0, 0

            for contour in contours:
                x, y, w, h = cv2.boundingRect(contour)
                area = w * h
                ratio = w / h
                if area > MIN_AREA and w > MIN_WIDTH and h > MIN_HEIGHT and MIN_RATIO < ratio < MAX_RATIO:
                    if x < plate_min_x:
                        plate_min_x = x
                    if y < plate_min_y:
                        plate_min_y = y
                    if x + w > plate_max_x:
                        plate_max_x = x + w
                    if y + h > plate_max_y:
                        plate_max_y = y + h

            img_result = plate_img[plate_min_y:plate_max_y,
                                   plate_min_x:plate_max_x]
            img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0)
            _, img_result = cv2.threshold(img_result,
                                          thresh=0.0,
                                          maxval=255.0,
                                          type=cv2.THRESH_BINARY
                                          | cv2.THRESH_OTSU)

            # dilation
            kernel = np.ones((2, 2), np.uint8)
            img_result = cv2.dilate(img_result, kernel=kernel)

            img_result = cv2.copyMakeBorder(img_result,
                                            top=10,
                                            bottom=10,
                                            left=10,
                                            right=10,
                                            borderType=cv2.BORDER_CONSTANT,
                                            value=(0, 0, 0))

            chars = pytesseract.image_to_string(img_result,
                                                lang='kor',
                                                config='--psm 7 --oem 0')

            result_chars = ''
            has_digit = False
            for c in chars:
                if ord('가') <= ord(c) <= ord('힣') or c.isdigit():
                    if c.isdigit():
                        has_digit = True
                    result_chars += c

            plate_chars.append(result_chars)
            if has_digit and len(result_chars) > longest_text:
                longest_idx = i
                longest_text = len(result_chars)

        # Result
        if len(plate_chars) == 0:
            gc.collect()
            return None
        # info = plate_infos[longest_idx]
        chars = plate_chars[longest_idx]

        # print(chars)
        gc.collect()
        return chars
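
A hedged usage sketch; the enclosing class is not included in the listing, so the class name and image path below are hypothetical:

reader = PlateReader()                            # hypothetical class
print(reader.get_license_plate_char('car.jpg'))   # hypothetical path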
def keyframeDetection(dest, Thres, plotMetrics=False, verbose=False):
    
    keyframePath = dest+'/keyFrames'
    imageGridsPath = dest+'/imageGrids'
    csvPath = dest+'/csvFile'
    path2file = csvPath + '/output.csv'
    prepare_dirs(keyframePath, imageGridsPath, csvPath)

    cap = cv2.VideoCapture('tmp.mp4')
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
  
    if not cap.isOpened():
        print("Error opening video file")

    lstfrm = []
    lstdiffMag = []
    timeSpans = []
    images = []
    full_color = []
    lastFrame = None
    Start_time = time.process_time()
    fps = cap.get(cv2.CAP_PROP_FPS)
    def timestamp(frame_number):
        return float(frame_number) / fps
    # Read until video is completed
    for i in range(length):
        ret, frame = cap.read()
        grayframe, blur_gray = convert_frame_to_grayscale(frame)

        frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES) - 1
        lstfrm.append(frame_number)
        images.append(grayframe)
        full_color.append(frame)
        if frame_number == 0:
            lastFrame = blur_gray

        diff = cv2.subtract(blur_gray, lastFrame)
        diffMag = cv2.countNonZero(diff)
        lstdiffMag.append(diffMag)
        stop_time = time.process_time()
        time_Span = stop_time-Start_time
        timeSpans.append(time_Span)
        lastFrame = blur_gray

    cap.release()
    y = np.array(lstdiffMag)
    base = peakutils.baseline(y, 2)
    indices = peakutils.indexes(y-base, Thres, min_dist=1)
    
    ##plot to monitor the selected keyframe
    if (plotMetrics):
        plot_metrics(indices, lstfrm, lstdiffMag)

    cnt = 1
    for x in indices:
        cv2.imwrite(os.path.join(keyframePath, 'keyframe' + str(timestamp(x)) + '.jpg'), full_color[x])
        cnt += 1
        log_message = 'keyframe ' + str(cnt) + ' happened at ' + str(timeSpans[x]) + ' sec.'
        if verbose:
            print(log_message)
        # append one row per keyframe instead of rewriting the file on every pass
        with open(path2file, 'a') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow([log_message])

    cv2.destroyAllWindows()
Example #37
import os
import cv2 as cv
from PIL import Image
from PIL import ImageChops
from matplotlib import pyplot as plt

image1 = cv.imread('./sample/0006_3.jpg')
image1 = cv.resize(image1, (960, 540))
# image1 = cv.cvtColor(image1, cv.COLOR_BGR2GRAY)
image2 = cv.imread('./sample/0006_3_A.jpg')
image2 = cv.resize(image2, (960, 540))

# image2 = cv.cvtColor(image2, cv.COLOR_BGR2GRAY)
[height, width, channels] = image1.shape
print(height, width)
# image2 = cv.resize(image2, (457, 395))
image3 = cv.subtract(image2, image1)
# image3 = image3 - np.mean(image3)
image3[image3 < 0] = 0
# image3 = cv.cvtColor(image3, cv.COLOR_BGR2GRAY)
cv.imshow("1", image3)
image3 = cv.cvtColor(image3, cv.COLOR_BGR2GRAY)
cv.imshow("1.1", image3)


def colorDiff(img1, img2):
    b1, g1, r1 = cv.split(img1)
    b2, g2, r2 = cv.split(img2)

    diff1 = cv.absdiff(b1, b2)
    diff2 = cv.absdiff(g1, g2)
    diff3 = cv.absdiff(r1, r2)
Example #38
    def remove_background(self, img):
        print("Removing  background")
        #== Parameters =======================================================================
        BLUR = 21
        CANNY_THRESH_1 = 5
        CANNY_THRESH_2 = 10
        MASK_DILATE_ITER = 10
        MASK_ERODE_ITER = 10
        MASK_COLOR = (255, 255, 255)  # In BGR format

        #== Edge Background Removal ==========================================================

        #-- Read image -----------------------------------------------------------------------
        arr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        bg = cv2.cvtColor(arr_img, cv2.COLOR_RGB2RGBA)
        gray = cv2.cvtColor(arr_img, cv2.COLOR_BGR2GRAY)

        #-- Edge detection -------------------------------------------------------------------
        edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
        edges = cv2.dilate(edges, None)
        edges = cv2.erode(edges, None)

        #-- Find contours in edges, sort by area ---------------------------------------------
        contour_info = []
        _, contours, _ = cv2.findContours(edges, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_NONE)
        for c in contours:
            contour_info.append((
                c,
                cv2.isContourConvex(c),
                cv2.contourArea(c),
            ))
        contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
        max_contour = contour_info[0]

        #-- Create empty mask, draw filled polygon on it corresponding to largest contour ----
        # Mask is black, polygon is white
        mask = np.zeros(edges.shape)
        cv2.fillConvexPoly(mask, max_contour[0], (255, 255, 255))

        #-- Smooth mask, then blur it --------------------------------------------------------
        mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
        mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
        mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)

        #== Flood-fill background removal =====================================================
        im_floodfill = arr_img.copy()

        # Invert floodfilled image
        im_floodfill_inv = cv2.bitwise_not(im_floodfill)

        # Create mask of image foreground for keeping
        mask_flood = self.get_foreground(im_floodfill_inv)

        # Combine Edge and Flood-fill masks
        combined = cv2.subtract(mask.astype("uint8"),
                                mask_flood.astype("uint8"))

        # Apply combined mask to alpha channel of image
        bg[:, :, 3] = combined

        # Apply mask for visual checkpoint to see success of background removal
        bg_visual = cv2.bitwise_and(arr_img,
                                    arr_img,
                                    mask=mask.astype("uint8"))
        fg_visual = cv2.bitwise_or(arr_img,
                                   arr_img,
                                   mask=mask_flood.astype("uint8"))
        combined_visual = cv2.subtract(bg_visual, fg_visual)

        # Save test images
        cv2.imwrite('./bg.jpg', bg_visual)
        cv2.imwrite('./fg.jpg', fg_visual)
        cv2.imwrite('./combined_masked.jpg', combined_visual)

        return bg
#Generate a Gaussian pyramid for manzana
#(the start of this snippet is truncated in the source; the seed and loop are
# reconstructed here to mirror the globo pyramid below)
G = manzana.copy()
gpManz = [G]
for i in range(6):
    G = cv2.pyrDown(G)
    gpManz.append(G)

#Generate a Gaussian pyramid for globo
G = globo.copy()
gpGlobo = [G]
for i in range(6):
    G = cv2.pyrDown(G)
    gpGlobo.append(G)

#Generate a Laplacian pyramid for manzana
lpManzana = [gpManz[5]]
for i in range(5, 0, -1):
    GE = cv2.pyrUp(gpManz[i])
    height, width = gpManz[i - 1].shape[:2]
    GE1 = cv2.resize(GE, (width, height))
    L = cv2.subtract(gpManz[i - 1], GE1)
    lpManzana.append(L)

#Generate a Laplacian pyramid for globo
lpGlobo = [gpGlobo[5]]
for i in range(5, 0, -1):
    GE = cv2.pyrUp(gpGlobo[i])
    height, width = gpGlobo[i - 1].shape[:2]
    GE1 = cv2.resize(GE, (width, height))
    L = cv2.subtract(gpGlobo[i - 1], GE1)
    lpGlobo.append(L)

#Join the left half of the 'manzana' image with the right half of the 'globo'
#image at each level (a completion sketch follows below)

LS = []
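# The source breaks off after LS = []; a minimal completion sketch (assumed,
# following the standard OpenCV blending tutorial; numpy imported as np):
# join the left/right halves level by level, then collapse the combined
# pyramid back into the blended image.
for lm, lg in zip(lpManzana, lpGlobo):
    rows, cols = lm.shape[:2]
    ls = np.hstack((lm[:, :cols // 2], lg[:, cols // 2:]))
    LS.append(ls)

blended = LS[0]
for i in range(1, len(LS)):
    blended = cv2.pyrUp(blended)
    h, w = LS[i].shape[:2]
    blended = cv2.resize(blended, (w, h))  # guard against odd-size rounding
    blended = cv2.add(blended, LS[i])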
Example #40
0
types=[]
empty_img = cv2.imread('empty.jpg')
types.append(empty_img)
for square in all_square_list:
    if not isImageExist(square, types):
        types.append(square)

#This part classifies all 19*11 cropped tiles; including the blank tile there are 38
#types, numbered 0-37. Each of the 19*11 tiles is then compared in turn to assign its
#type number, encoding the whole board into a numeric matrix.
record = []
line = []
for square in all_square_list:
    num = 0
    for type1 in types:
        res = cv2.subtract(square, type1)
        if not np.any(res):
            line.append(num)
            break
        num+=1
#line collects one column of tile codes; len(line) is the number of tiles in that column;
#for square in all_square_list traverses column by column, so record is transposed
#afterwards to restore the normal visual order.
    if len(line) == 11:
        record.append(line)
        line=[]

result = np.transpose(record)
print(result)
#    return record
for i in range(108):
            autoRelease(result,game_x,game_y)
Example #41
0
import cv2
import numpy as np

img = cv2.imread("../00_images/hand.jpg")

# create Gaussian pyramid
# We can find Gaussian pyramids using cv.pyrDown() and cv.pyrUp() functions.
layer = img.copy()
gaussian_pyramid = [layer]

for i in range(6):
    layer = cv2.pyrDown(layer)
    gaussian_pyramid.append(layer)

# create Laplacian pyramid
layer = gaussian_pyramid[5]
cv2.imshow("6", layer)
laplacian_pyramid = [layer]
# start =5, stop = 0, -1 = reverse order
for i in range(5, 0, -1):
    size = (gaussian_pyramid[i - 1].shape[1], gaussian_pyramid[i - 1].shape[0])
    gaussian_expanded = cv2.pyrUp(gaussian_pyramid[i], dstsize=size)
    laplacian = cv2.subtract(gaussian_pyramid[i - 1], gaussian_expanded)
    laplacian_pyramid.append(laplacian)
    cv2.imshow(str(i), laplacian)

cv2.imshow("Gaussian 0", gaussian_pyramid[0])

cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
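# A follow-up sketch (not in the original): the Laplacian pyramid lets the
# image be rebuilt (approximately, since uint8 subtraction saturates) by
# repeatedly upsampling and adding each level back in.
reconstructed = laplacian_pyramid[0]
for lap in laplacian_pyramid[1:]:
    size = (lap.shape[1], lap.shape[0])
    reconstructed = cv2.pyrUp(reconstructed, dstsize=size)
    reconstructed = cv2.add(reconstructed, lap)

cv2.imshow("reconstructed", reconstructed)
cv2.waitKey(0)
cv2.destroyAllWindows()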
Example #42
0
def box_detection(img_color, result, box):
    ''' Initial setup '''
    blurred = cv2.GaussianBlur(img_color, (5, 5), 0)  # apply Gaussian blur
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    retval, bin = cv2.threshold(gray, 0.2 * gray.max(), 255,
                                cv2.THRESH_BINARY)  # create a binary image
    # cv2.imshow('bin', bin) # inspect the generated binary image
    # cv2.waitKey()
    ''' Image segmentation '''
    # noise removal
    kernel = np.ones((5, 5), np.uint8)  # kernel size is 5*5
    # opening = cv2.morphologyEx(bin,cv2.MORPH_OPEN,kernel, iterations = 3) # remove background noise with an opening operation
    opening = cv2.morphologyEx(bin, cv2.MORPH_CLOSE, kernel,
                               iterations=3)  # remove noise inside objects with a closing operation
    # cv2.imshow('opening', opening) # inspect the binary image after the morphology operations
    # cv2.waitKey()

    # obtain the sure background
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # distance-transform (skeleton) image
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    result_dist_transform = cv2.normalize(dist_transform, None, 255, 0,
                                          cv2.NORM_MINMAX, cv2.CV_8UC1)

    #  obtain the sure foreground
    ret, sure_fg = cv2.threshold(result_dist_transform,
                                 0.7 * result_dist_transform.max(), 255,
                                 cv2.THRESH_BINARY)
    sure_fg = np.uint8(sure_fg)
    sure_fg = cv2.dilate(sure_fg, kernel, iterations=3)  # enlarge the foreground
    # cv2.imshow('sure_fg', sure_fg)
    # cv2.waitKey()

    # sure background - sure foreground = unknown region
    unknown = cv2.subtract(sure_bg, sure_fg)

    # image labelling
    ret, markers = cv2.connectedComponents(
        sure_fg)  # label the sure foreground regions; markers start from 0 (the background)
    markers = markers + 1  # increment every marker by one so the background becomes 1
    markers[unknown == 255] = 0  # label the unknown region as 0

    # foreground/background get values above 0 and uncertain pixels get 0 ->
    # watershed resolves the uncertain pixels and marks boundaries with -1
    markers = cv2.watershed(img_color, markers)
    img_color[markers == -1] = [0, 0, 0]  # paint the object outlines black
    img_color[markers == 1] = [0, 0, 0]  # paint the background black; objects keep their original colour
    # cv2.imshow('foreground', img_color) # inspect the image with only the objects extracted
    # cv2.waitKey()
    ''' Find the corners of the detected foreground '''
    # detect corners to locate the vertices of the foreground
    img_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)
    corners = cv2.goodFeaturesToTrack(
        img_gray, 150, 0.01, 5)  # input image, max number of corners, corner quality, min distance between corners

    # pick the min/max coordinates among the detected corners to decide the vertices
    pos = [0, 10000, 10000, 0, 0, -1, -1,
           0]  # x, min_y, min_x, y, x, max_y, max_x, y
    for i in corners:
        x, y = i[0]
        if x > 5 and y > 5 and x < 635 and y < 475:  # ignore the corners of the camera frame itself
            if y < pos[1]:  # find the minimum Y coordinate (top left)
                pos[0] = x
                pos[1] = y
            if x < pos[2]:  # find the minimum X coordinate (bottom left)
                pos[2] = x
                pos[3] = y
            if y > pos[5]:  # find the maximum Y coordinate (bottom right)
                pos[4] = x
                pos[5] = y
            if x > pos[6]:  # find the maximum X coordinate (top right)
                pos[6] = x
                pos[7] = y
        # cv2.circle(img_color, (x, y), 3, (0, 0, 255), 2) # mark each corner with a circle

    # cv2.imshow('corner', img_color) # inspect the image with the corners marked
    # cv2.waitKey()

    # print(pos)
    # cv2.waitKey()

    if abs(pos[5] - pos[7]) <= 30:  # if the box faces the camera squarely

        # contour detection
        retval, img_bin = cv2.threshold(img_gray, 1, 255,
                                        cv2.THRESH_BINARY)  # create a binary image
        val, contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_LIST,
                                                    cv2.CHAIN_APPROX_SIMPLE)

        # extract the contour with the smallest area (= the box)
        min_area = 1000000
        min_index = -1
        index = -1
        for i in contours:
            area = cv2.contourArea(i)
            index = index + 1
            if area < min_area:
                min_area = area
                min_index = index

        if min_index == -1:  # if no contour was detected
            return result, box  # return the previous state unchanged

        # draw the contour on the result image
        # cv2.drawContours(result, contours, min_index, (0, 255, 0), 2)
        ''' Return result: the box faces the camera squarely '''
        # draw the smallest rectangle that encloses the contour
        cnt = contours[min_index]
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)  # convert to integers
        result = cv2.drawContours(result, [box], 0, (0, 0, 255),
                                  2)  # draw an outline along the detected vertices
        return result, box  # return the outlined image and the vertices

    else:
        ''' Return result: the box does NOT face the camera squarely '''
        box = ((pos[0], pos[1]), (pos[2], pos[3]), (pos[4], pos[5]),
               (pos[6], pos[7]))  # set the vertices
        box = np.int0(box)  # convert to integers
        result = cv2.drawContours(result, [box], 0, (0, 0, 255),
                                  2)  # draw an outline along the detected vertices
        return result, box  # return the outlined image and the vertices
Example #43
0
import cv2 as cv
import numpy as np

image1 = cv.imread('new.jpg')
image2 = cv.imread('new2.jpg')

diff = cv.subtract(image1, image2)

result = not np.any(diff)

if result:
    print('the images are the same')
else:
    print('the images are diff')
Example #44
0
def textDetectWatershed(img):
    """ Text detection using watershed algorithm - NOT IN USE """
    # According to: http://docs.opencv.org/trunk/d3/db4/tutorial_py_watershed.html
    img = resize(img, 2000)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # ret, thresh = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.01 * dist_transform.max(),
                                 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers += 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    implt(markers, t='Markers')
    image = img.copy()

    for mark in np.unique(markers):
        # mark == 0 --> background
        if mark == 0:
            continue

        # Draw it on mask and detect biggest contour
        mask = np.zeros(gray.shape, dtype="uint8")
        mask[markers == mark] = 255

        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        c = max(cnts, key=cv2.contourArea)

        # Draw a bounding rectangle if it contains text
        x, y, w, h = cv2.boundingRect(c)
        cv2.drawContours(mask, c, 0, (255, 255, 255), cv2.FILLED)
        maskROI = mask[y:y + h, x:x + w]
        # Ratio of white pixels to area of bounding rectangle
        r = cv2.countNonZero(maskROI) / (w * h)

        # Limits for text
        if r > 0.2 and 2000 > w > 15 and 1500 > h > 15:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    implt(image)
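# (The next snippet is truncated in the source: it assumes the apple and orange
#  images are loaded above, gp_apple is built the same way as gp_orange, and
#  orange_copy = orange.copy(); gp_orange = [orange_copy] seed this loop.)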
for i in range(6):
    orange_copy = cv2.pyrDown(orange_copy)
    gp_orange.append(orange_copy)





# Generate Laplacian pyramid for apple
apple_copy = gp_apple[5]
lp_apple = [apple_copy]

for i in range(5,0,-1):
    gaussian_extended = cv2.pyrUp(gp_apple[i])
    laplacian = cv2.subtract(gp_apple[i-1],gaussian_extended)
    lp_apple.append(laplacian)


# Generate Laplacian pyramid for orange
orange_copy = gp_orange[5]
lp_orange = [orange_copy]

for i in range(5,0,-1):
    gaussian_extended = cv2.pyrUp(gp_orange[i])
    laplacian = cv2.subtract(gp_orange[i-1],gaussian_extended)
    lp_orange.append(laplacian)



Example #46
0
#PART 2
img = cv2.imread('../DATA/pennies.jpg')
img = cv2.medianBlur(img,35)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

#NOISE REMOVAL (OPTIONAL)
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel,iterations=2)

dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)

ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
sure_bg = cv2.dilate(opening,kernel,iterations=3)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)

ret, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown==255] = 0

markers = cv2.watershed(img,markers)
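# sep_coins is not defined in this "PART 2" snippet; assume it is an
# unblurred display copy of the input image, e.g.:
sep_coins = cv2.imread('../DATA/pennies.jpg')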
# FIND CONTOURS
image, contours, hierarchy = cv2.findContours(markers.copy(),cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
    if hierarchy[0][i][3] == -1:
        cv2.drawContours(sep_coins,contours,i,(255,0,0),10)

display(sep_coins)
    c = src_pts1.ravel()
    d = dst_pts1.ravel()

    canvas1 = gray1.copy()
    canvas2 = gray2.copy()

    for k in range(1,a):
        cv2.circle(canvas1, (c[2*k],c[2*k-1]), 80, (255, 255, 255), -1)
        cv2.circle(canvas2, (d[2*k],d[2*k-1]), 80, (255, 255, 255), -1)
    
    blurred1 = cv2.GaussianBlur(canvas1, (9, 9),0)
    blurred2 = cv2.GaussianBlur(canvas2, (9, 9),0)
    gradX1 = cv2.Sobel(blurred1, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY1 = cv2.Sobel(blurred1, ddepth=cv2.CV_32F, dx=0, dy=1)

    gradient1 = cv2.subtract(gradX1, gradY1)
    gradient1 = cv2.convertScaleAbs(gradient1)
    
    gradX2 = cv2.Sobel(blurred2, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY2 = cv2.Sobel(blurred2, ddepth=cv2.CV_32F, dx=0, dy=1)

    gradient2 = cv2.subtract(gradX2, gradY2)
    gradient2 = cv2.convertScaleAbs(gradient2)
    
    blurred = cv2.GaussianBlur(gradient1, (9, 9),0)
    (_, thresh) = cv2.threshold(blurred, 225, 0, cv2.THRESH_TOZERO_INV)
    (_, thresh) = cv2.threshold(thresh, 30, 0, cv2.THRESH_TOZERO)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)
Example #48
0
import cv2
from matplotlib import pyplot as plt
import numpy as np

img = cv2.imread('fieldman.jpg')
layer = img.copy()
gp = [layer]

for i in range(6):
    layer = cv2.pyrDown(layer)
    gp.append(layer)
    #cv2.imshow(str(i), layer)

layer = gp[5]
cv2.imshow('upper level Gaussian Pyramid', layer)
lp = [layer]

for i in range(5, 0, -1):
    gaussian_extended = cv2.pyrUp(gp[i])
    # bug fix: the original subtracted shape values, not images; resize so
    # both operands match in size, then subtract the images themselves
    height, width = gp[i - 1].shape[:2]
    gaussian_extended = cv2.resize(gaussian_extended, (width, height))
    laplacian = cv2.subtract(gp[i - 1], gaussian_extended)
    cv2.imshow(str(i), laplacian)

cv2.imshow('Original image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #49
0
sightX = 330
sightY = 245

while(True):

    timeStamp = time.time()
    if debug:
        print("")
        print("--New F:")
    ret, img = cam.read()  # grab one frame
    #img = cv2.imread("testImg.png")

    # split the image channels
    blueImg, greenImg, redImg = cv2.split(img)  # split the image into B, G, R channels
    if mode == "BLUE":                          # choose the detection mode
        img2 = cv2.subtract(blueImg, redImg)  # B channel minus R channel
    else:
        img2 = cv2.subtract(redImg, blueImg)  # R channel minus B channel
    img2 = cv2.subtract(img2, greenImg)  # subtract the G channel from the previous result

    ret, img2 = cv2.threshold(img2, 50, 255, cv2.THRESH_BINARY)  # binarize the image
    img2 = cv2.morphologyEx(img2, cv2.MORPH_OPEN, kernel)  # opening to suppress dark speckles
    contours, hierarchy = cv2.findContours(
        img2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find the contours
    cv2.drawContours(img, contours, -1, (0, 255, 0), 2)  # draw the contours on the original image for display

    n = 0
    x = []
    y = []
    longSide = []
    shortSide = []
import cv2
from skimage.metrics import structural_similarity
import imutils
import numpy as np
from matplotlib import pyplot as plt

# load the two input images
img_1 = cv2.imread("images/red2.jpg")
img_2 = cv2.imread("images/red3.jpg")

#-----------------check both img size ----------------------------------
if img_1.shape == img_2.shape:

    print("The images have same size and channels")
    print("Processing...")
    difference = cv2.subtract(img_1, img_2)
    b, g, r = cv2.split(difference)

    # convert the images to grayscale-----------------------------------
    grayA = cv2.cvtColor(img_1, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(img_2, cv2.COLOR_BGR2GRAY)

    # compute the Structural Similarity Index (SSIM) between the two----
    # images, ensuring that the difference image is returned------------
    (score, diff) = structural_similarity(grayA, grayB, full=True)
    diff = (diff * 255).astype("uint8")

    if cv2.countNonZero(b) == 0 and cv2.countNonZero(
            g) == 0 and cv2.countNonZero(r) == 0 and score == 1:
        print("The images are completely Equal")
        print("Similarities Percentage:- {}".format(score * 100))
Example #51
0

    _, img = capture.read()
    if len(esquinasCancha) == 2:
        img = img[esquinasCancha[0].pos[1] : esquinasCancha[1].pos[1] , esquinasCancha[0].pos[0] : esquinasCancha[1].pos[0]]

    imgBlur = cv2.blur(img,(10,10))

    hsv_img = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2HSV)

    for obj in [obj for obj in objetos if isinstance(obj, Movil)]:

        lower[0] = obj.color
        upper[0] = obj.color

        lower = cv2.subtract(lower,error)
        upper = cv2.add(upper,error)


        ml = max(80, min(margenLocal , int(obj.area/10000)))


        hsv_imgParcial , pos = subImagen(hsv_img,obj.pos,ml)

        imgParcial, _ = subImagen(img,obj.pos,ml)


        thresChico = cv2.inRange(hsv_imgParcial, lower, upper)


        momentsChico = cv2.moments(thresChico, 0)
def finger(result_names):
    #print(result_names)
    global count0
    global count1
    global count2
    global count3
    global count4
    global count5
    global count6
    original = cv2.imread("finger.bmp")
    image_to_compare = cv2.imread(str(result_names) + ".bmp")
    if original.shape == image_to_compare.shape:
        print("The images have same size and channels")
        difference = cv2.subtract(original, image_to_compare)
        b, g, r = cv2.split(difference)
        if cv2.countNonZero(b) == 0 and cv2.countNonZero(
                g) == 0 and cv2.countNonZero(r) == 0:
            print("This fingerprint image is not clear")
        else:
            print("This fingerprint image is clear")

        sift = cv2.xfeatures2d.SIFT_create()
        kp_1, desc_1 = sift.detectAndCompute(original, None)
        kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)
        index_params = dict(algorithm=0, trees=5)
        search_params = dict()
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(desc_1, desc_2, k=2)
        good_points = []
        ratio = 0.6
        #serial_port.write(b'3')
        for m, n in matches:
            if m.distance < ratio * n.distance:
                good_points.append(m)
        if (len(good_points) > 1):
            print("fingerprint matched")
            if result_names == 'Rohit saw':
                if (count0 < 1):

                    print("you are valid for voting")

                    serial_port.write(b'1')
                    time.sleep(0.5)
                    serial_port.write(b'3')

                    count0 = count0 + 1
                else:
                    print("Voter blocked")
            if result_names == 'Tarakeshava':
                if (count1 < 1):

                    print("you are valid for voting")

                    serial_port.write(b'1')
                    time.sleep(0.5)
                    serial_port.write(b'3')

                    count1 = count1 + 1
                else:
                    print("Voter Blocked")
            if result_names == 'Rohan':
                if (count2 < 1):

                    print("you are valid for voting")

                    serial_port.write(b'2')
                    time.sleep(0.5)
                    serial_port.write(b'3')

                    count2 = count2 + 1
                else:
                    print("Voter Blocked")
            if result_names == 'Sandeep':
                if (count3 < 1):

                    print("you are valid for voting")

                    serial_port.write(b'2')
                    time.sleep(0.5)
                    serial_port.write(b'3')

                    count3 = count3 + 1
                else:
                    print("you are not valid for voting")
        else:

            print("Voter Blocked")

        result = cv2.drawMatches(original, kp_1, image_to_compare, kp_2,
                                 good_points, None)
        cv2.imshow("result", result)
        cv2.imshow("Original", original)
        cv2.imshow("Duplicate", image_to_compare)
        cv2.waitKey(0)
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('../images/1.jpg')

M = np.ones(image.shape, dtype='uint8') * 75

sub = cv2.subtract(image, M)  # stray "* 75" removed: subtract already darkens by the constant in M and saturates at 0
cv2.imshow('sub', sub)
add = cv2.add(image, M)
cv2.imshow('add', add)
cv2.imshow('m', M)

cv2.waitKey()
cv2.destroyAllWindows()
Example #54
0
# Equalize the image and calculate histogram:
image_eq = equalize_hist_color(image)
hist_image_eq = hist_color_img(image_eq)

# Add 15 to every pixel on the grayscale image (the result will look lighter) and calculate histogram
M = np.ones(image.shape, dtype="uint8") * 15
added_image = cv2.add(image, M)
hist_color_added_image = hist_color_img(added_image)

# Equalize image and calculate histogram
added_image_eq = equalize_hist_color(added_image)
hist_added_image_eq = hist_color_img(added_image_eq)

# Subtract 15 from every pixel (the result will look darker) and calculate histogram
subtracted_image = cv2.subtract(image, M)
hist_color_subtracted_image = hist_color_img(subtracted_image)

# Equalize image and calculate histogram
subtracted_image_eq = equalize_hist_color(subtracted_image)
hist_subtracted_image_eq = hist_color_img(subtracted_image_eq)

# Plot the images and the histograms (without equalization first)
show_img_with_matplotlib(image, "image", 1)
show_hist_with_matplotlib_rgb(hist_color, "color histogram", 2,
                              ['b', 'g', 'r'])
show_img_with_matplotlib(added_image, "image lighter", 5)
show_hist_with_matplotlib_rgb(hist_color_added_image, "color histogram", 6,
                              ['b', 'g', 'r'])
show_img_with_matplotlib(subtracted_image, "image darker", 9)
show_hist_with_matplotlib_rgb(hist_color_subtracted_image, "color histogram",
Example #55
0
import cv2
import numpy as np

video = cv2.VideoCapture("homework3.mp4")
while True:
    TrueOrFalse, Slide = video.read()
    
    if TrueOrFalse:

        GraySlide=cv2.subtract(cv2.subtract(Slide[:,:,0], Slide[:,:,2]), Slide[:,:,1])
        # cv2.imshow("www", GraySlide)

        BinSlide = cv2.inRange(GraySlide, 5, 65)
        # cv2.imshow("m2", BinSlide)

        # dilate, then erode
        BinSlide = cv2.dilate(BinSlide, np.ones((95, 95)))
        BinSlide = cv2.erode(BinSlide, np.ones((60, 60)))

        # extract the contour data
        aa, bb = cv2.findContours(BinSlide, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # draw the contours
        for i in aa:
            x, y, w, h = cv2.boundingRect(i)
            cv2.rectangle(Slide, (x, y), (x+w, y+h), (0, 0, 255), 2)      # draw all the red boxes first

        cv2.imshow("ShowPage", Slide)         # then display everything at once
        if cv2.waitKey(30) == 13:
            break
    else:
        break  # assumed: the body is truncated in the source; stop when the video ends
Example #56
0
# assumed imports for this snippet (truncated in the source)
import cv2
import numpy as np
import pandas as pd

def init_img_filter(img):
    img = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
    height, width = img.shape[:2]
    filter_arg = int(width * 0.125)
    filter_arg = filter_arg if filter_arg % 2 == 1 else filter_arg + 1
    img_filtered = cv2.GaussianBlur(img, (filter_arg, 1), 0)
    th, img_threshold = cv2.threshold(img_filtered, 165, 255,
                                      cv2.THRESH_BINARY_INV)

    img_open = cv2.morphologyEx(img_threshold, cv2.MORPH_OPEN,
                                np.ones((1, 75), np.uint8))
    img_close = cv2.morphologyEx(img_open, cv2.MORPH_CLOSE,
                                 np.ones((5, 1), np.uint8))
    img_closed_blr = cv2.GaussianBlur(img_close, (3, 3), 0)

    img_closed_thresh = cv2.threshold(img_closed_blr, 101, 255,
                                      cv2.THRESH_BINARY)[1]

    img_y_filter = cv2.GaussianBlur(img, (1, 55), 0)
    th, img_y_thresh = cv2.threshold(img_y_filter, 150, 255,
                                     cv2.THRESH_BINARY_INV)
    img_y_open = cv2.morphologyEx(img_y_thresh, cv2.MORPH_OPEN,
                                  np.ones((5, 5), np.uint8))
    img_y_close = cv2.morphologyEx(img_y_open, cv2.MORPH_CLOSE,
                                   np.ones((2, 2), np.uint8))

    modded = cv2.subtract(img_closed_thresh, img_y_close)

    th, img_threshold_1 = cv2.threshold(img, 155, 255, cv2.THRESH_BINARY_INV)
    hold_0, contours_0, hierarchy = cv2.findContours(img_threshold_1,
                                                     cv2.RETR_TREE,
                                                     cv2.CHAIN_APPROX_SIMPLE)
    blackhat = cv2.subtract(img_threshold_1, modded)

    blackhat_open = cv2.morphologyEx(blackhat, cv2.MORPH_OPEN,
                                     np.ones((2, 2), np.uint8))
    blackhat_closed = cv2.morphologyEx(blackhat_open, cv2.MORPH_CLOSE,
                                       np.ones((2, 2), np.uint8))
    blackhat_closed_blr = cv2.GaussianBlur(blackhat_closed, (1, 15), 4)

    show_boxes = blackhat_closed.copy()
    hold_0, contours_0, hierarchy = cv2.findContours(blackhat_closed_blr,
                                                     cv2.RETR_TREE,
                                                     cv2.CHAIN_APPROX_SIMPLE)

    boxes = {
        'x0': np.array([], dtype=np.uint16),
        'y0': np.array([], dtype=np.uint16),
        'x1': np.array([], dtype=np.uint16),
        'y1': np.array([], dtype=np.uint16),
        'xC': np.array([], dtype=np.uint16),
        'yC': np.array([], dtype=np.uint16),
        'width': np.array([], dtype=np.uint16),
        'height': np.array([], dtype=np.uint16),
        'ratio': np.array([], dtype=np.float32),
        'area': np.array([], dtype=np.float32),
        'angle': np.array([], dtype=np.float32),
        'pixel_mean': np.array([], dtype=np.uint16),
        'pixel_mean_q0': np.array([], dtype=np.uint16),
        'pixel_mean_q1': np.array([], dtype=np.uint16),
        'pixel_mean_q2': np.array([], dtype=np.uint16),
        'pixel_mean_q3': np.array([], dtype=np.uint16)
    }
    for c in contours_0:
        x, y, w, h = cv2.boundingRect(c)
        show_boxes = cv2.rectangle(show_boxes, (x, y), (x + w, y + h),
                                   (155, 155, 155), 2)
        boxes['x0'] = np.append(boxes['x0'], [x])
        boxes['y0'] = np.append(boxes['y0'], [y])
        boxes['x1'] = np.append(boxes['x1'], [x + w])
        boxes['y1'] = np.append(boxes['y1'], [y + h])
        boxes['xC'] = np.append(boxes['xC'], [int(x + (w * 0.5))])  # bug fix: centre is x + w/2
        boxes['yC'] = np.append(boxes['yC'], [int(y + (h * 0.5))])  # bug fix: centre is y + h/2
        boxes['width'] = np.append(boxes['width'], [w])
        boxes['height'] = np.append(boxes['height'], [h])
        boxes['ratio'] = np.append(boxes['ratio'], [h / w])
        boxes['area'] = np.append(boxes['area'], [h * w])
        if len(c) > 4:
            (x, y), (MA, ma), angle = cv2.fitEllipse(c)
        else:
            angle = -1
        boxes['angle'] = np.append(boxes['angle'], [angle])
        boxes['pixel_mean'] = np.append(
            boxes['pixel_mean'],
            np.mean(show_boxes[int(y):int(y + h),
                               int(x):int(x + w)]))
        if w >= 2 and h >= 2:
            for index_0 in range(4):
                if index_0 % 2 == 0:
                    x_0 = int(x)
                    x_1 = int(x + ((w + x) / 2))
                else:
                    x_0 = int(x + (w / 2))
                    x_1 = int(x + w)
                if index_0 < 2:
                    y_0 = int(y)
                    y_1 = int(y + (h / 2))
                else:
                    y_0 = int(y + (h / 2))
                    y_1 = int(y + h)

                value = show_boxes[y_0:y_1, x_0:x_1]
                string = 'pixel_mean_q' + str(index_0)
                #                print(string,x_0,x_1,y_0,y_1,value)
                #                print(np.mean(value))
                # bug fix: x != np.nan is always True; use np.isnan instead
                mean_value = np.mean(value)
                if not np.isnan(mean_value):
                    boxes[string] = np.append(boxes[string], mean_value)
                else:
                    boxes[string] = np.append(boxes[string], int(0))
        else:
            boxes['pixel_mean_q0'] = np.append(boxes['pixel_mean_q0'], [0])
            boxes['pixel_mean_q1'] = np.append(boxes['pixel_mean_q1'], [0])
            boxes['pixel_mean_q2'] = np.append(boxes['pixel_mean_q2'], [0])
            boxes['pixel_mean_q3'] = np.append(boxes['pixel_mean_q3'], [0])

    df = pd.DataFrame(data=boxes)
    return df, img
#construct the argument parser and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required = True, help = "/jurassic_world.jpg")
#args = vars(ap.parse_args())

#load the image and convert it to grayscale
image = cv2.imread("jurassic_world.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

#compute the Scharr gradient magnitude representation of the image
#in both the x and y directions
gradeX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradeY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)

#subtract the y-gradient from the x-gradient
gradient = cv2.subtract(gradeX, gradeY)
gradient = cv2.convertScaleAbs(gradient)

#blur and threshold the image
blurred = cv2.blur(gradient, (9, 9))
(_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

# construct a closing kernel and apply it to  the thresholded image
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

#perform a series of erosions and dilations
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)

# find the contours in the thresholded image, then sort the contours
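# The source breaks off here; a typical continuation sketch (assumed; numpy
# imported as np): keep the largest contour and draw its minimum-area box.
cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)[-2]  # works on OpenCV 3 and 4
c = max(cnts, key=cv2.contourArea)
rect = cv2.minAreaRect(c)
box = np.int0(cv2.boxPoints(rect))
cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
cv2.imshow("Image", image)
cv2.waitKey(0)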
list_repeated = []
list_unique = [0]
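number_of_repeated = 0  # assumed initialization; not shown in the truncated source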
for i in trange(number_of_rows - 1):
    bool_repeated = False
    if df.iloc[i + 1]['x'] == df.iloc[i]['x'] and df.iloc[
            i + 1]['y'] == df.iloc[i]['y'] and df.iloc[
                i + 1]['width'] == df.iloc[i]['width'] and df.iloc[
                    i + 1]['height'] == df.iloc[i]['height']:
        image_i = cv2.imread(
            os.path.join(raw_image_directory, str(df.iloc[i]['filename'])) +
            ".jpg")
        image_iplus1 = cv2.imread(
            os.path.join(raw_image_directory, str(df.iloc[i +
                                                          1]['filename'])) +
            ".jpg")
        difference = cv2.subtract(image_i, image_iplus1)
        b, g, r = cv2.split(difference)
        if cv2.countNonZero(b) == 0 and cv2.countNonZero(
                g) == 0 and cv2.countNonZero(r) == 0:
            number_of_repeated = number_of_repeated + 1
            dict1 = {'filename': df.iloc[i + 1]['filename']}
            list_repeated.append(dict1)
            bool_repeated = True

    if not bool_repeated:
        list_unique.append(i + 1)

print("number_of_repeated", number_of_repeated)
print("number_of_unique", len(list_unique))
output_file = "/4t/yangchihyuan/TransmittedImages/ShuffleNet/repeated.csv"
df_repeated = pd.DataFrame(list_repeated)
Example #59
0
        plt.subplot(1, 2, 2), plt.imshow(images[1], 'gray')
        plt.title(titles[1])
        plt.xticks([]), plt.yticks([])
        plt.ion()
        plt.pause(0.4)  # seconds to display
        plt.close()
    else:
        img_name = "./walk02/walk02" + str(i + 1) + ".jpg"
        img_color = cv2.imread(img_name, cv2.IMREAD_COLOR)

        img_name1 = "./walk02/walk02" + str(i) + ".jpg"
        img_name2 = "./walk02/walk02" + str(i + 1) + ".jpg"
        #print(i+2,"-YES")
        img_color_1 = cv2.imread(img_name1)  # read as a colour image (no flag given)
        img_color_2 = cv2.imread(img_name2)  # read as a colour image (no flag given)
        img_sub = cv2.subtract(img_color_1, img_color_2)
        # add up the frame-to-frame differences after each subtraction
        if i == 1:
            add_img_sub = img_sub
        else:
            add_img_sub = cv2.add(add_img_sub, img_sub)

        # binarize after the subtraction
        img1 = cv2.imread(img_name1, 0)  # read directly as a grayscale image
        img2 = cv2.imread(img_name2, 0)  # read directly as a grayscale image
        img_sub = cv2.subtract(img1, img2)
        ret_2, thresh3 = cv2.threshold(img_sub, 127, 255, cv2.THRESH_TRUNC)
        # save thresh3

        output_tru_name = "./walk02-out/tru/walk02-out-tru-" + str(i) + ".jpg"
        cv2.imwrite(output_tru_name, thresh3)
Example #60
0
import cv2
import numpy as np
from matplotlib import pyplot as plt

img_path = r'D:\Jatayu Unmanned Systems\dataset2\Elon.jpg'  # raw string keeps the backslashes literal
image = cv2.imread(img_path)
img = image

print("max of 255: {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print("min of 0: {}".format(cv2.subtract(np.uint8([50]), np.uint8([100]))))
print("wrap around: {}".format(np.uint8([200]) + np.uint8([100])))  # x1+x2-256
print("wrap around: {}".format(np.uint8([50]) - np.uint8([100])))  # x1-x2+256

M = np.ones(img.shape, dtype="uint8") * 180
added = cv2.add(img, M)
cv2.imshow("Added", added)  # gets brighter
cv2.waitKey(0)
# subtract makes darker
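# A minimal counterpart sketch (not in the original): cv2.subtract saturates
# at 0 instead of wrapping around, so the result simply gets darker.
subtracted = cv2.subtract(img, M)
cv2.imshow("Subtracted", subtracted)
cv2.waitKey(0)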

# Bitwise operations
rectangle = np.zeros(img.shape, dtype="uint8")
cv2.rectangle(rectangle, (25, 25), (275, 275), 255, -1)
cv2.imshow("Rectangle", rectangle)
cv2.waitKey(0)

circle = np.zeros(img.shape, dtype="uint8")
cv2.circle(circle, (150, 150), 150, 255, -1)
cv2.imshow("Circle", circle)
cv2.waitKey(0)

m = cv2.bitwise_and(circle, rectangle)