Code example #1
File: find_obj.py Project: UASLab/ImageAnalysis
    def onmouse(event, x, y, flags, param):
        #cur_vis = vis
        if flags & cv2.EVENT_FLAG_LBUTTON:
            vis = vis0.copy()
            r = 8
            m = (anorm(p1 - (x, y)) < r) | (anorm(p2 - (x, y)) < r)
            idxs = np.where(m)[0]
            for i in idxs:
                status[i] = not status[i]
            draw_keypoints(vis)
            cv2.imshow(win, vis)

        if flags & cv2.EVENT_FLAG_RBUTTON:  # flag for a held right button (EVENT_RBUTTONDOWN is an event code, not a flag)
            vis = vis0.copy()
            r = 8
            m = (anorm(p1 - (x, y)) < r) | (anorm(p2 - (x, y)) < r)
            idxs = np.where(m)[0]
            kp1s, kp2s = [], []
            for i in idxs:
                 (x1, y1), (x2, y2) = p1[i], p2[i]
                 col = (red, green)[status[i]]
                 cv2.line(vis, (x1, y1), (x2, y2), col)
                 kp1, kp2 = kp_pairs[i]
                 kp1s.append(kp1)
                 kp2s.append(kp2)
            vis = cv2.drawKeypoints(vis, kp1s, flags=4, color=kp_color)
            vis[:,w1:] = cv2.drawKeypoints(vis[:,w1:], kp2s, flags=4, color=kp_color)
            cv2.imshow(win, vis)
Code example #2
File: test3.py Project: colphin/Python_OpenCV
def FASTDetection():
	#initiate the FAST object with default values
	fast = cv2.FastFeatureDetector()

	#Find and draw out the key points
	kp = fast.detect(img,None)
	img2 = cv2.drawKeypoints(img, kp, color = (0, 255, 0))

	#print all default params
	print "Threshold: ", fast.getInt('threshold')
	print "nonmaxSuppression: ", fast.getBool('nonmaxSuppression')
	#print "neighborhood: ", fast.getInt('type')
	print "Total Keypoints with nonmaxSuppression: ", len(kp)

	cv2.imshow('dst',img2)
	if cv2.waitKey(0) & 0xff == 27:
		cv2.destroyAllWindows()

	#Disable nonmaxSuppression
	fast.setBool('nonmaxSuppression',0)
	kp = fast.detect(img, None)

	img3  = cv2.drawKeypoints(img, kp, color = (0 , 0 ,255))

	print "Total Keypoints with maxSuppression: ", len(kp)

	cv2.imshow('dst',img3)
	if cv2.waitKey(0) & 0xff == 27:
		cv2.destroyAllWindows()
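Note: this snippet targets the OpenCV 2.x API (cv2.FastFeatureDetector(), getInt/getBool). A rough sketch of the same calls ported to OpenCV 3+, assuming only the factory and accessor names changed:

# Sketch: OpenCV 3+ port of the calls above; `img` as in the snippet.
fast = cv2.FastFeatureDetector_create()
kp = fast.detect(img, None)
img2 = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0))

# typed accessors replace getInt/getBool
print("Threshold: ", fast.getThreshold())
print("nonmaxSuppression: ", fast.getNonmaxSuppression())
print("Total Keypoints with nonmaxSuppression: ", len(kp))

fast.setNonmaxSuppression(False)  # replaces fast.setBool('nonmaxSuppression', 0)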
Code example #3
File: sift.py Project: Bojiong/cs231a
def match(img1, img2):
	sift = cv2.xfeatures2d.SIFT_create()
	kp1, des1 = sift.detectAndCompute(img1,None)
	kp1_img = cv2.drawKeypoints(img1, kp1, img1, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
	
	kp2, des2 = sift.detectAndCompute(img2,None)
	kp2_img=cv2.drawKeypoints(img2,kp2,img2,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

	bf = cv2.BFMatcher()
	matches = bf.knnMatch(des1,des2, k=2)
	
	good = []
	for m,n in matches:
		if m.distance < 0.7*n.distance:
			good.append([m])
	
	src_pts = np.float32([ kp1[m[0].queryIdx].pt for m in good ]).reshape(-1,1,2)
	
	des_cor = [kp2[m[0].trainIdx].pt for m in good]
	dst_pts = np.float32(des_cor).reshape(-1,1,2)
	
	centroid = findCentroid(des_cor)
	
	img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
	
	M, mask = cv2.findHomography(src_pts, dst_pts)
	
	
	return (matches, kp1_img, kp2_img, img3, M, centroid)
Code example #4
File: mono_avoid.py Project: simama/RealSense
def find_features():
    print ("Position the board and press SPACE to continue")
    k = 0xFF & cv2.waitKey(1)
    while k != ord(' '):
        status, img1 = cap.read()
        k = 0xFF & cv2.waitKey(1)
    status, img1 = cap.read()
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    #img1 = img1[80:160, 80:240]
    cv2.imshow("Video", img1)
    kp1, des1 = orb_f(img1)
    img = cv2.drawKeypoints(img1,kp1,dummy,color=(0,255,0), flags=0)
    cv2.imshow("Video", img)
    print ('Please move the pattern and press SPACE when you are ready,'
            'or press ESC to cancel the calibration')
    k = 0
    while k != ord(' '):
        status, img2 = cap.read()
        k = 0xFF & cv2.waitKey(1)
        if k == 27:
            return [], None, None, [], None, None  # cancelled: match the arity of the success return
    status, img2 = cap.read()
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Video", img2)
    kp2, des2 = orb_f(img2) 
    img = cv2.drawKeypoints(img2,kp2,dummy,color=(0,255,0), flags=0)
    cv2.imshow("Video", img)
    print ("Press SPACE to continue")
    k = 0xFF & cv2.waitKey(0)
    print ("Done!")
    return kp1, des1, img1, kp2, des2, img2
Code example #5
def do_fast(img, file_true, file_false):
    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector(50)

    # find and draw the keypoints
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0))

    # Print all default params
    #print "Threshold: ", fast.getInt('threshold')
    #print "nonmaxSuppression: ", fast.getBool('nonmaxSuppression')
    #print "neighborhood: ", fast.getInt('type')
    #print "Total Keypoints with nonmaxSuppression: ", len(kp)

    cv2.imwrite(file_true, img2)

    # Disable nonmaxSuppression
    fast.setBool('nonmaxSuppression',0)
    kp = fast.detect(img,None)

    print "Total Keypoints without nonmaxSuppression: ", len(kp)

    img3 = cv2.drawKeypoints(img, kp, color=(255,0,0))

    cv2.imwrite(file_false, img3)
Code example #6
File: FAST.py Project: JamesPei/PythonProjects
def fast():
    img = cv2.imread('test.jpg',0)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector()

    # find and draw the keypoints
    kp = fast.detect(img,None)
    img2 = cv2.drawKeypoints(img, kp, color=(255,0,0))

    # Print all default params
    print "Threshold: ", fast.getInt('threshold')
    print "nonmaxSuppression: ", fast.getBool('nonmaxSuppression')
    # print "neighborhood: ", fast.getInt('type')
    print "Total Keypoints with nonmaxSuppression: ", len(kp)

    # Disable nonmaxSuppression
    fast.setBool('nonmaxSuppression',0)
    kp = fast.detect(img,None)

    print "Total Keypoints without nonmaxSuppression: ", len(kp)
    img3 = cv2.drawKeypoints(img, kp, color=(255,0,0))
    cv2.imwrite('fast_false.png',img3)

    cv2.imshow('img2', img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code example #7
def drawMatchesAndHomography(img1, img2, lkp1, lkp2, status, H, num=10):
    imgm1=cv2.drawKeypoints(img1, lkp1, color=(0,0,255))
    imgm2=cv2.drawKeypoints(img2, lkp2, color=(0,0,255))
    w1=imgm1.shape[1]
    #
    siz1=(img1.shape[1],img1.shape[0])
    arrROI=np.array([(0,0,1), (siz1[0],0,1), (siz1[0],siz1[1],1), (0,siz1[1],1), (0,0,1)])
    prjROI=np.transpose(np.dot(H,np.transpose(arrROI)))
    prjROI=prjROI/np.matlib.repmat(prjROI[:,2],3,1).transpose()
    # img2n=imgm2.copy()
    for ii in xrange(arrROI.shape[0]-1):
            p1=(int(prjROI[ii+0,0] + 0*siz1[0]), int(prjROI[ii+0,1]))
            p2=(int(prjROI[ii+1,0] + 0*siz1[0]), int(prjROI[ii+1,1]))
            cv2.line(imgm2, p1,p2, color=(0,255,0), thickness=3)
    #
    totimg=np.concatenate((imgm1.copy(),imgm2.copy()), axis=1).copy()
    cnt=0
    cntb=0
    for pp1,pp2 in zip(lkp1,lkp2):
        if status[cnt]==1:
            p1=(int(pp1.pt[0]),    int(pp1.pt[1]))
            p2=(int(pp2.pt[0])+w1, int(pp2.pt[1]))
            cv2.line(totimg,p1,p2,(0,255,0))
            if cntb>num:
                break
            cntb+=1
        cnt+=1
    return totimg
Code example #8
def sift_space(fileList):
	image_points = []
	no_images = len(fileList)
	no_sift = 300
	numpoints = no_images*no_sift
	X = np.zeros((numpoints,128))
	i = 0

	# sift = cv2.xfeatures2d.SIFT_create(nfeatures=no_sift)
	sift = cv2.SIFT(nfeatures=no_sift)

	no_im = 0
	for fil in fileList:
		print fil
		im = cv2.imread(fil);
		gray= cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
		(kps, descs) = sift.detectAndCompute(gray, None)
		print len(kps)
		no_sift = min(300, len(kps))
		# print no_sift
		image_points.append([i,i+no_sift])
		no_im += 1
		X[i:i+no_sift] = descs[0:no_sift]
		i += no_sift
		img = im
		cv2.drawKeypoints(im,kps[0:no_sift],img)
		cv2.imwrite(kres_dir+fil[28:-4]+'2.jpg',img)
	# print X[0:i]
	# print image_points
	return [X[0:i],image_points]
Code example #9
File: views.py Project: blelem/HireMeOrGiveMeABeer
def mergeImages(img1, img2, AlignMethod='', Jacobian='',  **kwargs):
   
    (kp1Matches, kp2Matches) = Alignment2D.ExtractFeatures(img1, img2, **kwargs)
  
    Transform = Alignment2D.AlignImages(kp1Matches, kp2Matches, AlignMethod, Jacobian) 

    #Overlay the two images, showing the detected feature.
    rows,cols,colours = img1.shape
    Canvas1 = np.zeros((rows * 2, cols * 2, colours) , img1.dtype)
    Canvas2 = np.copy(Canvas1)

    finalRows, finalCols, colours = Canvas1.shape
    tx = cols/2; # Translate to the center of the canvas
    ty = rows/2; 
    M = np.float32([[1,0,tx],[0,1,ty],[0,0,1]])

    img3 = cv2.drawKeypoints(img1, kp1Matches,color=(0,0,255))
    cv2.warpPerspective(img3, M, (finalCols, finalRows), Canvas1)
    
    finalTransform = np.dot(M, Transform) ; # Translate to the center of the canvas
    img2 = cv2.drawKeypoints(img2, kp2Matches,color=(255,0,0))
    cv2.warpPerspective(img2, finalTransform, (finalCols, finalRows), Canvas2, borderMode=cv2.BORDER_TRANSPARENT)

    alpha = 0.5
    beta = (1.0 - alpha)
    cv2.addWeighted(Canvas1, alpha, Canvas2, beta, 0.0, Canvas1)

    return Canvas1
Code example #10
File: comparar2.py Project: mandrewcito/GEI-VA
def main():
  args = sys.argv[1:]
  sujeto1=args[0]
  sujeto2=args[1]
  im=Imagen("image-"+sujeto1+".jpg")
  im1=Imagen("image-"+sujeto2+".jpg")
  gray = cv2.cvtColor(im.imagen, cv2.COLOR_BGR2GRAY)
  gray1 = cv2.cvtColor(im1.imagen, cv2.COLOR_BGR2GRAY)
  orb=cv2.ORB()
  vis = im.imagen.copy()
  vis1 = im1.imagen.copy()

  #
  rects=f.detectCara(gray)
  x1c, y1c, x2c, y2c=rects[0]
  subrects,vis_roi=f.detectMouth(x1c,(y1c-y2c)*2/3,x2c,y2c,gray,vis)
  x1, y1, x2, y2=subrects[0]
  keypoints=orb.detect(im.imagen)
  zona=(x1c+x1,y1c+y1,x2c+x2,y2c+y2)
  #
  rects=f.detectCara(gray)
  x1, y1, x2, y2=rects[0]
  subrects,vis_roi=f.detectMouth(x1,(y1-y2)*2/3,x2,y2,gray1,vis1)
  x1, y1, x2, y2=subrects[0]
  keypoints1=orb.detect(im1.imagen)
  zona2=(x1c+x1,y1c+y1,x2c+x2,y2c+y2)
  cv2.drawKeypoints(im.imagen,keypoints,im.imagen)
  cv2.drawKeypoints(im1.imagen,keypoints1,im1.imagen)
  c=coincidencias(keypoints1,keypoints,zona,zona2)
  if c>10:
    print True
  else:
    print False
Code example #11
    def drawrelation(self):
        if self.flags & cv2.EVENT_FLAG_LBUTTON:
            x,y = self.rx, self.ry
            cur_vis = self.vis0.copy()  # actual visualization
            r = self.thick + 8  # proximity to keypoint
            m = (ar.anorm(self.rp1 - (x, y)) < r) | (ar.anorm(self.rp2 - (x, y)) < r)
            idxs = np.where(m)[0]  # get indexes near pointer
            kp1s, kp2s = [], []
            for i in idxs:  # for all keypoints near pointer
                (rx1, ry1), (rx2, ry2) = self.rp1[i], self.rp2[i]  # my keypoint
                col = (self.badcolor, self.goodcolor)[status[i]]  # choosing False=red,True=green
                cv2.line(cur_vis, (rx1,ry1), (rx2,ry2), col, self.thick)  # drawing line
                # keypoints to show on event
                kp1, kp2 = self.kp_pairs2[i]
                kp1s.append(kp1)
                kp2s.append(kp2)
            # drawing keypoints near pointer for imgf and imgb
            cur_vis = cv2.drawKeypoints(cur_vis, kp1s, flags=4, color=self.kpcolor)
            cur_vis = cv2.drawKeypoints(cur_vis, kp2s, flags=4, color=self.kpcolor)
            self.rimg = cur_vis
        else:
            self.rimg = self.vis

        if self.y is not None and self.x is not None:
            self.builtinplot(self.sample[self.y,self.x])
Code example #12
File: main.py Project: yycho0108/LearnOpenCV
def identify_blobs(image,processed,size):
    identified = image.copy()


    #BLOB DETECTION ...
    params = cv2.SimpleBlobDetector_Params()
    params.minDistBetweenBlobs = 0

    params.filterByColor = True 
    params.blobColor = 255

    params.filterByArea = True 
    params.minArea = circleArea(size) * 0.3 
    params.maxArea = circleArea(size) * 2.0

    params.filterByCircularity = False

    params.filterByConvexity = True 
    params.minConvexity = 0.5

    params.filterByInertia = False

    detector = cv2.SimpleBlobDetector_create(params)

    labels = detector.detect(processed)
    cv2.drawKeypoints(identified,labels,identified,color=(255,0,0),flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    return len(labels), identified
Code example #13
File: featureDetection.py Project: zoyron/opencv
def main():
    org_image = cv2.imread("../data/house.tiff", 1)
    '''
    SURF improves on SIFT and detects and computes features faster,
    but unfortunately both algorithms are patented (non-free).

    As an alternative, we have ORB by OpenCV. Free. OSS.
    PARAM: nfeatures : Maximum number of features to be detected.
                       Default value is 500.
    '''

    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    orb = cv2.ORB_create(nfeatures=1000)

    kp_sift, des_sift = sift.detectAndCompute(org_image, None)
    kp_surf, des_surf = surf.detectAndCompute(org_image, None)
    kp_orb, des_orb = orb.detectAndCompute(org_image, None)

    org_image_sift = cv2.drawKeypoints(org_image, kp_sift, None)
    org_image_surf = cv2.drawKeypoints(org_image, kp_surf, None)
    org_image_orb = cv2.drawKeypoints(org_image, kp_orb, None)

    cv2.imshow("SIFT Features Detected", org_image_sift)
    cv2.imshow("SURF Features Detected", org_image_surf)
    cv2.imshow("ORB Features Detected", org_image_orb)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
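Note that the patent situation described in the docstring has since changed: the SIFT patent expired in 2020 and SIFT moved into the main module (cv2.SIFT_create() from OpenCV 4.4 on), while SURF remains in the non-free contrib build. A small compatibility sketch, assuming nothing beyond those two factory names:

# Compatibility sketch: pick whichever SIFT factory this build exposes.
try:
    sift = cv2.SIFT_create()               # OpenCV >= 4.4 (main module)
except AttributeError:
    sift = cv2.xfeatures2d.SIFT_create()   # older builds with opencv-contrib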
Code example #14
def blob_process(cv_image):
  hsv_cv_image = convert_bgr2hsv(cv_image)

  yellow_mask_hsv = cv2.inRange(hsv_cv_image, YELLOW_MIN_HSV, YELLOW_MAX_HSV)
  red_mask_hsv = cv2.inRange(hsv_cv_image, RED_MIN_HSV, RED_MAX_HSV)
  green_mask_hsv = cv2.inRange(hsv_cv_image, GREEN_MIN_HSV, GREEN_MAX_HSV)
  blue_mask_hsv = cv2.inRange(hsv_cv_image, BLUE_MIN_HSV, BLUE_MAX_HSV)

  yellow_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=yellow_mask_hsv)
  red_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=red_mask_hsv)
  green_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=green_mask_hsv)
  blue_masked_img = cv2.bitwise_and(cv_image, cv_image, mask=blue_mask_hsv)

  yr_or_img = cv2.bitwise_or(yellow_masked_img, red_masked_img)
  bg_or_img = cv2.bitwise_or(green_masked_img, blue_masked_img)
  final_or_img = cv2.bitwise_or(yr_or_img, bg_or_img)
  #final_or_img = cv2.bitwise_or(yr_or_img, green_masked_img)

  #kernel = np.ones((5,5),np.uint8)
  kernel = np.array([[0,1,1,1,0],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[0,1,1,1,0]],np.uint8)

  eroded_masked_img = cv2.erode(final_or_img, kernel, iterations = 1)
  dilatated_masked_img = cv2.dilate(eroded_masked_img,kernel,iterations = 2)
  #ilatated_masked_img = cv2.dilate(cv_image,kernel,iterations = 1)
  #bilateral = cv2.bilateralFilter(cv_image, 2,25,25)
  return dilatated_masked_img
  #return bilateral
Code example #15
    def showImg(self, frame, keypoints, lines, contours):
        if not self.args.nodisplay:
            if keypoints:
                frame = cv2.drawKeypoints(frame, [keypoints[0]],
                                   np.array([]),
                                   (0,0,255), 
                                   cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) 
                if len(keypoints) > 1:
                    frame = cv2.drawKeypoints(frame, keypoints[1:],
                                   np.array([]),
                                   (255,205,25), 
                                   cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) 
            if lines is not None:
                for l in lines[0]:
                    cv2.line(frame, (l[0], l[1]), (l[2], l[3]), (20,255,255))

            if contours is not None:
                contours0,hier = contours
                cindex = self.values[3] # if -1, all are drawn
                maxlevel = self.values[4]
                if len(contours0) <= cindex:
                    self.putNotice("reset contour id")
                    self.values[3] = -1
                    cindex = -1
                cv2.drawContours(frame, contours0, cindex,
                                (128,255,255), 
                                thickness=1, 
                                lineType=cv2.CV_AA,
                                hierarchy=hier, 
                                maxLevel=maxlevel)
                
            cv2.imshow("img", frame)
Code example #16
File: find_obj.py Project: AleDel/Spout-numpy
    def onmouse(event, x, y, flags, param):
        cur_vis = vis
        if flags & cv2.EVENT_FLAG_LBUTTON:
            cur_vis = vis0.copy()
            r = 8
            m = (anorm(np.array(p1) - (x, y)) < r) | (anorm(np.array(p2) - (x, y)) < r)
            idxs = np.where(m)[0]
            kp1s, kp2s = [], []
            for i in idxs:
                 (x1, y1), (x2, y2) = p1[i], p2[i]
                 col = (red, green)[status[i]]
                 cv2.line(cur_vis, (x1, y1), (x2, y2), col)
                 kp1, kp2 = kp_pairs[i]
                 kp1s.append(kp1)
                 kp2s.append(kp2)
            cur_vis = cv2.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color)
            cur_vis[:,w1:] = cv2.drawKeypoints(cur_vis[:,w1:], kp2s, None, flags=4, color=kp_color)

        cv2.imshow(win, cur_vis)
        '''
        b_channel, g_channel, r_channel = cv2.split(cur_vis)
        alpha_channel = np.ones((cur_vis.shape[0], cur_vis.shape[1]),dtype=np.uint8) #creating a dummy alpha channel image.
        alpha_channel.fill(255)
        img_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
        '''
        send1.send('image', cur_vis)
Code example #17
def SURF(img2, debug):
    # Initiate SURF detector
    surf = cv2.xfeatures2d.SURF_create()
    
    # find the keypoints and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(img1,None)
    kp2, des2 = surf.detectAndCompute(img2,None)
    
    #draw the keypoints
    cv2.drawKeypoints(img1,kp1,None,(255,0,0),4)
    # BFMatcher (Brute Force Matcher) Iniitialize with default params
    bf = cv2.BFMatcher()
    #do brute force matching with k nearest neighbors
    matches = bf.knnMatch(des1,des2, k=2)
    
    # Apply distance ratio test
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    
    if len(good)>10:
        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    
    img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,flags=2)
    if img3 is not None and debug:
        plt.imshow(img3, 'gray'),plt.show()
        print "Number of Features: ", len(good)
    return good
Code example #18
File: ps5.py Project: cowens85/Fall2015
def two_a(a_img, a_Ix, a_Iy, a_corners, a_R, b_img, b_Ix, b_Iy, b_corners, b_R, a_run="foo", _size=5.0, _octave=0):
    a_angle = gradient_angle(a_Ix, a_Iy)
    a_kps = get_keypoints(a_corners, a_R, a_angle, _size, _octave)

    # TODO: Draw keypoints on transA

    b_angle = gradient_angle(b_Ix, b_Iy)
    b_kps = get_keypoints(b_corners, b_R, b_angle, _size, _octave)

    # TODO: Similarly, find keypoints for transB and draw them
    # TODO: Combine transA and transB images (with keypoints drawn) using make_image_pair() and write to file
    # make_image_pair(imgA, imgB)

    # print a_img.shape

    a_img_match = cv2.drawKeypoints(cv2.cvtColor(a_img.copy().astype(np.uint8), cv2.COLOR_GRAY2BGR), a_kps,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    b_img_match = cv2.drawKeypoints(cv2.cvtColor(b_img.copy().astype(np.uint8), cv2.COLOR_GRAY2BGR), b_kps,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    combined_img = make_image_pair(a_img_match, b_img_match)

    if a_run == "transA":
        write_image(combined_img, "ps5-2-a-1.png")
    elif a_run == "simA":
        write_image(combined_img, "ps5-2-a-2.png")


    # TODO: Ditto for (simA, simB) pair

    return a_kps, b_kps
Code example #19
def displayKeypoints(matchDict):
	if s.DISPLAY() :
		m = utils.Bunch(matchDict)
		leftKpImg = cv2.drawKeypoints(m.left,m.leftKp,None,(255,0,0),4)
		rightKpImg = cv2.drawKeypoints(m.right,m.rightKp,None,(255,0,0),4)
		img3 = arragePairs(leftKpImg,rightKpImg)
		cv2.imshow('Keypoints',img3)
Code example #20
File: blob_test.py Project: uf-mil/PropaGator
def detect_bouys(processed_cv_image):
    bouy_detector = cv2.SimpleBlobDetector()
    keypoints = bouy_detector.detect(processed_cv_image)
    cv2.drawKeypoints(
        processed_cv_image, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
    )
    return processed_cv_image
Code example #21
File: nearest_neighbors.py Project: Erotemic/vtool
def test_cv2_flann():
    """
    Ignore:
        [name for name in dir(cv2) if 'create' in name.lower()]
        [name for name in dir(cv2) if 'stereo' in name.lower()]

        ut.grab_zipped_url('https://priithon.googlecode.com/archive/a6117f5e81ec00abcfb037f0f9da2937bb2ea47f.tar.gz', download_dir='.')
    """
    import cv2
    from vtool.tests import dummy
    import plottool as pt
    import vtool as vt
    img1 = vt.imread(ut.grab_test_imgpath('easy1.png'))
    img2 = vt.imread(ut.grab_test_imgpath('easy2.png'))

    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1, img2)
    pt.imshow(disparity)
    pt.show()

    #cv2.estima

    flow = cv2.createOptFlow_DualTVL1()
    img1, img2 = vt.convert_image_list_colorspace([img1, img2], 'gray', src_colorspace='bgr')
    img2 = vt.resize(img2, img1.shape[0:2][::-1])
    out = img1.copy()
    flow.calc(img1, img2, out)

    orb = cv2.ORB_create()
    kp1, vecs1 = orb.detectAndCompute(img1, None)
    kp2, vecs2 = orb.detectAndCompute(img2, None)

    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")

    skp = detector.detect(img1)
    skp, sd = descriptor.compute(img1, skp)

    tkp = detector.detect(img2)
    tkp, td = descriptor.compute(img2, tkp)

    out = img1.copy()
    cv2.drawKeypoints(img1, kp1, outImage=out)
    pt.imshow(out)

    vecs1 = dummy.testdata_dummy_sift(10)
    vecs2 = dummy.testdata_dummy_sift(10)  # NOQA

    FLANN_INDEX_KDTREE = 0  # bug: flann enums are missing
    flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)  # NOQA

    cv2.flann.Index(vecs1, index_params)

    #cv2.FlannBasedMatcher(flann_params)

    cv2.flann.Index(vecs1, flann_params)  # NOQA
Code example #22
def showMatchingPoints(src_image,src_matches,dest_image,dest_matches):
    src_kp_image = ocv.drawKeypoints(src_image, src_matches, color=(0,255,255))
    dest_kp_image = ocv.drawKeypoints(dest_image, dest_matches, color=(0,255,255))
    plot.subplot(2,1,1)
    plot.imshow(src_kp_image)
    plot.subplot(2,1,2)
    plot.imshow(dest_kp_image)
    plot.show()
Code example #23
def Detect_and_Draw():
    img = cv2.imread('1.JPG', cv2.IMREAD_COLOR)
    G_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cvtColor takes a color-conversion code, not an imread flag
    orb = cv2.ORB_create()
    kp, des = orb.detectAndCompute(G_img, None)
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0))
    cv2.imshow('result', img)
    cv2.waitKey(0)
Code example #24
File: bovw.py Project: UelitonFreitas/OpenCVPython
    def desenha_pontos_de_interesse_nas_imagens_em_tons_de_cinza(self):
        """Desenha pontos de interesse em cada uma das imagens coloridas"""

        for index in range(self.numero_de_imagens):
             cv2.drawKeypoints(self.imagens_em_tons_de_cinza[index],
                               self.pontos_de_interesse_das_imagens[index],
                               self.imagens_em_tons_de_cinza[index],
                               flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
Code example #25
File: image.py Project: 917228145/AutomatorX
def find_image_position(origin='origin.png', query='query.png', outfile=None):
    '''
    find all image positions
    @return None if not found else a tuple: (origin.shape, query.shape, positions)
    might raise Exception
    '''
    img1 = cv2.imread(query, 0) # query image(small)
    img2 = cv2.imread(origin, 0) # train image(big)

    # Initiate SIFT detector
    sift = cv2.SIFT()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)
    print len(kp1), len(kp2)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    # flann
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    print len(kp1), len(kp2), 'good cnt:', len(good)

    if len(good)*1.0/len(kp1) < 0.5:
    #if len(good)<MIN_MATCH_COUNT:
        print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
        return img2.shape, img1.shape, []

    queryPts = []
    trainPts = []
    for dm in good:
        queryPts.append(kp1[dm.queryIdx])
        trainPts.append(kp2[dm.trainIdx])

    img3 = cv2.drawKeypoints(img1, queryPts)
    cv2.imwrite('image/query.png', img3)

    img3 = cv2.drawKeypoints(img2, trainPts)
    point = _middlePoint(trainPts)
    print 'position in', point

    if outfile:
        edge = 10
        top_left = (point[0]-edge, point[1]-edge)
        bottom_right = (point[0]+edge, point[1]+edge)
        cv2.rectangle(img3, top_left, bottom_right, 255, 2)
        cv2.imwrite(outfile, img3)
    return img2.shape, img1.shape, [point]
Code example #26
File: detector.py Project: Talamantez/sparrow
 def display_key_points(self):
     im = cv2.imread(self._imagePath)
     gray_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
     detector = cv2.FeatureDetector_create(self.model.key_point_type)
     key_points = detector.detect(gray_im)
     # descriptor_extractor = cv2.DescriptorExtractor_create(self.model.key_point_type)
     # (key_points, descriptors) = descriptor_extractor.compute(gray_im, key_points)
     cv2.drawKeypoints(gray_im, key_points, im)
     cv2.imwrite("Image with keypoints.bmp", im)
Code example #27
File: sift.py Project: LMCallMe/PYCVReading
def plot_features(im,kp,circle=False):
    """ show image with features. input: im (image as array), 
        locs (row, col, scale, orientation of each feature) """
    if circle:
        img=cv2.drawKeypoints(im,kp,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        img=cv2.drawKeypoints(im,kp)
        
    imshow(img)
    axis('off')
Code example #28
File: Matcher.py Project: molinav/python-sat
    def _draw_keypoints(path, img, kps):
        """Export keypoints over the dataset to an image file."""

        fmt = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
        try:
            img2 = cv2.drawKeypoints(img, kps, flags=fmt)
        except TypeError:
            img2 = np.empty(img.shape, dtype=np.uint8)
            img2 = cv2.drawKeypoints(img, kps, img2, flags=fmt)
        cv2.imwrite(path, img2)
Code example #29
File: features.py Project: sunbirddy/cnn-crf-stereo
def compute_sift_matches(im0g, im1g, y_th=3, good_ratio=0.75, verbose=False):
    """
    Compute the SIFT matches given two images
    :param im0g: first image
    :param im1g: second image
    :param y_th: allowed distance in y-direction of the matches
    :param good_ratio: Lowe ratio used to filter out ambiguous matches
    :return: the sorted good matches (based on response)
    """

    if int(cv2.__version__.split('.')[0])<3:
        sift = cv2.SIFT(nOctaveLayers=7)
    else:
        sift = cv2.xfeatures2d.SIFT_create(nOctaveLayers=7)

    kp0, des0 = sift.detectAndCompute(im0g, None)
    kp1, des1 = sift.detectAndCompute(im1g, None)

    bf_matcher = cv2.BFMatcher()
    matches = bf_matcher.knnMatch(des0, des1, k=2)

    # Apply ratio test
    good = []
    y_diffs = []
    for m, n in matches:
        if m.distance < good_ratio * n.distance:
            y_diff = kp0[m.queryIdx].pt[1] - kp1[m.trainIdx].pt[1]
            if np.abs(y_diff) < y_th:
                y_diffs.append(y_diff)
                good.append([m])

    sorted_good = sorted(good, key=lambda x: kp0[x[0].queryIdx].response, reverse=False)

    if verbose:
        plt.figure(15)
        im3 = cv2.drawKeypoints(im0g, kp0, im0g, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        plt.imshow(im3)
        plt.title('im0 keypoints')
        plt.pause(0.1)

        plt.figure(17)
        im4 = cv2.drawKeypoints(im1g, kp1, im1g, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        plt.imshow(im4)
        plt.title('im1 keypoints')
        plt.pause(0.1)

        im5 = cv2.drawMatchesKnn(im0g, kp0, im1g, kp1, sorted_good, im4, flags=2)

        plt.figure(16)
        plt.imshow(im5)
        plt.title('Found ' + str(len(sorted_good)) )
        plt.pause(0.1)

    return sorted_good, y_diffs, kp0, kp1
Code example #30
def save_sift_feature(img):
    img = cv.imread(img)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # create the SIFT object
    sift = cv.xfeatures2d.SIFT_create()
    # find the keypoints in the image
    kp = sift.detect(gray, None)
    img = cv.drawKeypoints(gray, kp, img)
    # compute the SIFT descriptor for each keypoint
    des = sift.compute(gray, kp)
    # des[0] is the list of keypoints, des[1] the matrix of feature vectors
    return img, des
Code example #31
import numpy as np
import os
import cv2

path = os.getcwd() + "\\vision\\"
print(path)
name = path + "000000.png"
img = cv2.imread(name)

img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Initiate STAR detector
orb = cv2.ORB_create()

# find the keypoints with ORB
kp = orb.detect(img_gray, None)

# compute the descriptors with ORB
kp, des = orb.compute(img, kp)
img2 = None

# draw only keypoints location,not size and orientation
img2 = cv2.drawKeypoints(img, kp, img2, color=(0, 0, 255), flags=0)

cv2.imshow('orb', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code example #32
img1 = read_image(filename)
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
harris_points1, sift_points1, sift_keypoints1, surf_points1, surf_keypoints1 = get_harris_sift_surf_points(img1_gray)

image_with_points = img1.copy()

for x, y in harris_points1:
    image_with_points[y, x] = [0, 0, 255]

image_rgb = cv2.cvtColor(image_with_points, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
plt.title("Image with Detected Corners Using Harris Algorithm")
plt.savefig('Images/harris_corners.png')

image_with_keypoints = cv2.drawKeypoints(img1, sift_keypoints1, None)
img_rgb = cv2.cvtColor(image_with_keypoints, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb)
plt.title("Image with Detected Corners Using SIFT algorithm")
plt.savefig('Images/sift_corners.png')

image_with_keypoints = cv2.drawKeypoints(img1, surf_keypoints1, None)
img_rgb = cv2.cvtColor(image_with_keypoints, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb)
plt.title("Image with Detected Corners Using SURF algorithm")
plt.savefig('Images/surf_corners.png')

noisy_repeatability = {'harris': [], 'sift': [], 'surf': []}
for i in range(len(noisy_image_files)):
    img2 = read_image(noisy_image_files[i])
    img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
Code example #33
def detect():

    cap = cv2.VideoCapture('DJI_0005.mp4')
    print("Target video captured")
    while True:

        ret, frame = cap.read()

        if ret is True:
            print("Processing the video")
            #bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            #resized = cv2.resize(bgr, (640, 480)) # For a specific resize operation
        else:
            break

        def resize(image, scale_percent):
            """
            Takes an image with a scaling percentage and returns resizing dimensions
            """
            width = int(image.shape[1] * scale_percent / 100)
            height = int(image.shape[0] * scale_percent / 100)
            dimensions = (width, height)

            return dimensions

        dim = resize(frame, 100)
        resized = cv2.resize(frame, dim)

        #blur = cv2.GaussianBlur(resized, (15, 15), 2) # Making neighboring pixels become a little more uniform in color
        hsv = cv2.cvtColor(
            resized, cv2.COLOR_BGR2HSV)  # Converting image to HSV colorspace
        #bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        #lower_red = np.array([0, 100, 100]) # Lower limits for the red color
        #upper_red = np.array([20, 255, 255]) # Upper limits for the red color

        # Mask specifications for red/orange/yellow colors 0 80 150 , 100 255 255
        lower_red = np.array([0, 150, 150])  # Lower limits for the red color
        upper_red = np.array([100, 255, 255])  # Upper limits for the red color
        red_mask = cv2.inRange(hsv, lower_red,
                               upper_red)  # Creating a red color mask
        red_masked = cv2.bitwise_and(resized, resized, mask=red_mask)
        #cv2.imshow("Red Masked Image", red_masked)

        # Mask specifications for dark blue/magenta/purple
        lower_purple = np.array([170, 100,
                                 100])  # Lower limits for the purple color
        upper_purple = np.array([200, 255,
                                 255])  # Upper limits for the purple color
        purple_mask = cv2.inRange(hsv, lower_purple, upper_purple)
        purple_masked = cv2.bitwise_and(resized, resized, mask=purple_mask)
        #cv2.imshow("Purple Masked Image", purple_masked)

        #----------------------------------------------------------------------
        _, vid = cap.read()
        blended = cv2.addWeighted(red_masked, 1, purple_masked, 1, 0)
        #cv2.imshow("blended", blended)
        #----------------------------------------------------------------------

        blacked = cv2.cvtColor(blended, cv2.COLOR_BGR2GRAY)
        bw = cv2.adaptiveThreshold(blacked, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 115, -20)

        # For removing small objects in the image, opening operation applied
        # Opening operates erosion first and then dilation
        opening_kernel = np.ones((3, 3), np.uint8)
        #opening_kernel2 = np.ones((9,9),np.uint8)
        opening = cv2.morphologyEx(bw, cv2.MORPH_OPEN, opening_kernel)
        #cv2.imshow('Opening Operation',opening)
        #opening2 = cv2.morphologyEx(bw, cv2.MORPH_OPEN, opening_kernel2)
        #cv2.imshow('Opening Operation 2',opening2)

        # For filling holes in the objects, dilation applied
        dilation_kernel = np.ones((7, 7), np.uint8)
        dilation = cv2.dilate(opening, dilation_kernel, iterations=2)
        #cv2.imshow('Dilation Operation', dilation)

        # Open here if you want to convert black<>white
        #des = cv2.bitwise_not(dilation)
        #cv2.imshow('bitwise not', des)

        contours, _ = cv2.findContours(dilation, cv2.RETR_CCOMP,
                                       cv2.CHAIN_APPROX_SIMPLE)

        for c in contours:
            cv2.drawContours(dilation, [c], 0, 255, -1)

            # Calculate moments for each contour
            M = cv2.moments(c)

            # Calculate x,y coordinate of center (guard against zero-area contours)
            if M["m00"] == 0:
                continue
            c_X = int(M["m10"] / M["m00"])
            c_Y = int(M["m01"] / M["m00"])

            # Finding area for each contour
            #resized = cv2.drawContours(resized, contours, c, (0,0,255), 1)
            contour_area = cv2.contourArea(c)

            if (contour_area > 2000):
                contour_area = str(contour_area)
                #cv2.circle(resized, (c_X, c_Y), 5, (255, 255, 255), -1) Putting circle to given coordinates
                cv2.putText(resized, contour_area, (c_X, c_Y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        filled = dilation
        #cv2.imshow('Filled', filled)

        # This conversion is required for blob detection
        filled = cv2.bitwise_not(filled)

        # Setup SimpleBlobDetector parameters
        params = cv2.SimpleBlobDetector_Params()

        # Change thresholds
        params.minThreshold = 10
        params.maxThreshold = 200

        # Filter by Area.
        params.filterByArea = True
        params.minArea = 20.0
        params.maxArea = 8000.0
        params.minDistBetweenBlobs = 15

        # Filter by Circularity
        params.filterByCircularity = False
        params.minCircularity = 0.1

        # Filter by Convexity
        params.filterByConvexity = True
        params.minConvexity = 0.87

        # Filter by Inertia
        params.filterByInertia = False
        params.minInertiaRatio = 0.01

        # Check OpenCV version and construct the detector
        is_v2 = cv2.__version__.startswith("2.")
        if is_v2:
            detector = cv2.SimpleBlobDetector()
        else:
            detector = cv2.SimpleBlobDetector_create(params)

        # Detect blobs
        keypoints = detector.detect(filled)

        # Draw detected blobs as red circles
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob

        im_with_keypoints = cv2.drawKeypoints(
            filled, keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # Show blobs
        cv2.imshow("Keypoints", im_with_keypoints)

        # Display the image/video
        cv2.imshow('Overlay Final Output', resized)

        if cv2.waitKey(1) & 0xff == ord('e'):
            break

    cap.release()
    cv2.destroyAllWindows()
Code example #34
File: blob_detection.py Project: ganya7/OpenCV
# Set up the detector with default parameters.
# detector = cv2.SimpleBlobDetector() #for opencv2.0
is_cv3 = cv2.__version__.startswith("3.")
if is_cv3:
    detector = cv2.SimpleBlobDetector_create()
else:
    detector = cv2.SimpleBlobDetector()

# Detect blobs.
keypoints = detector.detect(image)

# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of
# the circle corresponds to the size of blob
blank = np.zeros((1, 1))  #creates a matrix of 1x1 with elements 0
blobs = cv2.drawKeypoints(image, keypoints, blank, (0, 255, 255),
                          cv2.DRAW_MATCHES_FLAGS_DEFAULT)

# Show keypoints
cv2.imshow("Blobs", blobs)
cv2.waitKey(0)
cv2.destroyAllWindows()

# The function **cv2.drawKeypoints** takes the following arguments:
#
# **cv2.drawKeypoints**(input image, keypoints, blank_output_array, color, flags)
#
# flags:
# - cv2.DRAW_MATCHES_FLAGS_DEFAULT
# - cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
# - cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG
# - cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
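As a quick illustration of the flag choice documented above, a minimal sketch (the file name blobs.jpg is a placeholder):

# Sketch: contrast DRAW_MATCHES_FLAGS_DEFAULT with DRAW_RICH_KEYPOINTS.
import cv2
import numpy as np

image = cv2.imread("blobs.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical file
detector = cv2.SimpleBlobDetector_create()
keypoints = detector.detect(image)

blank = np.zeros((1, 1))
# DEFAULT: a small marker at each keypoint center, size ignored
plain = cv2.drawKeypoints(image, keypoints, blank, (0, 255, 255),
                          cv2.DRAW_MATCHES_FLAGS_DEFAULT)
# RICH: circle diameter reflects the keypoint size
rich = cv2.drawKeypoints(image, keypoints, blank, (0, 255, 255),
                         cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow("default | rich", np.hstack((plain, rich)))
cv2.waitKey(0)
cv2.destroyAllWindows()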
Code example #35
def draw_keypoints(image, keypoints, label=None):
    img = cv2.drawKeypoints(image, keypoints, None)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(img_rgb)
    plt.title(label=label)
    plt.show()
Code example #36
File: blobDetector.py Project: kkawabat/LaserBoard
    # exit simulation when 'q' key is pressed
    if cv2.waitKey(25) & 0xFF == ord('q'):
        flag = True
        break

    ret, view = vid.read()
    hsv_view = cv2.cvtColor(view,cv2.COLOR_BGR2HSV)
    thresh = cv2.inRange(hsv_view, np.array([lowH, lowS, lowV]), np.array([highH, highS, highV]))
    cv2.imshow('threshold', thresh)
    thresh = cv2.GaussianBlur(thresh, (5, 5), 0)
    view = cv2.bitwise_and(view,view,mask=thresh)
    keyPoints = detector.detect(view)


    if len(keyPoints) > 0:
        for i in range(0,len(keyPoints)):
            x = '%.1f' % keyPoints[i].pt[0]
            y = '%.1f' % keyPoints[i].pt[1]
            print("Blob detected at (" + str(x)+ " , "+ str(y) + ")")
    im_with_keypoints = cv2.drawKeypoints(view, keyPoints,np.array([]),(255,0,0),cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    cv2.imshow('keypoints', im_with_keypoints)
Code example #37
# FAST
# https://www.edwardrosten.com/work/rosten_2006_machine.pdf
# http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/AV1011/AV1FeaturefromAcceleratedSegmentTest.pdf

# Create FAST Detector object
fast = cv2.FastFeatureDetector_create()

# Obtain Key points, by default non max suppression is On
# to turn it off use fast.setNonmaxSuppression(False)
keypoints = fast.detect(gray, None)
print "Number of keypoints Detected: ", len(keypoints)

out_im = image.copy()
# Draw rich keypoints on input image
image = cv2.drawKeypoints(image,
                          keypoints,
                          out_im, (0, 255, 0),
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

cv2.imshow('Feature Method - FAST', image)
cv2.waitKey()
cv2.destroyAllWindows()
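To actually exercise the toggle mentioned in the comment above, a minimal sketch reusing the fast detector and gray image from this example:

# Sketch: compare keypoint counts with and without non-max suppression.
fast.setNonmaxSuppression(False)
kp_all = fast.detect(gray, None)
print "Number of keypoints without nonmaxSuppression: ", len(kp_all)
fast.setNonmaxSuppression(True)  # restore the default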

# # # BRIEF
# # http://cvlabwww.epfl.ch/~lepetit/papers/calonder_pami11.pdf
#
# image = cv2.imread('../images/input.jpg')
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#
# # Create FAST detector object
# fast = cv2.FastFeatureDetector_create()
#
Code example #38
File: surf.py Project: xiaojimao18/image-similar
import cv2
import numpy as np

img = cv2.imread("../1.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

surf = cv2.xfeatures2d.SURF_create()
kp = surf.detect(gray, None)

cv2.drawKeypoints(gray, kp, img)
cv2.imwrite("surf_keypoints.jpg", img)
Code example #39
# -*- coding: utf-8 -*-

import cv2
import numpy as np

img = cv2.imread('/home/mahdi/9.jpg')
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

sift = cv2.SIFT()
kp = sift.detect(gray,None)

img=cv2.drawKeypoints(gray,kp)

cv2.imwrite('./images/sift_keypoints.jpg',img)
Code example #40
# extract normal SIFT descriptors
extractor = cv2.DescriptorExtractor_create("SIFT")
(kps_sift, descs_sift) = extractor.compute(image, kps)
print "SIFT: kps=%d, descriptors=%s " % (len(kps_sift), descs_sift.shape)

# extract RootSIFT descriptors
rs = RootSIFT()
(kps_rootsift, descs_rootsift) = rs.compute(image, kps)
print "RootSIFT: kps=%d, descriptors=%s " % (len(kps_rootsift),
                                             descs_rootsift.shape)

pylab.figure()
rgbImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

img_SIFT = cv2.drawKeypoints(rgbImage, kps_sift, None, (255, 0, 255), 4)
pylab.gray()
pylab.subplot(2, 1, 1)
pylab.imshow(img_SIFT)
pylab.axis('off')

img_rootsift = cv2.drawKeypoints(rgbImage, kps_rootsift, None, (255, 0, 255),
                                 4)
pylab.gray()
pylab.subplot(2, 1, 2)
pylab.imshow(img_rootsift)
pylab.gray()
pylab.axis('off')

pylab.show()
Code example #41
    # ScoreType = cv2.ORB_FAST_SCORE
    ORB = cv2.ORB(nfeatures=FeaturePointsNum,
                  scaleFactor=PyramidScale,
                  nlevels=PyramidLevel,
                  edgeThreshold=EdgeThresh,
                  firstLevel=FirstLevel,
                  WTA_K=OrientedBRIEFPointNum,
                  scoreType=ScoreType,
                  patchSize=PatchSize)

    # -------------- detect features --------------- #
    ModelKeyPoints = ORB.detect(ModelGrayImg, mask=None)
    ModelKeyPoints, ModelDescriptions = ORB.compute(ModelGrayImg,
                                                    ModelKeyPoints)
    ModelShowImg = cv2.drawKeypoints(image=ModelImg,
                                     keypoints=ModelKeyPoints,
                                     color=RED)

    QueryKeyPoints = ORB.detect(QueryGrayImg, mask=None)
    QueryKeyPoints, QueryDescriptions = ORB.compute(QueryGrayImg,
                                                    QueryKeyPoints)
    QueryShowImg = cv2.drawKeypoints(image=QueryImg,
                                     keypoints=QueryKeyPoints,
                                     color=RED)

    cv2.imshow('ModelShowImg', ModelShowImg)
    cv2.imshow('QueryShowImg', QueryShowImg)

    # -------------- matching --------------- #
    # use crossCheck instead of ratio check
    CrossCheckFlag = True
Code example #42
File: surf.py Project: nishathussain/openCV_Misc
print("# kps: {}, descriptors: {}".format(len(kps), descs.shape))
# kps: 274, descriptors: (274, 128)

surf = cv2.xfeatures2d.SURF_create()

(kps, descs) = surf.detectAndCompute(gray, None)
print("# kps: {}, descriptors: {}".format(len(kps), descs.shape))
# kps: 393, descriptors: (393, 64)

surf.setHessianThreshold(2000)
print(surf.getHessianThreshold())

(kp, des) = surf.detectAndCompute(gray, None)
print("# kps: {}, descriptors: {}".format(len(kp), des.shape))

img2 = cv2.drawKeypoints(image, kp, None, (255, 0, 0), 4)

cv2.imshow('image', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Check the upright flag; if it is False, set it to True
print(surf.getUpright())
surf.setUpright(True)
print(surf.getUpright())
# Recompute the feature points and draw it
kp = surf.detect(gray, None)
img2 = cv2.drawKeypoints(image, kp, None, (255, 0, 0), 4)

cv2.imshow('image', img2)
cv2.waitKey(0)
Code example #43
def detect_blob(img, mask_params):

    LED_list = []

    l_b = mask_params[0]
    u_b = mask_params[1]

    l_g = mask_params[2]
    u_g = mask_params[3]

    l_r = mask_params[4]
    u_r = mask_params[5]

    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()

    # Change thresholds
    params.minThreshold = 10
    params.maxThreshold = 255

    # Filter by Area.
    params.filterByArea = True
    params.minArea = 100

    # All to be adjusted to the real test image
    # Filter by Circularity
    params.filterByCircularity = False
    params.minCircularity = 0.1

    # Filter by Convexity
    params.filterByConvexity = False
    params.minConvexity = 0.1

    # Filter by Inertia
    params.filterByInertia = False
    params.minInertiaRatio = 0.1

    # Create a detector with the parameters
    # the if statements are in case it runs on an older version of OpenCV, since the function name changed between versions
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)

    # using these bounds on the received image to generate a Blue mask

    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask_b = cv2.inRange(img_hsv, l_b,
                         u_b)  # mask created using threshold values
    res_b = cv2.bitwise_and(img, img, mask=mask_b)
    gray_res_b = cv2.cvtColor(res_b, cv2.COLOR_BGR2GRAY)

    #gray_res_b = cv2.equalizeHist(gray_res_b)      HISTOGRAM EQUALIZATION LEADS TO ARTIFACTS, BUT HELPS IF CONTRAST FROM BACKGROUND IS LOW
    #gray_res_b = cv2.blur(gray_res_b, (5, 5))      MAKES FOR WEAKER EDGES; DECIDE LATER IF YOU WANT TO KEEP BOTH FOR ALL 3 COLORS

    cv2.imshow("masked image_b", 255 - gray_res_b)

    # Detect blobs.
    keypoints = detector.detect(255 - gray_res_b)

    # Draw detected blobs as black circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    im_with_keypoints = cv2.drawKeypoints(
        img, keypoints, np.array([]), (0, 0, 0),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Show keypoints

    for i in range(0, len(keypoints)):
        cv2.circle(im_with_keypoints,
                   (round(keypoints[i].pt[0]), round(keypoints[i].pt[1])), 1,
                   (0, 0, 0), 3)
        LED_list.append(
            [round(keypoints[i].pt[0]),
             round(keypoints[i].pt[1]), "b"])
    cv2.imshow("Keypoints", im_with_keypoints)

    # Repeating for green
    mask_g = cv2.inRange(img_hsv, l_g,
                         u_g)  # mask created using threshold values
    res_g = cv2.bitwise_and(img, img, mask=mask_g)
    gray_res_g = cv2.cvtColor(res_g, cv2.COLOR_BGR2GRAY)

    #gray_res_g = cv2.equalizeHist(gray_res_g)
    #gray_res_g = cv2.blur(gray_res_g, (5, 5))

    cv2.imshow("masked image_g", 255 - gray_res_g)

    # Detect blobs.
    keypoints = detector.detect(255 - gray_res_g)

    # Draw detected blobs as black circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    im_with_keypoints = cv2.drawKeypoints(
        im_with_keypoints, keypoints, np.array([]), (0, 0, 0),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Show keypoints

    for i in range(0, len(keypoints)):
        cv2.circle(im_with_keypoints,
                   (round(keypoints[i].pt[0]), round(keypoints[i].pt[1])), 1,
                   (0, 0, 0), 3)
        LED_list.append(
            [round(keypoints[i].pt[0]),
             round(keypoints[i].pt[1]), "g"])
    cv2.imshow("Keypoints", im_with_keypoints)

    # Repeating for Red

    mask_r = cv2.inRange(img_hsv, l_r,
                         u_r)  # mask created using threshold values
    res_r = cv2.bitwise_and(img, img, mask=mask_r)
    gray_res_r = cv2.cvtColor(res_r, cv2.COLOR_BGR2GRAY)

    #gray_res_r = cv2.equalizeHist(gray_res_r)
    #gray_res_r = cv2.blur(gray_res_r, (5, 5))

    cv2.imshow("masked image_r", 255 - gray_res_r)

    # Detect blobs.
    keypoints = detector.detect(255 - gray_res_r)

    # Draw detected blobs as black circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    im_with_keypoints = cv2.drawKeypoints(
        im_with_keypoints, keypoints, np.array([]), (0, 0, 0),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Show keypoints

    for i in range(0, len(keypoints)):
        cv2.circle(im_with_keypoints,
                   (round(keypoints[i].pt[0]), round(keypoints[i].pt[1])), 1,
                   (0, 0, 0), 3)
        LED_list.append(
            [round(keypoints[i].pt[0]),
             round(keypoints[i].pt[1]), "r"])
    cv2.imshow("Keypoints", im_with_keypoints)

    #  print(LED_list)
    cv2.waitKey(0)
    return LED_list
Code example #44
 def key_show(self,img,key):
     img = cv2.drawKeypoints(img, key, None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
     cv2.imshow("Image", img)
     cv2.waitKey(0)
     cv2.destroyAllWindows()
Code example #45
File: ORB.py Project: nessmp/PruebasRaspberryPi
'''
ORB aims for the highest speed of them all
'''
import cv2
import numpy as np

input_image = cv2.imread("/home/pi/Desktop/Milker_robot/image11.jpg")
gray_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)

orb = cv2.ORB_create()

kp = orb.detect(gray_image, None)

kp, ds = orb.compute(gray_image, kp)

finalKP = input_image
finalKP = cv2.drawKeypoints(input_image,
                            kp,
                            color=(0, 255, 0),
                            flags=0,
                            outImage=finalKP)

cv2.imshow("ORB", finalKP)
cv2.waitKey()
cv2.destroyAllWindows()
Code example #46
# One of the feature-point detection algorithms supported by OpenCV
# 4. SIFT
#
# Unlike the previous ones, this uses an image pyramid to detect features robust to scale changes

import cv2
import numpy as np

img = cv2.imread('/home/lkw/Downloads/cube.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Create the SIFT extractor
sift = cv2.xfeatures2d.SIFT_create()
# Detect keypoints and compute descriptors
keypoints, descriptor = sift.detectAndCompute(gray, None)
print('keypoint:', len(keypoints), 'descriptor:', descriptor.shape)
print(descriptor)

# Draw the keypoints
img_draw = cv2.drawKeypoints(img,
                             keypoints,
                             None,
                             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show the result
cv2.imshow('SIFT', img_draw)
cv2.waitKey()
cv2.destroyAllWindows()
Code example #47
File: stepper_webcam_analyze.py Project: fcant/dicer
    cv2.imshow('erosion',erosion)
    cv2.imshow('closing',closing)
    
    closing = cv2.bitwise_not(closing)

    w = closing.shape[1]  # y
    h = closing.shape[0]  # x

    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(closing, mask, (0, 0), 255)
    cv2.floodFill(closing, mask, (0, 200), 255)

    detector = cv2.SimpleBlobDetector_create(blob_params)
    keypoints = detector.detect(closing)

    img_with_keypoints = cv2.drawKeypoints(closing, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    
    
    
    number = len(keypoints)  # count the detected blobs
    #print(number)
    

    
    if number == 1:
        one = one +1
    elif number == 2:    
        two = two +1
Code example #48
File: record.py Project: francis-james/fishing
        
    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector()
     
    # Detect blobs.
    keypoints = detector.detect(im)
     
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    keypoints2=[]
    for i in keypoints:
        if i.size>20 and i.size<40:
            #print i.size
            keypoints2.append(i)
            openposns.append(i.pt)
    im_with_keypoints = cv2.drawKeypoints(im, keypoints2, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            # Show keypoints
    cv2.imshow("Keypoints", im_with_keypoints)
    cv2.waitKey(1)
    file = "test_image"+str(i)+".png"
    # A nice feature of the imwrite method is that it will automatically choose the
    # correct format based on the file extension you provide. Convenient!
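    # e.g. a ".png" extension selects the PNG encoder and ".jpg" the JPEG
    # encoder; no explicit format flag is needed.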


#cv2.imwrite(file, im_with_keypoints)
 
# You'll want to release the camera, otherwise you won't be able to create a new
# capture object until your script exits
del(camera)

def euclideanDist(a,b):
Code example #49
    mask_blue = cv.inRange(frame_hsv, lower_blue, upper_blue)

    # Run the SimpleBlobDetector on the mask.
    # The results are stored in a vector of 'KeyPoint' objects,
    # which describe the location and size of the blobs.
    #keypoints = detector.detect(mask)
    keypoints_pink = detector.detect(mask_pink)
    keypoints_green = detector.detect(mask_green)
    keypoints_yellow = detector.detect(mask_yellow)
    keypoints_blue = detector.detect(mask_blue)

    # For each detected blob, draw a circle on the frame
    #frame_with_keypoints = cv.drawKeypoints(frame, keypoints, None, color = (0, 255, 0), flags = cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    frame_with_keypoints_pink = cv.drawKeypoints(
        frame,
        keypoints_pink,
        None,
        color=(0, 255, 0),
        flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    frame_with_keypoints_green = cv.drawKeypoints(
        frame,
        keypoints_green,
        None,
        color=(0, 255, 0),
        flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    frame_with_keypoints_yellow = cv.drawKeypoints(
        frame,
        keypoints_yellow,
        None,
        color=(0, 255, 0),
        flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    frame_with_keypoints_blue = cv.drawKeypoints(
Code example #50
import numpy as np
import cv2
import glob

for fname in glob.glob('*.jpg'):

    I = cv2.imread(fname)
    G = cv2.cvtColor(I,cv2.COLOR_BGR2GRAY)

    #sift = cv2.FeatureDetector_create("SIFT") # opencv 2.x.x
    sift = cv2.xfeatures2d.SIFT_create() # opencv 3.x.x
    # use "sift = cv2.SIFT()" if the above fails
    
    keypoints = sift.detect(G,None)

    cv2.drawKeypoints(G,keypoints,I)
    #cv2.drawKeypoints(G,keypoints,I, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)


    # display keypoint properties
    # for kp in keypoints:
    #    print '-'*40
    #    print 'location=(%.2f,%.2f)'%(kp.pt[0], kp.pt[1])
    #    print 'orientation angle=%1.1f'%kp.angle
    #    print 'scale=%f'%kp.size
    
    
    cv2.putText(I,"Press 'q' to quit, any key for next image",(20,20), \
                cv2.FONT_HERSHEY_SIMPLEX, .5,(255,0,0),1)

    cv2.imshow('sift_keypoints',I)
Code example #51
import cv2
import numpy as np
from matplotlib import pyplot as plt

#filename = '/home/acp/work/ggp/cam_images/camera1/spot2_occupied.jpg'
filename = '/home/acp/work/ggp/cam_images/camera1/snap20160707185220.jpg'

img = cv2.imread(filename)

gray1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

detector = cv2.AKAZE_create()
(kps, descs) = detector.detectAndCompute(gray1, None)

img2 = cv2.drawKeypoints(img, kps, None, (0, 0, 255), 4)

plt.imshow(img2), plt.show()

cv2.imwrite('kaze_keypoints.jpg', img2)
Code example #52
File: surf.py Project: Wanghuaichen/3Dreconstruction
def show_keypoints(image, keypoints, descriptors):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img = cv2.drawKeypoints(gray, keypoints, image)
    cv2.imshow("img", img)
    cv2.waitKey(0)
Code example #53
        do_motion_fade = False
        if do_motion_fade:
            motion_mask = motion2(new_frame, base)
        else:
            motion_mask = None
        accum = overlay(new_frame, base, motion_mask)

    final = accum
    if args.draw_keypoints:
        new_filtered = []
        if affine_new is not None:
            affine_T = affine_new.T
            for kp in filtered:
                a = np.array( [kp.pt[0], kp.pt[1], 1.0] )
                pt = np.dot(a, affine_T)
                new_kp = cv2.KeyPoint(pt[0], pt[1], kp.size)
                new_filtered.append(new_kp)
        final = cv2.drawKeypoints(accum, new_filtered, color=(0,255,0), flags=0)
   
    cv2.imshow('bgr', res1)
    cv2.imshow('smooth', new_frame)
    cv2.imshow('final', final)
    #output.write(res1)
    if args.write:
        video_writer.writeFrame(final[:,:,::-1])
    if 0xFF & cv2.waitKey(5) == 27:
        break

cv2.destroyAllWindows()
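
A side note on the keypoint remapping above: cv2.transform applies a 2x3 affine matrix to an Nx1x2 point array in one call, which can replace the per-keypoint dot product. A self-contained sketch (the demo matrix and keypoint stand in for the snippet's affine_new and filtered):

import numpy as np
import cv2

filtered = [cv2.KeyPoint(10.0, 20.0, 4.0)]        # demo keypoint
affine_new = np.float32([[1, 0, 5], [0, 1, -3]])  # demo 2x3 affine

pts = np.float32([kp.pt for kp in filtered]).reshape(-1, 1, 2)
warped = cv2.transform(pts, affine_new).reshape(-1, 2)
new_filtered = [cv2.KeyPoint(float(x), float(y), kp.size)
                for (x, y), kp in zip(warped, filtered)]
print(new_filtered[0].pt)  # (15.0, 17.0)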

Code example #54
0
    # between minCircularity (inclusive) and maxCircularity (exclusive).
    params.filterByCircularity = True
    params.minCircularity = current_min_circularity
    params.maxCircularity = current_max_circularity

    # Extracted blobs have this ratio between minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).
    params.filterByInertia = True
    params.minInertiaRatio = current_min_inertia
    params.maxInertiaRatio = current_max_inertia

    # Extracted blobs have convexity (area / area of blob convex hull)
    # between minConvexity (inclusive) and maxConvexity (exclusive).
    params.filterByConvexity = True
    params.minConvexity = current_min_convexity
    params.maxConvexity = current_max_convexity

    blob_detector = cv.SimpleBlobDetector_create(params)

    # find the keypoints with SimpleBlobDetector
    kp = blob_detector.detect(img, None)

    # draw only keypoint locations, not size and orientation
    img2 = cv.drawKeypoints(img, kp, None, color=(0, 255, 0))

    cv.imshow("SimpleBlobDetector", img2)

    if cv.waitKey(10) & 0xFF == 27:
        break

cv.destroyAllWindows()
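
The current_min_* / current_max_* values read on every pass of the loop above suggest interactive trackbars; a sketch of how they might be wired up (the trackbar names and scaling are assumptions, not the original code):

import cv2 as cv

def on_change(_):
    pass

cv.namedWindow("SimpleBlobDetector")
# Trackbars hold integers, so store percentages and divide when reading.
cv.createTrackbar("minCirc x100", "SimpleBlobDetector", 80, 100, on_change)
cv.createTrackbar("maxCirc x100", "SimpleBlobDetector", 100, 100, on_change)

# Inside the loop:
current_min_circularity = cv.getTrackbarPos("minCirc x100", "SimpleBlobDetector") / 100.0
current_max_circularity = cv.getTrackbarPos("maxCirc x100", "SimpleBlobDetector") / 100.0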
Code example #55
0
#Importing Libraries
import cv2
import numpy as np

# Reading the Image
img = cv2.imread('aloeR.jpg')
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Applying the SIFT feature
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(grey, None)

# Highlighting the Key Points
img = cv2.drawKeypoints(grey, kp, img)
cv2.imshow("SIFT", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code example #56
0
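This snippet begins mid-script; the preamble it assumes is roughly the following (the file path and 2x upscale are inferred from the variable names, so treat it as a sketch):

import cv2

lena = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder path
lena2x = cv2.resize(lena, None, fx=2.0, fy=2.0)

orb = cv2.ORB_create()
kp1 = orb.detect(lena, None)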
kp1, des1 = orb.compute(lena, kp1)

kp2 = orb.detect(lena2x, None)

kp2, des2 = orb.compute(lena2x, kp2)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

matches = bf.match(des1, des2)

matches = sorted(matches, key=lambda x: x.distance)

result = cv2.drawMatches(lena, kp1, lena2x, kp2, matches[:20], None, flags=2)

lena = cv2.drawKeypoints(lena, kp1, None, color=(0, 255, 0), flags=0)

lena2x = cv2.drawKeypoints(lena2x, kp2, None, color=(0, 255, 0), flags=0)

cv2.imshow('img', lena)

cv2.imshow('img2', lena2x)

cv2.imshow('result', result)

while (1):
    key = cv2.waitKey(5)
    if key not in (-1, 255):  # a key was pressed
        print(key)
    if key & 0xFF == 27:      # ESC quits
        break

cv2.destroyAllWindows()
Code example #57
0
File: merge.wheel.plot.py Project: YankongSJTU/CHEA
def image_circle(img_gray, tmpfig, channel, cutoff, sizemin, sizemax):
    ''' channel=0 is the red channel, channel=1 is the green channel; the cutoff value may need to be changed '''
    #img_gray=cv2.imread("img_000000000_YFP_007.tif",-1)
    gray2 = gaa(img_gray)
    m = np.zeros(img_gray.size * 3)
    m.shape = (img_gray.shape[0], img_gray.shape[1], 3)
    gray2 = np.uint8(histeq2(img_gray, 255))
    m[:, :, channel] = gray2
    #m=np.uint8(m)

    img = m.copy()

    k5 = cv2.GaussianBlur(gray2, (7, 7), 0)
    #(_, thresh) = cv2.threshold(k5, 45,255, cv2.THRESH_BINARY)

    #sobelx8u = cv2.Sobel(k5,cv2.CV_8U,1,0,ksize=5)
    #sobelx8u2 = cv2.Sobel((255-k5),cv2.CV_8U,1,0,ksize=5)

    #save_img(m,"1.tif")

    sobelx8u = cv2.Sobel(k5, cv2.CV_8U, 1, 0, ksize=3)
    params = cv2.SimpleBlobDetector_Params()
    # Change thresholds
    params.minThreshold = 1

    params.maxThreshold = 254

    # Filter by Area.
    params.filterByArea = True
    params.minArea = sizemin
    params.maxArea = sizemax
    #Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.001
    # Filter by color

    params.filterByColor = True
    params.blobColor = 255
    # Convexity filtering is disabled here (minConvexity is set but has no effect)
    params.filterByConvexity = False
    params.minConvexity = 0.01

    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.05
    # Create a detector with the parameters
    detector = cv2.SimpleBlobDetector_create(params)
    (_, thresh) = cv2.threshold(sobelx8u, 5, 1, cv2.THRESH_BINARY)
    val = np.multiply(thresh, np.uint8(sobelx8u))

    keypoints = detector.detect(val)
    k = len(keypoints)

    img3 = img.copy()
    real_radius = []
    realpos = []
    real_keypoint = []
    for i in range(k):
        c = (np.uint(keypoints[i].pt[1]), np.uint(keypoints[i].pt[0]))
        if max_near(thresh, c)[0] > 0 and max_near(
                sobelx8u, c)[0] > 30 and max_near(gray2, c)[0] > 30:
            img3 = cv2.circle(
                img3, (c[1], c[0]), 15,
                (0, abs(channel - 1) * 65535, abs(2 - channel) * 65535), 2)
            real_radius.append(keypoints[i].size)
            realpos.append(c)
            real_keypoint.append(keypoints[i])
        #cv2.imwrite('1.tif',img3)

    im_with_keypoints = cv2.drawKeypoints(
        gray2, real_keypoint, np.array([]), (0, 0, 255),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    return img3, realpos, k, sobelx8u, real_radius, im_with_keypoints
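
The helpers gaa, histeq2, max_near, and save_img are project code not included in this excerpt, so image_circle cannot run standalone. A hypothetical call, with parameter values that are assumptions (the filename comes from the commented-out line in the function):

import cv2

img_gray = cv2.imread("img_000000000_YFP_007.tif", -1)  # 16-bit fluorescence image
img3, realpos, k, sobelx8u, radii, vis = image_circle(
    img_gray, tmpfig=None, channel=1, cutoff=30, sizemin=20, sizemax=500)
cv2.imwrite("detected_circles.tif", img3)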
Code example #58
0
    def detect_screws(self, frame, disp):
        grayIN = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # apply GaussianBlur to reduce noise; medianBlur adds further smoothing
        gray = cv2.GaussianBlur(grayIN, (3, 3), 0)
        gray = cv2.medianBlur(gray, 5)
        ddepth = cv2.CV_64F

        # Gradient X
        grad_x = cv2.Sobel(gray, ddepth, 1, 0, 3)
        # Gradient Y
        grad_y = cv2.Sobel(gray, ddepth, 0, 1, 3)
        # Total Gradient (approximate)
        grad = cv2.magnitude(grad_x, grad_y)
        grad = cv2.convertScaleAbs(grad)

        # Adaptive Gaussian thresholding picks out sharp edges in the gradient image
        gray = cv2.adaptiveThreshold(grad, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
                                     cv2.THRESH_BINARY, 11, 15)

        gray = cv2.medianBlur(gray, 3)
        kernel = np.ones((3, 3), np.uint8)
        gray = cv2.erode(gray, kernel, iterations=4)

        # Detect blobs.
        keypoints = self._detector.detect(gray)

        #gray_disp = cv2.drawKeypoints(gray, keypoints, np.array([]), (0, 0, 255),
        #                                          cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # cv2.imshow("keypoints", gray_disp)
        # if cv2.waitKey(10) == 27:
        #     pass
        listpoints = []
        pointpts = np.empty((0, 2))

        i = 0
        guesses = []
        #  For each possible point classify
        for point in keypoints:
            # Select region around point and resize
            rect = ((point.pt[0], point.pt[1]), (point.size, point.size), 0)
            # np.int0 was removed in NumPy 2.0; np.intp is the equivalent alias
            box_pts = np.intp(cv2.boxPoints(rect))
            image = self.rot_crop(grayIN, rect, box_pts, self._size, self._size)
            image = image.reshape(1, 28, 28, 1)
            guess = self._model.predict_proba(image)  # Use CNN to predict image contents
            if int(np.argmax(guess[0])) != 4:
                guesses.append(guess[0])
                pointpts = np.append(pointpts, [[point.pt[0], point.pt[1]]], axis=0)
                listpoints.append(point)
            i = i + 1

        i = 0
        #screw_probs = np.zeros(4)
        fourkeypoints = [None] * 4
        tally = np.zeros(5)

        if len(pointpts) > 0:
            closestptsidx = self.find_four_closest(pointpts)

            for p in range(0, len(closestptsidx)):
                idx = closestptsidx[p]
                #if np.max(guesses[idx]) > 0.999999:
                guess = int(np.argmax(guesses[idx]))
                tally[guess] = tally[guess] + 1
                if guess == 4:
                    pass
                else:
                    #screw_probs[p] = prob
                    fourkeypoints[p] = keypoints[idx]

                    if disp:
                        txt = f"{self._CATEGORIES[guess]}"
                        # draw only filled slots; None entries would crash drawKeypoints
                        drawable = [kp for kp in fourkeypoints if kp is not None]
                        frame = cv2.drawKeypoints(frame, drawable, np.array([]), (0, 0, 255),
                                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
                        frame = cv2.putText(frame, txt, (int(keypoints[idx].pt[0]), int(keypoints[idx].pt[1])),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

                i = i + 1

        # Display the resulting frame
        if disp:
            cv2.imshow("Circles", frame)
            if cv2.waitKey(10) == 27:
                pass

        im_screw_probs = np.zeros((4, 5))
        i = 0
        for point in fourkeypoints:
            if point is not None:
                pointint = listpoints.index(point)
                im_screw_probs[i, :] = guesses[pointint]
            i = i+1


        if time.time() - self._lasttime > 1:
            print(tally)
            self._lasttime = time.time()

        return im_screw_probs, tally
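
self.rot_crop above is project code not shown in this excerpt; a stand-in that crops a possibly rotated box and resizes it for the 28x28 CNN input might look like this (a sketch, not the original implementation):

import cv2
import numpy as np

def rot_crop(gray, rect, box_pts, out_w, out_h):
    # rect is kept for signature parity; the corner points alone define the warp.
    # cv2.boxPoints orders corners bottom-left, top-left, top-right, bottom-right.
    src = box_pts.astype(np.float32)
    dst = np.float32([[0, out_h - 1], [0, 0],
                      [out_w - 1, 0], [out_w - 1, out_h - 1]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(gray, M, (out_w, out_h))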
Code example #59
0
File: vid_proc.py Project: jugatsingh/airbus
params.filterByInertia = False
params.minInertiaRatio = 0.1

# Distance Between Blobs
params.minDistBetweenBlobs = 0.5

# Create a detector with the parameters
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(im_thresh)
print(type(keypoints))
print('Total blobs:' + str(len(keypoints)))
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_thresh_with_keypoints = cv2.drawKeypoints(
    gray, keypoints, np.array([]), (0, 0, 255),
    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

cv2.imwrite('wings.jpg', equ)
# gray = cv2.GaussianBlur(gray,(9,9),0)
cv2.imshow('original', gray)
cv2.waitKey(0)
cv2.imshow('equalised', equ)
cv2.waitKey(0)
cv2.imshow("thresh", im_thresh)
cv2.waitKey(0)
cv2.imshow("keypoints", im_thresh_with_keypoints)
cv2.waitKey(0)

# When everything done, release the capture
cap.release()
Code example #60
0
import cv2 as cv


image = cv.imread("D:/images/dannis2.jpg")
cv.imshow("input", image)

# Create a GFTT (good-features-to-track) feature detector:
# maxCorners=100, qualityLevel=0.01, minDistance=1, blockSize=3, useHarrisDetector=False, k=0.04
gftt = cv.GFTTDetector_create(100, 0.01, 1, 3, False, 0.04)
kp1 = gftt.detect(image,None)
result = cv.drawKeypoints(image, kp1, None, (0, 255, 0), cv.DrawMatchesFlags_DEFAULT)

cv.imshow("GFTT-Keypoint-Detect", result)
cv.waitKey(0)
cv.destroyAllWindows()
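
GFTTDetector wraps the classic goodFeaturesToTrack; the same corners can be obtained directly as an array of coordinates, using the same parameters as the detector call above:

import cv2 as cv

gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
corners = cv.goodFeaturesToTrack(gray, maxCorners=100, qualityLevel=0.01,
                                 minDistance=1, blockSize=3,
                                 useHarrisDetector=False, k=0.04)
if corners is not None:
    print(corners.reshape(-1, 2)[:5])  # first few (x, y) corner coordinates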