def getthresholdedimg(im):
    """Threshold the yellow region of a BGR image.

    Converts the image to HSV (easier color segmentation) and returns a
    single-channel binary image in which pixels with hue in [20, 30]
    (yellow) are white and everything else is black.
    """
    imghsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(im, imghsv, cv.CV_BGR2HSV)
    imgthreshold = cv.CreateImage(cv.GetSize(im), 8, 1)
    # Hue 20-30 with saturation/value >= 100 selects the yellow blob.
    cv.InRangeS(imghsv, cv.Scalar(20, 100, 100), cv.Scalar(30, 255, 255),
                imgthreshold)
    return imgthreshold
def __init__(self):
    """Grab a reference to the webcam and create the working images.

    Exits the process if no frame can be read from the camera.
    """
    # Flag that controls whether monitoring is active.
    self.estado = True
    # Reference to the webcam capture device.
    self.webCam = cv.CaptureFromCAM(0)
    # Current frame from the webcam.
    self.imagem_atual = cv.QueryFrame(self.webCam)
    if self.imagem_atual is None:
        stderr.write('A Web Cam esta desligada. Por favor ligue-a\n')
        exit()
    else:
        # Grayscale image used later to find contours in the current frame.
        self.imagem_cinza = cv.CreateImage(
            cv.GetSize(self.imagem_atual), cv.IPL_DEPTH_8U, 1)
        # 32F image used to convert the current frame for averaging.
        self.imagem_auxiliar = cv.CreateImage(
            cv.GetSize(self.imagem_atual), cv.IPL_DEPTH_32F, 3)
        # Will hold the difference between the current and previous frames.
        self.imagem_diferenca = None
        # Total area (in pixels) of the webcam image.
        self.area = self.imagem_atual.width * self.imagem_atual.height
        self.area_corrente = 0
        self.imagem_diferenca = cv.CloneImage(self.imagem_atual)
        self.imagem_anterior = cv.CloneImage(self.imagem_atual)
        # The current frame must be 32F so RunningAvg can average into it.
        cv.Convert(self.imagem_atual, self.imagem_auxiliar)
def hs_histogram(src):
    """Build and render a 2D hue/saturation histogram of a BGR image.

    Returns a grayscale-on-color image where each (h, s) bin is drawn as
    a rectangle whose intensity is proportional to the bin count.
    """
    # Convert to HSV
    hsv = cv.CreateImage(cv.GetSize(src), 8, 3)
    cv.CvtColor(src, hsv, cv.CV_BGR2HSV)
    # Extract the H and S planes.
    # NOTE: cv.GetSize returns (width, height) while cv.CreateMat expects
    # (rows, cols) == (height, width); the original assignment had them
    # swapped, which breaks cv.Split for non-square images.
    (cols, rows) = cv.GetSize(src)
    h_plane = cv.CreateMat(rows, cols, cv.CV_8UC1)
    s_plane = cv.CreateMat(rows, cols, cv.CV_8UC1)
    cv.Split(hsv, h_plane, s_plane, None, None)
    planes = [h_plane, s_plane]
    h_bins = 30
    s_bins = 32
    hist_size = [h_bins, s_bins]
    # Hue varies from 0 (~0 deg, red) to 180 (~360 deg, red again).
    h_ranges = [0, 180]
    # Saturation varies from 0 (black-gray-white) to 255 (pure color).
    s_ranges = [0, 255]
    ranges = [h_ranges, s_ranges]
    scale = 10
    hist = cv.CreateHist([h_bins, s_bins], cv.CV_HIST_ARRAY, ranges, 1)
    cv.CalcHist([cv.GetImage(i) for i in planes], hist)
    (_, max_value, _, _) = cv.GetMinMaxHistValue(hist)
    hist_img = cv.CreateImage((h_bins * scale, s_bins * scale), 8, 3)
    for h in range(h_bins):
        for s in range(s_bins):
            bin_val = cv.QueryHistValue_2D(hist, h, s)
            # Scale each bin to [0, 255] relative to the fullest bin.
            intensity = cv.Round(bin_val * 255 / max_value)
            cv.Rectangle(hist_img,
                         (h * scale, s * scale),
                         ((h + 1) * scale - 1, (s + 1) * scale - 1),
                         cv.RGB(intensity, intensity, intensity),
                         cv.CV_FILLED)
    return hist_img
def getthresholdedimg(im):
    """Threshold the blue region of a BGR image.

    Converts the image to HSV and returns a single-channel binary image
    with the selected hue range white and all other regions black.
    Stores the HSV conversion in the module-level ``imghsv``.
    """
    global imghsv
    imghsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    # Convert image from BGR to HSV for easier color segmentation.
    cv.CvtColor(im, imghsv, cv.CV_BGR2HSV)
    # Mask image for the blue range.
    imgblue = cv.CreateImage(cv.GetSize(im), 8, 1)
    # Blank image that accumulates the color masks.
    imgthreshold = cv.CreateImage(cv.GetSize(im), 8, 1)
    # cv.InRangeS(src, lowerbound, upperbound, dst)
    # Hue 55-155: original bounds; 105 seems to be the lower threshold
    # needed to eliminate flag detection.
    cv.InRangeS(imghsv, cv.Scalar(55, 100, 100),
                cv.Scalar(155, 255, 255), imgblue)
    # Accumulate the color mask into the output image.
    cv.Add(imgthreshold, imgblue, imgthreshold)
    return imgthreshold
def normalize(self, image):
    """Histogram-equalize a grayscale version of ``image``.

    Returns the equalized single-channel image, or -1 if the input
    fails ``self.image_check``.  When ``self.visualize`` is set, shows
    both images until ESC is pressed.
    """
    # Check whether the input is valid.
    if self.image_check(image) < 0:
        return -1
    # Convert the image to grayscale, then equalize its histogram.
    gsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    newgsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(image, gsimage, cv.CV_RGB2GRAY)
    cv.EqualizeHist(gsimage, newgsimage)
    if self.visualize:
        while True:
            cv.NamedWindow("Normal")
            cv.ShowImage("Normal", gsimage)
            cv.WaitKey(5)
            cv.NamedWindow("Histogram Equalized")
            cv.ShowImage("Histogram Equalized", newgsimage)
            # 1048603 is ESC with the high bits some backends set.
            if cv.WaitKey(5) == 1048603:
                break
        cv.DestroyAllWindows()
    return newgsimage
def redim_picture(img, new_width, new_height):
    """Blur, resize and binarize ``img`` to (new_width, new_height).

    The image is slightly blurred to thicken strokes, resized with cubic
    interpolation, thresholded so every non-white pixel becomes black,
    and finally converted back to RGB.

    NOTE: the original implementation overwrote ``new_width`` and
    ``new_height`` with ``sys.argv[3]``/``sys.argv[4]``, silently
    ignoring the caller's arguments; the parameters are now honored.
    """
    # Blur slightly so thin strokes survive the downscale.
    final_picture = cv2.blur(np.array(img), (100, 100))
    # Resize to the requested dimensions.
    final_picture = cv2.resize(np.array(final_picture),
                               (new_width, new_height),
                               interpolation=cv2.INTER_CUBIC)
    # Home-made thresholding: force every non-white pixel to black.
    h, w = final_picture.shape[:2]
    for i in range(0, h):
        for j in range(0, w):
            pixel_value = final_picture.item(i, j)
            if pixel_value != 255:
                final_picture.itemset((i, j), 0)
    # Convert the single-channel result back to RGB.
    final_picture = cv2.cvtColor(final_picture, cv.CV_GRAY2RGB)
    return final_picture
def __init__(self, threshold=25, doRecord=True, showWindows=True):
    """Set up the camera capture and the images used for motion detection.

    threshold   -- detection threshold exposed on the trackbar.
    doRecord    -- whether to record the moving object.
    showWindows -- whether to show the display window and trackbar.
    """
    self.writer = None
    self.font = None
    self.doRecord = doRecord  # Whether or not to record the moving object
    self.show = showWindows   # Whether or not to show the windows
    self.frame = None

    self.capture = cv.CaptureFromCAM(0)
    # Grab one frame so the recorder/images can be sized from it.
    self.frame = cv.QueryFrame(self.capture)
    if doRecord:
        self.initRecorder()

    self.gray_frame = cv.CreateImage(cv.GetSize(self.frame),
                                     cv.IPL_DEPTH_8U, 1)
    # RunningAvg needs a 32F accumulator image.
    self.average_frame = cv.CreateImage(cv.GetSize(self.frame),
                                        cv.IPL_DEPTH_32F, 3)
    self.absdiff_frame = None
    self.previous_frame = None

    # Total frame area; used to express motion as a fraction of the image.
    self.surface = self.frame.width * self.frame.height
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # Timestamp of the last detection

    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Detection treshold: ", "Image",
                          self.threshold, 100, self.onChange)
def get_image(camera, filename=None): im = cv.QueryFrame(camera) # take greyscale and compute RMS value im2 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_32F, 3) cv.Convert(im, im2) gray = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_32F, 1) cv.CvtColor(im2, gray, cv.CV_RGB2GRAY) gray_mat = cv.GetMat(gray) img = numpy.asarray(gray_mat) power = numpy.sqrt(numpy.mean(img**2)) #save file if filename: font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2, cv.CV_AA) cv.PutText(im, filename, (DATE_X, DATE_Y), font, cv.RGB(255, 255, 0)) filename = os.path.join(DIR_PREFIX, filename + '.jpg') print filename cv.SaveImage(filename, im) del font else: filename = '' #del(camera) del im, im2, gray, img, gray_mat return (power, filename)
def __init__(self, threshold=20, showWindows=False):
    """Set up camera capture and working images for motion detection.

    Changes the working directory to <root>/etc/video before opening
    the camera.

    threshold   -- detection threshold exposed on the trackbar.
    showWindows -- whether to show the display window and trackbar.
    """
    self.writer = None
    self.font = None
    self.show = showWindows  # Whether or not to show the windows
    self.frame = None

    # Work from the project's etc/video directory.
    root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(os.path.join(root_path, "etc", "video"))

    self.capture = cv.CaptureFromCAM(0)
    # Grab one frame so the working images can be sized from it.
    self.frame = cv.QueryFrame(self.capture)

    self.gray_frame = cv.CreateImage(cv.GetSize(self.frame),
                                     cv.IPL_DEPTH_8U, 1)
    # RunningAvg needs a 32F accumulator image.
    self.average_frame = cv.CreateImage(cv.GetSize(self.frame),
                                        cv.IPL_DEPTH_32F, 3)
    self.absdiff_frame = None
    self.previous_frame = None

    # Total frame area; used to express motion as a fraction of the image.
    self.surface = self.frame.width * self.frame.height
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # Timestamp of the last detection

    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Detection treshold: ", "Image",
                          self.threshold, 100, self.onChange)
def getDepth(self, image, image2): grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1) cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY) grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1) cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY) [mat_w, mat_h] = self.size r = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1) r2 = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1) print type(r) print type(image) print type(self.map1x) print cv.GetSize(r) print cv.GetSize(self.map1x) cv.Remap(grayScaleFullImage, r, self.map1x, self.map1y) cv.Remap(grayScaleFullImage2, r2, self.map2x, self.map2y) cv.ShowImage("win3", r) cv.ShowImage("win4", r2) #stereo_match that comes in opencv # disparity range is tuned for 'aloe' image pair window_size = 3 min_disp = 16 num_disp = 112 - min_disp stereo = cv2.StereoSGBM(minDisparity=min_disp, numDisparities=num_disp, SADWindowSize=window_size, uniquenessRatio=10, speckleWindowSize=100, speckleRange=32, disp12MaxDiff=1, P1=8 * 3 * window_size ** 2, P2=32 * 3 * window_size ** 2, fullDP=False ) print 'computing disparity...' disp = stereo.compute(np.asarray(r), np.asarray(r2)).astype(np.float32) / 16.0 print 'generating 3d point cloud...' points = cv2.reprojectImageTo3D(disp, np.asarray(self.Q)) colors = cv2.cvtColor(np.asarray(r), cv2.COLOR_GRAY2RGB) mask = disp > disp.min() out_points = points[mask] out_colors = colors[mask] # Resulting .ply file cam be easily viewed using MeshLab ( http://meshlab.sourceforge.net out_fn = 'out.ply' write_ply('out.ply', out_points, out_colors) print '%s saved' % 'out.ply' cv2.imshow('disparity', (disp - min_disp) / num_disp)
def collectCheckboardPoints(self): self.pointsArray1 = np.zeros((nimages, num_pts, 2)) self.pointsArray2 = np.zeros((nimages, num_pts, 2)) cv.NamedWindow("camera") cv.NamedWindow("camera2") i = 0 while True : frame = cv.QueryFrame(self.video1) # print type(frame) # [rows1, cols] = cv.GetSize(frame) image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels) cv.Copy(frame, image) cv.ShowImage("camera", frame) grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1) cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY) frame2 = cv.QueryFrame(self.video2) image2 = cv.CreateImage(cv.GetSize(frame2), cv.IPL_DEPTH_8U, frame2.nChannels) cv.Copy(frame2, image2) cv.ShowImage("camera2", frame2) grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1) cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY) found, points = cv.FindChessboardCorners(grayScaleFullImage, dims, cv.CV_CALIB_CB_ADAPTIVE_THRESH) if found != 0: print "found chess board " + str(np.shape(points)) cv.DrawChessboardCorners(image, dims, points, found) cv.ShowImage("win2", image) cv.WaitKey(2) # else: # print "no chess" found2, points2 = cv.FindChessboardCorners(grayScaleFullImage2, dims, cv.CV_CALIB_CB_ADAPTIVE_THRESH) if found2 != 0: print "found chess board2" cv.DrawChessboardCorners(image2, dims, points2, found2) cv.ShowImage("win3", image2) cv.WaitKey(2) if found and found2: print "entered here!!!!!" self.pointsArray1[i, :] = points self.pointsArray2[i, :] = points2 i = i + 1 if i == nimages: self.size = cv.GetSize(image) break if cv.WaitKey(10) == 27: break cv.DestroyWindow("Camera 1") cv.DestroyWindow("Camera 2")
def difference_image(img1, img2): print " simg1 = simplify(img1)" simg1 = simplify(img1) print " simg2 = simplify(img2)" simg2 = simplify(img2) #dbg_image('simg1',simg1) #dbg_image('simg2',simg2) #create image buffers img3 = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1) simg3 = cv.CloneImage(img3) bitimage = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1) eimg3 = cv.CloneImage(bitimage) #process print " cv.AbsDiff(simg2,simg1,img3)" cv.AbsDiff(simg2, simg1, img3) print " cv.Smooth(img3,simg3)" cv.Smooth(img3, simg3) #dbg_image('simg3',simg3) # these threshold values must be calibrated #cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_TOZERO_INV) print " cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_BINARY)" cv.Threshold(simg3, bitimage, 50, 255, cv.CV_THRESH_BINARY) #dbg_image('bitimage',bitimage) print " cv.Erode(bitimage,eimg3)" cv.Erode(bitimage, eimg3) #dbg_image('eimg3',eimg3) return eimg3
def init_capture_device(device=-1): print "init capture device" capture = cv.CreateCameraCapture(device) # check that capture device is OK if not capture: print "Error opening capture device" sys.exit(1) #call specific camera initialization camera_init(capture) #Get inital image to set parameters image = cv.QueryFrame(capture) #inital image CAMERA_WIDTH = cv.GetSize(image)[0] #print CAMERA_WIDTH CAMERA_HEIGHT = cv.GetSize(image)[1] #print CAMERA_HEIGHT #load calibration intrinsics = cv.Load(INTRINSICS_FILE) distortion = cv.Load(DISTORTION_FILE) mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1) mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1) cv.InitUndistortMap(intrinsics, distortion, mapx, mapy) return capture, mapx, mapy
def getthresholdedimg(im):
    """Threshold a narrow orange-yellow hue band of a BGR image.

    Returns a single-channel binary image where pixels with hue in
    [23, 25] are white and everything else is black.
    """
    imghsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    # Convert image from BGR to HSV.
    cv.CvtColor(im, imghsv, cv.CV_BGR2HSV)
    imgthreshold = cv.CreateImage(cv.GetSize(im), 8, 1)
    # Catch the orange-yellow blob (very tight hue band).
    cv.InRangeS(imghsv, cv.Scalar(23, 100, 100),
                cv.Scalar(25, 255, 255), imgthreshold)
    return imgthreshold
def cannyGradient(self, image, t1=20, t2=250):
    """Return the Canny edge image of ``image``.

    Converts to grayscale first if the input has more than one channel.
    Returns -1 if the input fails ``self.image_check``.  The thresholds
    t1/t2 should be selected by experiment.
    """
    # Check whether the input is valid.
    if self.image_check(image) < 0:
        return -1
    # Convert the image to B&W if it is not already single-channel.
    # (The original also allocated a throwaway gsimage here that was
    # immediately overwritten on both branches.)
    if image.channels > 1:
        gsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(image, gsimage, cv.CV_BGR2GRAY)
    else:
        gsimage = image
    # Extract the edges.
    edges = cv.CreateImage(cv.GetSize(gsimage), cv.IPL_DEPTH_8U, 1)
    cv.Canny(gsimage, edges, threshold1=t1, threshold2=t2)
    if self.visualize:
        while True:
            cv.NamedWindow("Original")
            cv.ShowImage("Original", gsimage)
            cv.NamedWindow("Edges")
            cv.ShowImage("Edges", edges)
            c = cv.WaitKey(5)
            if c > 0:
                break
        cv.DestroyAllWindows()
    return edges
def getFeatures(self, grey): """ Returns a list of features generated by the OpenCV GoodFeaturesToTrack() function in the gray scale image 'gray'. """ #cv.ShowImage ('getFeatures() grey',grey) eig = cv.CreateImage(cv.GetSize(grey), 32, 1) temp = cv.CreateImage(cv.GetSize(grey), 32, 1) mask = cv.CreateImage(cv.GetSize(grey), 8, 1) # Create a mask image to hide the top 10% of the image (which contains text) (w, h) = cv.GetSize(grey) cv.Rectangle(mask, (0, 0), (w, h), cv.Scalar(255, 0, 0), -1) cv.Rectangle(mask, (0, 0), (w, int(0.1 * h)), cv.Scalar(0, 0, 0), -1) # cv.ShowImage ('mask',mask) # search for the good points feat = cv.GoodFeaturesToTrack(grey, eig, temp, self.MAX_COUNT, self.quality, self.min_distance, mask, 3, 0, 0.04) print "found %d features (MAX_COUNT=%d)" % (len(feat), self.MAX_COUNT) # refine the corner locations feat = cv.FindCornerSubPix( grey, feat, (self.win_size, self.win_size), (-1, -1), (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)) return (feat)
def handel_camera_image(img, hc): #resize it img2 = cv.CreateMat( cv.GetSize(img)[1] / 2, cv.GetSize(img)[0] / 2, cv.CV_8UC3) cv.Resize(img, img2) #convert to grayscale img_gray = cv.CreateImage(cv.GetSize(img2), 8, 1) cv.CvtColor(img2, img_gray, cv.CV_RGB2GRAY) #set the final image img_f = img_gray #detect faces from it objects = cv.HaarDetectObjects(img_f, hc, cv.CreateMemStorage()) number_of_faces = len(objects) if number_of_faces != 1: if debug: print "Error! Number of detected faces: " + str(number_of_faces) return None else: for (x, y, w, h), n in objects: #annotate the image cv.Rectangle(img_f, (x, y), (x + w, y + h), 255) if debug: print "FACE -> h: " + str(h) + ", w: " + str( w) + ", r(w/h): " + str(float(w) / float(h)) #resize to 64 to 64 img_r = resize_crop_img(img_f, x, y, w, h) return (img_f, img_r)
def __init__(self, threshold=1):
    """Set up camera capture and state for motion monitoring.

    threshold -- motion detection threshold.
    """
    self.timeSinceLastMoved = None
    self.timeSinceLastLog = time.time()
    self.timeSinceClean = time.time()
    self.writer = None
    self.font = None
    self.frame = None
    # In case logging needs to be turned on:
    # self.log = sys.stdout
    # self.logFile = open("logFile.log", "w")
    # sys.stdout = self.logFile
    # Monitor on or off.
    self.isMonitorOn = True

    self.capture = cv.CaptureFromCAM(0)
    # Grab one frame so the working images can be sized from it.
    self.frame = cv.QueryFrame(self.capture)

    self.gray_frame = cv.CreateImage(cv.GetSize(self.frame),
                                     cv.IPL_DEPTH_8U, 1)
    # RunningAvg needs a 32F accumulator image.
    self.average_frame = cv.CreateImage(cv.GetSize(self.frame),
                                        cv.IPL_DEPTH_32F, 3)
    self.absdiff_frame = None
    self.previous_frame = None

    # Total frame area; used to express motion as a fraction of the image.
    self.surface = self.frame.width * self.frame.height
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.trigger_time = 0  # Timestamp of the last detection
def run(self):
    """Main loop: dense optical flow between consecutive camera frames.

    Allocates the working images on the first frame, then computes
    Farneback optical flow between the previous and current grayscale
    frames and draws it.  Exits on ESC/q/Q.
    """
    first_frame = True
    while True:
        frame = cv.QueryFrame(self.capture)
        if first_frame:
            # Allocate working images once, sized to the camera frame.
            gray = cv.CreateImage(cv.GetSize(frame), 8, 1)
            prev_gray = cv.CreateImage(cv.GetSize(frame), 8, 1)
            flow = cv.CreateImage(cv.GetSize(frame), 32, 2)
            self.cflow = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
        if not first_frame:
            # Flow needs two frames, so skip the very first iteration.
            cv.CalcOpticalFlowFarneback(prev_gray, gray, flow,
                                        pyr_scale=0.5, levels=3,
                                        winsize=15, iterations=3,
                                        poly_n=5, poly_sigma=1.2,
                                        flags=0)
            self.draw_flow(flow, prev_gray)
        c = cv.WaitKey(7)
        if c in [27, ord('q'), ord('Q')]:
            break
        # Swap the buffers so the current frame becomes the previous one.
        prev_gray, gray = gray, prev_gray
        first_frame = False
def GoodFeaturesToTrack(image, max_count=100, quality=0.1, min_distance=1):
    """Return up to ``max_count`` strong corner features of ``image``.

    Converts the image to grayscale and wraps cv.GoodFeaturesToTrack
    with no mask, block size 3, and Harris disabled (k=0.04 unused).
    """
    grey = toGreyScale(image)
    eig = cv.CreateImage(cv.GetSize(grey), 32, 1)
    temp = cv.CreateImage(cv.GetSize(grey), 32, 1)
    return cv.GoodFeaturesToTrack(grey, eig, temp, max_count, quality,
                                  min_distance, None, 3, 0, 0.04)
def update_mhi(img, dst, diff_threshold):
    """Update the motion-history image and draw motion segments on dst.

    Standard OpenCV motion-template pipeline: frame-difference
    silhouette -> motion history -> gradient orientation -> motion
    segmentation; each sufficiently large segment is drawn as a circle
    with a line indicating its global motion direction.
    """
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC  # current time in seconds
    size = cv.GetSize(img)  # current frame size
    idx1 = last
    # (Re)allocate buffers when first called or when the frame size changes.
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)  # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY)  # convert frame to grayscale
    idx2 = (last + 1) % N  # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh)  # difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY)
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION)  # update MHI
    # Scale the MHI into a displayable 8-bit mask.
    cv.CvtScale(mhi, mask, 255. / MHI_DURATION,
                (MHI_DURATION - timestamp) * 255. / MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA,
                          MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100:  # reject very small components
            color = cv.CV_RGB(255, 0, 0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(
                orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            # Number of points within the silhouette ROI.
            count = cv.Norm(silh_roi, None, cv.CV_L1, None)
            # Skip components with too little actual motion.
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue
            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2),
                      (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude * 1.2), color, 3,
                      cv.CV_AA, 0)
            cv.Line(dst, center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color, 3, cv.CV_AA, 0)
def run(self):
    """Main CamShift tracking loop.

    Builds a hue histogram from a user-selected rectangle and tracks it
    frame-to-frame with cv.CamShift, displaying the frame, the back
    projection and the histogram.  Exits on ESC.
    """
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = True
    while True:
        frame = cv.QueryFrame(self.capture)
        # Convert to HSV and keep only the hue plane.
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute the back projection of the hue histogram.
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CalcArrBackProject([self.hue], backproject, hist)

        # Run cam-shift (if a window is set and != 0).
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(
                backproject, self.track_window, crit)
            # Keep the current rectangle as the tracked area.
            self.track_window = rect

        # While the mouse is pressed, highlight the selection and
        # recompute the histogram from it.
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)  # selected area
            # Darken the frame around the selection (background shadow).
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            # Draw the temporary selection rectangle.
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            # Same area in the hue image, used to calculate the histogram.
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            # Rescale the histogram to its max value (for drawing later).
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            # Tracking: draw the ellipse box of the tracked object.
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                          cv.CV_AA, 0)

        cv.ShowImage("CamShiftDemo", frame)
        cv.ShowImage("Backprojection", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
def determineMarkerQuality_naive(self, frame_org):
    """Estimate marker-detection quality around the last marker location.

    Builds an expected marker template from the complex kernel rotated
    by the detected orientation, thresholds the image patch around the
    last marker location, and sets ``self.quality`` to the fraction of
    black template pixels that match the patch.  Sets quality to 0 when
    the patch cannot be extracted.
    """
    # Rotate the kernel by the (sign-flipped, range-limited) orientation.
    phase = np.exp((self.limitAngleToRange(-self.orientation)) * 1j)
    t1_temp = self.kernelComplex * np.power(phase, self.order)
    t1 = t1_temp.real > self.threshold
    t2_temp = self.kernelComplex * np.power(phase, self.order)
    t2 = t2_temp.real < -self.threshold
    img_t1_t2_diff = t1.astype(np.float32) - t2.astype(np.float32)

    # Mask out the marker "arm" within +/- angleThreshold of the phase.
    angleThreshold = 3.14 / (2 * self.order)
    t3 = np.angle(self.KernelRemoveArmComplex * phase) < angleThreshold
    t4 = np.angle(self.KernelRemoveArmComplex * phase) > -angleThreshold
    mask = 1 - 2 * (t3 & t4)
    template = (img_t1_t2_diff) * mask
    template = cv.fromarray(1 - template)

    # Extract the patch around the last marker location.
    (xm, ym) = self.lastMarkerLocation
    y1 = ym - int(math.floor(float(self.kernelSize / 2)))
    y2 = ym + int(math.ceil(float(self.kernelSize / 2)))
    x1 = xm - int(math.floor(float(self.kernelSize / 2)))
    x2 = xm + int(math.ceil(float(self.kernelSize / 2)))
    try:
        frame = frame_org[y1:y2, x1:x2]
    except (TypeError):
        self.quality = 0
        return

    # Binarize the patch to {0, 1} for comparison with the template.
    w, h = cv.GetSize(frame)
    im_dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Threshold(frame, im_dst, 128, 1, cv.CV_THRESH_BINARY)

    # Count black pixels and black pixels that match the template.
    matches = 0
    blacks = 0
    w, h = cv.GetSize(im_dst)
    for x in xrange(w):
        for y in xrange(h):
            if cv.Get2D(im_dst, y, x)[0] == 0:  # pixel is black
                blacks += 1
                if cv.Get2D(im_dst, y, x)[0] == cv.Get2D(template, y, x)[0]:
                    matches += 1
            else:
                continue
    # self.quality = float(matches)/(w*h)
    # NOTE(review): raises ZeroDivisionError if the patch contains no
    # black pixels — confirm whether callers guarantee blacks > 0.
    self.quality = float(matches) / blacks

    # Re-threshold for display only.
    im_dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Threshold(frame, im_dst, 115, 255, cv.CV_THRESH_BINARY)
    cv.ShowImage("small_image", im_dst)
    cv.ShowImage("temp_kernel", template)
def getthresholdedimg(im):
    """Threshold the yellow region of a BGR image.

    Converts the image to HSV for easier color detection and returns a
    single-channel binary image with the yellow part (hue 20-30) white
    and all other regions black.
    """
    imghsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    # Convert image from BGR to HSV.
    cv.CvtColor(im, imghsv, cv.CV_BGR2HSV)
    imgthreshold = cv.CreateImage(cv.GetSize(im), 8, 1)
    # Select a range of yellow color.
    cv.InRangeS(imghsv, cv.Scalar(20, 100, 100),
                cv.Scalar(30, 255, 255), imgthreshold)
    return imgthreshold
def get_image(self):
    """Return the current RGB frame, downscaled by ``self.scale``.

    NOTE: the original built the scaled copy and then returned a fresh
    full-size ``kv.GetRGB()`` grab, discarding the resize entirely;
    this now returns the scaled frame as the resize clearly intended.
    """
    rgb = kv.GetRGB()
    temp = cv.CreateImage(
        (cv.GetSize(rgb)[0] / self.scale, cv.GetSize(rgb)[1] / self.scale),
        cv.IPL_DEPTH_8U, 3)
    cv.Resize(rgb, temp)
    rgb = temp
    return rgb
def find_Lines(im):
    """Detect straight lines in a grayscale image.

    Runs Canny edge detection and the standard Hough transform;
    returns the sequence of (rho, theta) lines found.
    """
    out = cv.CreateImage(cv.GetSize(im), 8, 1)
    tmp = cv.CreateImage(cv.GetSize(im), 8, 3)
    storage = cv.CreateMemStorage(0)
    cv.Canny(im, out, 50, 200, 3)
    # Color copy of the edge image (kept for drawing/debugging).
    cv.CvtColor(out, tmp, cv.CV_GRAY2BGR)
    return cv.HoughLines2(out, storage, cv.CV_HOUGH_STANDARD, 1,
                          pi / 180, 100, 0, 0)
def run(self):
    """ROI selection loop over a single captured frame.

    Lets the user drag a rectangle, recomputes the hue histogram from
    it and shows either the frame or its back projection.  On ESC the
    selection is dumped to 'newtree.yaml' and the loop exits; 'b'
    toggles back-projection display.
    """
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = False
    while True:
        frame = 0
        frame = self.capture  # cv.QueryFrame(self.capture)

        # Convert to HSV and keep only the hue plane.
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute the back projection of the hue histogram.
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CalcArrBackProject([self.hue], backproject, hist)
        # Cam-shift tracking disabled in this tool:
        # if self.track_window and is_rect_nonzero(self.track_window):
        #     crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        #     (iters, (area, value, rect), track_box) = cv.CamShift(
        #         backproject, self.track_window, crit)
        #     self.track_window = rect

        # While the mouse is pressed, highlight the selection and
        # recompute the histogram from it.
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            # cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        # elif self.track_window and is_rect_nonzero(self.track_window):
        #     cv.EllipseBox(frame, track_box, cv.CV_RGB(255,0,0), 3,
        #                   cv.CV_AA, 0)

        if not backproject_mode:
            cv.ShowImage("SelectROI", frame)
        else:
            cv.ShowImage("SelectROI", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            # Persist the selection, then quit.
            f = open('newtree.yaml', "w")
            yaml.dump(self.selection, f)
            f.close()
            break
        elif c == ord("b"):
            backproject_mode = not backproject_mode
def repeat():
    """Process one webcam frame: locate the red target and display it.

    Computes the target offset via parallelSumRed, prints the estimated
    distance, draws crosshairs on the filtered image and handles the
    ESC (quit) and 'c' (calibrate) keys.
    """
    # Declare as globals since we are assigning to them now.
    global capture
    global camera_index
    global done
    frame = cv.QueryFrame(capture)
    cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)
    imgHsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, imgHsv, cv.CV_BGR2HSV)
    # imgHsv2 = GetThresholdedImage(imgHsv)
    # print(numpy.asarray(cv.GetMat(imgHsv)))
    imgRGBA = cv.CreateImage(cv.GetSize(frame), 8, 4)
    cv.CvtColor(frame, imgRGBA, cv.CV_BGR2RGBA)
    cv.Smooth(imgRGBA, imgRGBA, cv.CV_GAUSSIAN, 3, 3)
    # 3D array: filtered image plus target offsets from image center.
    (filteredImg, offsetX, offsetY) = parallelSumRed(imgRGBA, 640, 480)
    d = numpy.sqrt(offsetX * offsetX + offsetY * offsetY)
    if d != 0:
        # c1/c2 are the calibration constants for distance estimation.
        print("Distance = " + str(c1 / d + c2) + "cm")
        print("OffsetX = " + str(offsetX) + "; OffsetY = " + str(offsetY))
        print("")
    # (The original also allocated an unused imgRGB image here.)
    imgRGBA = cv.fromarray(numpy.reshape(filteredImg, (480, 640, 4)))
    if offsetX != 0 or offsetY != 0:
        # Draw a box and crosshair lines at the detected target.
        cv.Rectangle(imgRGBA, (320 + offsetX - 6, 240 + offsetY - 6),
                     (320 + offsetX + 6, 240 + offsetY + 6),
                     (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (0, 240 + offsetY), (639, 240 + offsetY),
                (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (320 + offsetX, 0), (320 + offsetX, 479),
                (255, 0, 255, 255), 1, 8)
    cv.ShowImage(HSVWindow, imgRGBA)
    cv.ShowImage(original, frame)
    cv.SetMouseCallback(original, onMouseMove, [
        cv.CV_EVENT_MOUSEMOVE,
        numpy.asarray(cv.GetMat(imgHsv)),
        numpy.asarray(cv.GetMat(frame))
    ])
    # cv.ShowImage(filtered, imgHsv2)
    c = cv.WaitKey(10)
    if (str(c) == "27"):  # ESC pressed: quit
        print("Thank You!")
        done = True
    if (str(c) == "99"):  # 'c' pressed: run calibration
        calibration(int(input("How many data points: ")))
def initialize(self, frame): # Initialize # log_file_name = "tracker_output.log" # log_file = file( log_file_name, 'a' ) print str(type(frame)) print "resize to ::: " + str(cv.GetSize(frame)) + " " + str(type(frame)) (w, h) = cv.GetSize(frame) # gray = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1) size = (w, h) #cv.GetSize(frame)#(300 , 300) self.thumbnail = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3) self.grey_average_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1) self.grey_original_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1) # cv.CvtColor(display_image, gray, cv.CV_RGB2GRAY) # prev_image = gray # Greyscale image, thresholded to create the motion mask: self.grey_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1) # The RunningAvg() function requires a 32-bit or 64-bit image... self.running_average_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 3) # ...but the AbsDiff() function requires matching image depths: self.running_average_in_display_color_depth = cv.CloneImage(self.thumbnail) # RAM used by FindContours(): self.mem_storage = cv.CreateMemStorage(0) # The difference between the running average and the current frame: self.difference = cv.CloneImage(self.thumbnail) self.target_count = 1 self.last_target_count = 1 self.last_target_change_t = 0.0 self.k_or_guess = 1 self.codebook = [] self.last_frame_entity_list = [] self.frame_count = 0 # For toggling display: image_list = [ "camera", "difference", "threshold", "display", "faces" ] image_index = 3 # Index into image_list # Prep for text drawing: text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA) text_coord = (5, 15) text_color = cv.CV_RGB(255, 255, 255) # Set this to the max number of targets to look for (passed to k-means): self.max_targets = 5
def get_working(self):
    """Return a copy of the image plus a mask of the current contour.

    Returns (dest, mask8x1): ``dest`` is an unmodified copy of
    ``self.image``; ``mask8x1`` is a single-channel mask with the
    current contour filled white.
    """
    (width, height) = cv.GetSize(self.image)
    dest = cv.CreateMat(height, width, cv.CV_8UC3)
    mask8x1 = cv.CreateImage(cv.GetSize(self.image), 8, 1)
    cv.Zero(mask8x1)
    cv.FillConvexPoly(mask8x1, self.cur_contour, cv.ScalarAll(255))
    # Could mask the 8x3 copy here, but the histogram mask handles it.
    cv.Copy(self.image, dest)
    return (dest, mask8x1)