def collectCheckboardPoints(self): self.pointsArray1 = np.zeros((nimages, num_pts, 2)) self.pointsArray2 = np.zeros((nimages, num_pts, 2)) cv.NamedWindow("camera") cv.NamedWindow("camera2") i = 0 while True : frame = cv.QueryFrame(self.video1) # print type(frame) # [rows1, cols] = cv.GetSize(frame) image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels) cv.Copy(frame, image) cv.ShowImage("camera", frame) grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1) cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY) frame2 = cv.QueryFrame(self.video2) image2 = cv.CreateImage(cv.GetSize(frame2), cv.IPL_DEPTH_8U, frame2.nChannels) cv.Copy(frame2, image2) cv.ShowImage("camera2", frame2) grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1) cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY) found, points = cv.FindChessboardCorners(grayScaleFullImage, dims, cv.CV_CALIB_CB_ADAPTIVE_THRESH) if found != 0: print "found chess board " + str(np.shape(points)) cv.DrawChessboardCorners(image, dims, points, found) cv.ShowImage("win2", image) cv.WaitKey(2) # else: # print "no chess" found2, points2 = cv.FindChessboardCorners(grayScaleFullImage2, dims, cv.CV_CALIB_CB_ADAPTIVE_THRESH) if found2 != 0: print "found chess board2" cv.DrawChessboardCorners(image2, dims, points2, found2) cv.ShowImage("win3", image2) cv.WaitKey(2) if found and found2: print "entered here!!!!!" self.pointsArray1[i, :] = points self.pointsArray2[i, :] = points2 i = i + 1 if i == nimages: self.size = cv.GetSize(image) break if cv.WaitKey(10) == 27: break cv.DestroyWindow("Camera 1") cv.DestroyWindow("Camera 2")
def _mixImageAlphaMask(self, wipeSettings, level, image1, image2, image2mask, mixMat):
    """Mix image2 into image1 through image2mask at the given mix level.

    For level < 0.99 a Fade/Default wipe cross-fades the two images inside
    the mask; other wipe modes are applied either before the masked copy
    (wipePostMix == False) or mixed afterwards via _wipeMix.  When level
    is (near) 1.0, image2 simply replaces image1 inside the mask.
    Returns the mixed image (image1 on all paths except the post-mix wipe,
    which returns _wipeMix's result).  mixMat is used as scratch storage.
    """
    if(level < 0.99):
        wipeMode, wipePostMix, wipeConfig = wipeSettings
        if((wipeMode == WipeMode.Fade) or (wipeMode == WipeMode.Default)):
            # Attenuate image1 by (1 - level) inside the mask: build an
            # 8-bit multiplier image, then Mul with scale 0.004 (~1/256)
            # to renormalize the product back into 8-bit range.
            valueCalc = int(256 * (1.0 - level))
            rgbColor = cv.CV_RGB(valueCalc, valueCalc, valueCalc)
            whiteColor = cv.CV_RGB(255, 255, 255)
            cv.Set(mixMat, whiteColor)
            cv.Set(mixMat, rgbColor, image2mask)
            cv.Mul(image1, mixMat, image1, 0.004)
            # Attenuate image2 by level inside the mask, zero elsewhere.
            valueCalc = int(256 * level)
            rgbColor = cv.CV_RGB(valueCalc, valueCalc, valueCalc)
            cv.Zero(mixMat)
            cv.Set(mixMat, rgbColor, image2mask)
            cv.Mul(image2, mixMat, image2, 0.004)
            # The sum of the two attenuated images is the cross-fade.
            cv.Add(image1, image2, image1)
            return image1
        else:
            if(wipePostMix == False):
                # Apply the wipe to image2/mask first, then masked-copy.
                image2, image2mask = self._wipeImage(wipeMode, wipeConfig, level, image2, image2mask, mixMat, False)
                cv.Copy(image2, image1, image2mask)
                return image1
            else:
                # Masked-copy into scratch first, then let the wipe mix.
                cv.Copy(image1, mixMat)
                cv.Copy(image2, mixMat, image2mask)
                return self._wipeMix(wipeMode, wipeConfig, level, image1, mixMat, image2)
    # Fall-through path: only reached when level >= 0.99 (every branch
    # above returns).  image2 fully replaces image1 inside the mask.
    cv.Copy(image2, image1, image2mask)
    return image1
def __call__(self):
    '''Return True if motion is detected in the sampled frames.

    Samples self.numFrameCheck frames spread evenly across the video
    (skipping `skipFrames - 1` frames between samples) and counts how
    many show movement; returns True once the count reaches the
    threshold, False otherwise.
    '''
    started = time.time()
    counter = 0
    thresh = 7  # number of moving frames required before reporting True
    # total frame count / samples wanted = stride between sampled frames
    skipFrames = int(cv2.VideoCapture(self.file).get(cv.CV_CAP_PROP_FRAME_COUNT) / self.numFrameCheck)
    for _ in tqdm(range(self.numFrameCheck)):
        ret, curframe = self.capture.read()
        # BUG FIX: the inner skip loop reused the outer loop variable `i`,
        # shadowing it; use an independent throwaway name instead.
        for _skip in range(skipFrames - 1):
            ret, curframe = self.capture.read()
        if not self.getFrame():
            break
        curframe = self.conv(curframe)
        instant = time.time()  # timestamp of the frame
        self.processImage(curframe)  # update the difference images
        if self.somethingHasMoved():
            counter += 1
            if counter >= thresh:
                return True
        # current frame becomes the previous frame for the next diff
        cv.Copy(self.frame2gray, self.frame1gray)
        c = cv.WaitKey(1) % 0x100
        if c == 27 or c == 10:  # Break if user enters 'Esc'.
            break
    return False
def url_jpg_contours(url):
    """Fetch a JPEG from `url`, run Canny edge detection and contour
    extraction, and return the contour points as [lng_offset, lat_offset]
    pairs (pixel coordinates divided by `scale`)."""
    position = 100  # Canny low threshold; high threshold is 3x this
    filedata = urllib2.urlopen(url).read()
    # wrap the raw bytes in a 1-row matrix so cv.DecodeImage can read them
    imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
    cv.SetData(imagefiledata, filedata, len(filedata))
    im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)
    # convert to grayscale
    gray_im = cv.CreateImage((im.width, im.height), 8, 1)
    edge_im = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY)
    cv.Canny(gray_im, edge_im, position, position * 3, 3)
    cv.SetZero(col_edge)
    # copy edge points
    cv.Copy(im, col_edge, edge_im)
    # switch to the cv2 API: binarize the edge image and find contours
    # NOTE(review): the 2-value unpack of findContours assumes a cv2
    # version that does not also return the modified image — confirm.
    edge_im_array = np.asarray(edge_im[:])
    ret, edge_im_array = cv2.threshold(edge_im_array, 127, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(edge_im_array, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    scale = 10000.0  # pixel -> coordinate-offset divisor
    points = []
    for contour in contours:
        for i in contour:
            for j in i:  # each j is an (x, y) point of the contour
                lng_offset = j[0] / scale
                lat_offset = j[1] / scale
                points.append([lng_offset, lat_offset])
    return points
def render_with_histogram(img):
    '''Just a utility to draw a grayscale histogram next to an image.

    Returns a new single-channel canvas: the image on the left, a 200px
    wide horizontal-bar histogram strip on the right.
    '''
    _, _, width, height = cv.GetImageROI(img)
    canvas = cv.CreateImage((width + 200, max(height, 255)), cv.IPL_DEPTH_8U, 1)
    cv.Rectangle(canvas, (width, 0), (width + 200, height), (0), cv.CV_FILLED)
    # copy the source image into the left part of the canvas
    cv.SetImageROI(canvas, (0, 0, width, height))
    cv.Copy(img, canvas)
    # draw the histogram bars into the right-hand strip
    cv.SetImageROI(canvas, (width, 0, 200, canvas.height))
    hist = cv.CreateHist([255], cv.CV_HIST_ARRAY, [(0, 255)], 1)
    cv.CalcHist([img], hist)
    values = [cv.QueryHistValue_1D(hist, n) for n in range(255)]
    # BUG FIX: guard against an all-zero histogram, which previously
    # raised ZeroDivisionError when computing the bar lengths.
    max_value = max(values) or 1
    for n, value in enumerate(values):
        # one 1px-high bar per bin, scaled to the 200px strip width
        cv.Rectangle(canvas, (0, n), (int((value / max_value) * 200), n + 1),
                     (255), cv.CV_FILLED)
    cv.SetImageROI(canvas, (0, 0, canvas.width, canvas.height))
    return canvas
def area_to_image(self, lat, lon, width, height, ground_width, zoom=None, ordered=True):
    '''return an RGB image for an area of land, with ground_width
    in meters, and width/height in pixels.

    lat/lon is the top left corner. The zoom is automatically
    chosen to avoid having to grow the tiles'''
    img = cv.CreateImage((width, height), 8, 3)
    tlist = self.area_to_tile_list(lat, lon, width, height, ground_width, zoom)
    # order the display by distance from the middle, so the download happens
    # close to the middle of the image first
    if ordered:
        (midlat, midlon) = self.coord_from_area(width / 2, height / 2, lat, lon, width, ground_width)
        tlist.sort(key=lambda d: d.distance(midlat, midlon), reverse=True)
    for t in tlist:
        scaled_tile = self.scaled_tile(t)
        # clip the tile blit to what actually fits inside the output image
        w = min(width - t.dstx, scaled_tile.width - t.srcx)
        h = min(height - t.dsty, scaled_tile.height - t.srcy)
        if w > 0 and h > 0:
            # copy the visible part of the tile through matching ROIs
            cv.SetImageROI(scaled_tile, (t.srcx, t.srcy, w, h))
            cv.SetImageROI(img, (t.dstx, t.dsty, w, h))
            cv.Copy(scaled_tile, img)
            cv.ResetImageROI(img)
            cv.ResetImageROI(scaled_tile)
    # return as an RGB image (tiles are loaded as BGR)
    cv.CvtColor(img, img, cv.CV_BGR2RGB)
    return img
def run(self):
    """Main loop: watch the camera and record a 10-second clip whenever
    motion is detected (after a 5 s warm-up); Esc quits."""
    started = time.time()
    while True:
        curframe = cv.QueryFrame(self.capture)
        instant = time.time()  # Get timestamp o the frame
        self.processImage(curframe)  # Process the image
        if not self.isRecording:
            if self.somethingHasMoved():
                self.trigger_time = instant  # Update the trigger_time
                # Wait 5 second after the webcam start for luminosity
                # adjusting etc..
                if instant > started + 5:
                    print "Something is moving !"
                    if self.doRecord:  # set isRecording=True only if we record a video
                        self.isRecording = True
        else:
            if instant >= self.trigger_time + 10:  # Record during 10 seconds
                print "Stop recording"
                self.isRecording = False
            else:
                # Put date on the frame, then write it to the video file
                cv.PutText(curframe, datetime.now().strftime("%b %d, %H:%M:%S"), (25, 30), self.font, 0)
                cv.WriteFrame(self.writer, curframe)  # Write the frame
        if self.show:
            cv.ShowImage("Image", curframe)
            cv.ShowImage("Res", self.res)
        # the current frame becomes the reference for the next diff
        cv.Copy(self.frame2gray, self.frame1gray)
        c = cv.WaitKey(1)
        if c == 27 or c == 1048603:  # Break if user enters 'Esc'.
            break
def run(self):
    """Main loop: watch the webcam and play ./movie.mp4 fullscreen with
    VLC whenever motion is detected (after a 5 s warm-up); Esc quits."""
    started = time.time()
    while True:
        curframe = cv.QueryFrame(self.capture)
        instant = time.time()  # Get timestamp o the frame
        self.processImage(curframe)  # Process the image
        if self.somethingHasMoved():
            self.trigger_time = instant  # Update the trigger_time
            # Wait 5 second after the webcam start for luminosity
            # adjusting etc..
            if instant > started + 5:
                print datetime.now().strftime("%b %d, %H:%M:%S"), "Something is moving !"
                # blocks until the movie finishes, then restart the warm-up
                os.system("cvlc --play-and-exit --fullscreen ./movie.mp4")
                started = time.time()
        if self.show:
            cv.ShowImage("Image", curframe)
            cv.ShowImage("Res", self.res)
        # the current frame becomes the reference for the next diff
        cv.Copy(self.frame2gray, self.frame1gray)
        c = cv.WaitKey(1) % 0x100
        if c == 27 or c == 10:  # Break if user enters 'Esc'.
            break
def Xihua(image, array, num=10):
    """Thin `image` by running `num` rounds of vertical then horizontal
    thinning passes over a working copy, which is returned."""
    thinned = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.Copy(image, thinned)
    for _ in range(num):
        VThin(thinned, array)
        HThin(thinned, array)
    return thinned
def run(self):
    """CamShift demo loop: build a hue histogram from a user-selected
    region and track that region frame by frame with cv.CamShift."""
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = True
    while True:
        frame = cv.QueryFrame(self.capture)
        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)
        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CalcArrBackProject([self.hue], backproject, hist)
        # Run the cam-shift (if the a window is set and != 0)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)  # Call the camshift !!
            self.track_window = rect  # Put the current rectangle as the tracked area
        # If mouse is pressed, highlight the current selected rectangle
        # and recompute histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)  # Get specified area
            # Make the effect of background shadow when selecting a window
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            # Draw temporary rectangle
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            # Take the same area but in hue image to calculate histogram
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            # Used to rescale the histogram with the max value (to draw it later on)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            # If window set draw an elipseBox
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)
        cv.ShowImage("CamShiftDemo", frame)
        cv.ShowImage("Backprojection", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))
        c = cv.WaitKey(7) % 0x100
        if c == 27:  # Esc quits
            break
def load_tile_lowres(self, tile):
    '''load a lower resolution tile from cache to fill in a map
    while waiting for a higher resolution tile

    Returns an upscaled TILES_WIDTH x TILES_HEIGHT image, or None if no
    usable lower-resolution tile exists.'''
    if tile.zoom == self.min_zoom:
        return None
    # find the equivalent lower res tile
    (lat, lon) = tile.coord()
    width2 = TILES_WIDTH
    height2 = TILES_HEIGHT
    # each zoom step down doubles the ground area per tile, so the region
    # we need from the lower-res tile halves at every step
    for zoom2 in range(tile.zoom - 1, self.min_zoom - 1, -1):
        width2 /= 2
        height2 /= 2
        if width2 == 0 or height2 == 0:
            break
        tile_info = self.coord_to_tile(lat, lon, zoom2)
        # see if its in the tile cache
        key = tile_info.key()
        if key in self._tile_cache:
            img = self._tile_cache[key]
            if img == self._unavailable:
                continue
        else:
            path = self.tile_to_path(tile_info)
            try:
                img = cv.LoadImage(path)
                # add it to the tile cache
                self._tile_cache[key] = img
                # evict oldest entries when the cache is over budget
                # NOTE(review): popitem(0) assumes an OrderedDict-style
                # cache — confirm the type of self._tile_cache.
                while len(self._tile_cache) > self.cache_size:
                    self._tile_cache.popitem(0)
            except IOError as e:
                continue
        # copy out the quadrant we want; skip if the needed region is
        # not fully inside this tile
        availx = min(TILES_WIDTH - tile_info.offsetx, width2)
        availy = min(TILES_HEIGHT - tile_info.offsety, height2)
        if availx != width2 or availy != height2:
            continue
        cv.SetImageROI(img, (tile_info.offsetx, tile_info.offsety, width2, height2))
        img2 = cv.CreateImage((width2, height2), 8, 3)
        try:
            cv.Copy(img, img2)
        except Exception:
            continue
        cv.ResetImageROI(img)
        # and scale it back up to full tile size
        scaled = cv.CreateImage((TILES_WIDTH, TILES_HEIGHT), 8, 3)
        cv.Resize(img2, scaled)
        #cv.Rectangle(scaled, (0,0), (255,255), (0,255,0), 1)
        return scaled
    return None
def run(self):
    """Interactive ROI selector: build a hue histogram from the mouse
    selection, optionally display its back projection, and dump the
    selection to newtree.yaml when the user presses Esc."""
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = False
    while True:
        frame = 0
        frame = self.capture  #cv.QueryFrame( self.capture )
        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)
        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        # Run the cam-shift
        cv.CalcArrBackProject([self.hue], backproject, hist)
        # if self.track_window and is_rect_nonzero(self.track_window):
        #     crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        #     (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
        #     self.track_window = rect
        # If mouse is pressed, highlight the current selected rectangle
        # and recompute the histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            #cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            # histogram of the hue channel inside the selection
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            # rescale with the max value so it can be drawn later
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        # elif self.track_window and is_rect_nonzero(self.track_window):
        #     cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
        if not backproject_mode:
            cv.ShowImage("SelectROI", frame)
        else:
            cv.ShowImage("SelectROI", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))
        c = cv.WaitKey(7) % 0x100
        if c == 27:
            # Esc: persist the current selection and quit
            f = open('newtree.yaml', "w")
            yaml.dump(self.selection, f)
            f.close()
            break
        elif c == ord("b"):
            # toggle between the camera frame and its back projection
            backproject_mode = not backproject_mode
def crop(self, image, subRect):
    """Return a copy of the `subRect` region of `image`.

    The image's ROI is restored before returning, so the caller's view
    of `image` is unchanged."""
    cv.SetImageROI(image, subRect)
    region = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)
    cv.Copy(image, region)
    cv.ResetImageROI(image)
    return region
class FaceDetect: def __init__(self, DEBUG=False): # Constructor for the videocapture, 0 because there is only one webcam connected # instead of 0, a video file could be passed as parameter self.vidCap = cv2.VideoCapture(0) #self.vidCap = cv2.VideoCapture(VIDEO_FILE) self.DEBUG = DEBUG print 'FaceDetect : DEBUG set to', self.DEBUG def getFrame(self): _, frame = self.vidCap.read( ) # tuple contains a return value and image return frame def detectFaces(self, img): img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # image to grayscale img = cv2.equalizeHist(img) # image equalized # cascade classifier for object detectio # HAAR_FRONTAL_FACE is the path to the file from which the classifier is loaded casClassif = cv2.CascadeClassifier(HAAR_FRONTAL_FACE) rectangles = casClassif.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv.CV_HAAR_SCALE_IMAGE) if len(rectangles) == 0: return if len(rectangles) > 1: for i in range(0, len(rectangles)): re = rectangles[i] if self.DEBUG: print re re[:, :2] += re[:, 2] rectangles[i] = re else: rectangles[:, :2] += rectangles[:, 2] return rectangles def cropRectangle(self, (x1, y1), (x2, y2), original): width = x2 - x1 height = y2 - y1 size = (width, height) if self.DEBUG: print 'W:', width, 'H:', height cropped = cv.CreateImage( (size), 8, 3) # CreateImage( CvSize size, int depth, int channels ) src_region = cv.GetSubRect( cv.fromarray(original), (x1, y1, width, height)) # GetSubRect( img, (pos_left, pos_top, width, height) ) cv.Copy(src_region, cropped) cropped = np.asarray(cropped[:, :]) return cropped
def get_working(self):
    """Return (image copy, mask) where the 8-bit mask is white inside
    the filled current contour and black elsewhere."""
    width, height = cv.GetSize(self.image)
    # single-channel mask: 255 inside the current contour, 0 outside
    contour_mask = cv.CreateImage(cv.GetSize(self.image), 8, 1)
    cv.Zero(contour_mask)
    cv.FillConvexPoly(contour_mask, self.cur_contour, cv.ScalarAll(255))
    # Could 8x3 mask copy but histogram mask will take care of it
    work = cv.CreateMat(height, width, cv.CV_8UC3)
    cv.Copy(self.image, work)
    return (work, contour_mask)
def resize(self, frame):
    """Halve the frame size `self.resize_num` times via pyramid
    down-sampling and return the reduced matrix.

    BUG FIX: the original returned the loop-local `small`, which raised
    NameError when self.resize_num == 0; it also allocated a second
    matrix per iteration just to copy the pyramid result into it.
    Rebinding `img` to the down-sampled matrix covers both cases.
    """
    img = cv.GetMat(frame)
    for _ in range(self.resize_num):
        w, h = cv.GetSize(img)
        # ceil-halved dimensions, as cv.PyrDown requires
        small = cv.CreateMat((h + 1) / 2, (w + 1) / 2, img.type)
        cv.PyrDown(img, small)
        img = small
    return img
def cvShiftDFT(src_arr, dst_arr):
    """Rearrange DFT quadrants so the origin (DC component) sits at the
    image center: swaps quadrants 1<->3 and 2<->4.  Supports in-place
    operation when src_arr is dst_arr."""
    size = cv.GetSize(src_arr)
    dst_size = cv.GetSize(dst_arr)

    if dst_size != size:
        cv.Error(cv.CV_StsUnmatchedSizes, "cv.ShiftDFT",
                 "Source and Destination arrays must have equal sizes",
                 __FILE__, __LINE__)

    if (src_arr is dst_arr):
        # in-place swapping needs one scratch quadrant
        tmp = cv.CreateMat(size[1] / 2, size[0] / 2, cv.GetElemType(src_arr))

    cx = size[0] / 2
    cy = size[1] / 2  # image center

    # source quadrants (clockwise from top-left: q1, q2, q3, q4)
    q1 = cv.GetSubRect(src_arr, (0, 0, cx, cy))
    q2 = cv.GetSubRect(src_arr, (cx, 0, cx, cy))
    q3 = cv.GetSubRect(src_arr, (cx, cy, cx, cy))
    q4 = cv.GetSubRect(src_arr, (0, cy, cx, cy))
    # destination quadrants
    # BUG FIX: these were taken from src_arr, so the out-of-place branch
    # swapped the source onto itself and never wrote dst_arr.
    d1 = cv.GetSubRect(dst_arr, (0, 0, cx, cy))
    d2 = cv.GetSubRect(dst_arr, (cx, 0, cx, cy))
    d3 = cv.GetSubRect(dst_arr, (cx, cy, cx, cy))
    d4 = cv.GetSubRect(dst_arr, (0, cy, cx, cy))

    if (src_arr is not dst_arr):
        if (not cv.CV_ARE_TYPES_EQ(q1, d1)):
            cv.Error(
                cv.CV_StsUnmatchedFormats, "cv.ShiftDFT",
                "Source and Destination arrays must have the same format",
                __FILE__, __LINE__)
        # diagonal swap straight into the destination
        cv.Copy(q3, d1)
        cv.Copy(q4, d2)
        cv.Copy(q1, d3)
        cv.Copy(q2, d4)
    else:
        # in-place: swap each diagonal pair through tmp
        cv.Copy(q3, tmp)
        cv.Copy(q1, q3)
        cv.Copy(tmp, q1)
        cv.Copy(q4, tmp)
        cv.Copy(q2, q4)
        cv.Copy(tmp, q2)
def find_rectangles(self, input_img):
    """ Find contours in the input image.
    input_img: Is a binary image

    Returns a new 8-bit single-channel image with the detected contour
    tree drawn onto it.
    """
    # work on a copy: cv.FindContours modifies its input image
    scratch = cv.CreateMat(input_img.height, input_img.width, input_img.type)
    cv.Copy(input_img, scratch)
    # image the contours get drawn into
    outline_img = cv.CreateMat(input_img.height, input_img.width, cv.CV_8UC1)
    found = cv.FindContours(scratch, cv.CreateMemStorage(),
                            cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
    cv.DrawContours(outline_img, found, 255, 0, 10)
    return outline_img
def test_2686307(self):
    """Regression test for OpenCV issue 2686307: masked cv.Copy must only
    overwrite destination pixels where the mask is non-zero."""
    lena = cv.LoadImage(find_sample("lena.jpg"), 1)
    dst = cv.CreateImage((512, 512), 8, 3)
    cv.Set(dst, (128, 192, 255))
    # mask: a single filled rectangle, zero everywhere else
    mask = cv.CreateImage((512, 512), 8, 1)
    cv.Zero(mask)
    cv.Rectangle(mask, (10, 10), (300, 100), 255, -1)
    cv.Copy(lena, dst, mask)
    self.snapL([lena, dst, mask])
    # smoke-check matrix attribute access
    m = cv.CreateMat(480, 640, cv.CV_8UC1)
    print "ji", m
    print m.rows, m.cols, m.type, m.step
def rgb2gray(img, grayscale):
    '''convert img to grayscale
    @param img: image to convert
    @param grayscale: target single-channel image
    @return: the filled `grayscale` image

    BUG FIX: a stray `return cv.SetImageCOI(img, 2)` exited early
    (returning None, since SetImageCOI has no result) and left an
    alternative channel-of-interest copy as dead code.  The dead
    alternative was removed and the converted image is returned.
    '''
    # cv.CvtColor(img,self.img,cv.CV_RGB2HLS)
    cv.CvtColor(img, grayscale, cv.CV_RGB2GRAY)
    return grayscale
def find_squares4(color_img): """ Finds multiple squares in image Steps: -Use Canny edge to highlight contours, and dilation to connect the edge segments. -Threshold the result to binary edge tokens -Use cv.FindContours: returns a cv.CvSequence of cv.CvContours -Filter each candidate: use Approx poly, keep only contours with 4 vertices, enough area, and ~90deg angles. Return all squares contours in one flat list of arrays, 4 x,y points each. """ #select even sizes only width, height = (color_img.width & -2, color_img.height & -2) timg = cv.CloneImage(color_img) # make a copy of input image gray = cv.CreateImage((width, height), 8, 1) # select the maximum ROI in the image cv.SetImageROI(timg, (0, 0, width, height)) # down-scale and upscale the image to filter out the noise pyr = cv.CreateImage((width / 2, height / 2), 8, 3) cv.PyrDown(timg, pyr, 7) cv.PyrUp(pyr, timg, 7) tgray = cv.CreateImage((width, height), 8, 1) squares = [] # Find squares in every color plane of the image # Two methods, we use both: # 1. Canny to catch squares with gradient shading. Use upper threshold # from slider, set the lower to 0 (which forces edges merging). Then # dilate canny output to remove potential holes between edge segments. # 2. Binary thresholding at multiple levels N = 11 for c in [0, 1, 2]: #extract the c-th color plane cv.SetImageCOI(timg, c + 1) cv.Copy(timg, tgray, None) cv.Canny(tgray, gray, 0, 50, 5) cv.Dilate(gray, gray) squares = squares + find_squares_from_binary(gray) # Look for more squares at several threshold levels for l in range(1, N): cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255, cv.CV_THRESH_BINARY) squares = squares + find_squares_from_binary(gray) return squares
def compute_ref(self):
    '''Compute a reference histogram that matched regions should approximate'''
    image, polygon = self.get_ref()
    width, height = cv.GetSize(image)  # (rows, cols,...)
    # mask: 255 inside the reference polygon, 0 elsewhere
    ref_mask = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.Zero(ref_mask)
    cv.FillConvexPoly(ref_mask, polygon, cv.ScalarAll(255))
    # full copy of the reference image the histogram is computed over
    ref_img = cv.CreateMat(height, width, cv.CV_8UC3)
    cv.Copy(image, ref_img)
    self.ref_hist = hs_histogram(ref_img, ref_mask)
def detect_and_draw(self, imgmsg):
    """ROS image callback: back-project the stored hue histogram, run
    CamShift, publish the tracked bounding box and back-projection
    image, and let the user reseed the histogram with a mouse drag."""
    if self.pause:
        return
    # frame = cv.QueryFrame( self.capture )
    frame = self.br.imgmsg_to_cv(imgmsg, "bgr8")

    # Convert to HSV and keep the hue
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.Split(hsv, self.hue, None, None, None)

    # Compute back projection
    backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

    # Run the cam-shift
    cv.CalcArrBackProject([self.hue], backproject, self.hist)
    if self.track_window and is_rect_nonzero(self.track_window):
        crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
        self.track_window = rect
        # publish the tracked region and the back-projection image
        x, y, w, h = rect
        self.bbpub.publish(RegionOfInterest(x, y, w, h, False))
        proba_msg = self.br.cv_to_imgmsg(backproject)
        proba_msg.header = imgmsg.header
        self.bppub.publish(proba_msg)

    # If mouse is pressed, highlight the current selected rectangle
    # and recompute the histogram
    if self.drag_start and is_rect_nonzero(self.selection):
        sub = cv.GetSubRect(frame, self.selection)
        save = cv.CloneMat(sub)
        # darken the whole frame, then restore the selection for contrast
        cv.ConvertScale(frame, frame, 0.5)
        cv.Copy(save, sub)
        x, y, w, h = self.selection
        cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
        # recompute the histogram from the hue channel of the selection
        sel = cv.GetSubRect(self.hue, self.selection)
        cv.CalcArrHist([sel], self.hist, 0)
        (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)
        if max_val != 0:
            cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
    elif self.track_window and is_rect_nonzero(self.track_window):
        cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

    self.frame = frame
    self.backproject = backproject
def cvCopy(src, dst, mask=None):
    """Size- and depth-checked wrapper around cv.Copy.

    Copies src into dst (optionally through mask) after verifying both
    ROIs have matching width/height and the images share a depth.
    Raises RuntimeError describing the mismatching geometry otherwise.
    """
    width1 = cv.GetImageROI(src)[2]
    height1 = cv.GetImageROI(src)[3]
    width2 = cv.GetImageROI(dst)[2]
    height2 = cv.GetImageROI(dst)[3]
    if not (src.depth == dst.depth and width1 == width2 and height1 == height2):
        # Include the geometry in the exception itself instead of only
        # printing it to stdout, so callers/logs see what mismatched.
        raise RuntimeError('cvCopy argument error: src %s vs dst %s' %
                           ((width1, height1, src.depth),
                            (width2, height2, dst.depth)))
    cv.Copy(src, dst, mask)
    return
def blend_views(bldIm, frame, mask, frec, max_rec):
    """Accumulate `frame` (through `mask`) into the blend image `bldIm`
    at the position of `frec` within the overall bounding rect `max_rec`.
    Returns the updated blend image."""
    maskMat = cv.fromarray(mask)
    # displacement of this frame's rect inside the full mosaic
    dispX = int(np.round(frec.left - max_rec.left))
    dispY = int(np.round(frec.top - max_rec.top))
    bldROI = cv.GetImage(bldIm)
    cv.SetImageROI(bldROI, (dispX, dispY, int(np.round(
        frec.width())), int(np.round(frec.height()))))
    # masked add of the new frame into the mosaic region
    cv.Add(bldROI, cv.fromarray(frame), bldROI, maskMat)
    cv.ResetImageROI(bldROI)
    cv.Copy(bldROI, bldIm)
    return bldIm
def process_image(self):
    """Process the image from the camera in order to remove noise.

    Produces a binary motion image in self.resultImage from the
    difference between the current and previous grayscale frames, and
    updates the previous-frame buffer for the next call."""
    cv.CvtColor(self.colorFrame, self.currentGrayFrame, cv.CV_RGB2GRAY)
    # remove noise, etc.
    self.currentGrayFrame = self.reduce_image_noise(self.currentGrayFrame)
    # find the difference between the current and previous frame
    cv.AbsDiff(self.currentGrayFrame, self.previousGrayFrame, self.resultImage)
    # calculate the binary image that shows where there is change in the image
    cv.Threshold(self.resultImage, self.resultImage, 10, 255, cv.CV_THRESH_BINARY_INV)
    # the current frame becomes the reference for the next call
    cv.Copy(self.currentGrayFrame, self.previousGrayFrame)
def on_trackbar(position):
    """Trackbar callback: recompute the edge overlay using the slider
    `position` as the Canny low threshold (high threshold = 3x low).
    Operates on the module-level images gray/edge/im/col_edge."""
    cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.Not(gray, edge)
    # run the edge dector on gray scale
    cv.Canny(gray, edge, position, position * 3, 3)
    # reset
    cv.SetZero(col_edge)
    # copy edge points (original colors, masked by the edge image)
    cv.Copy(im, col_edge, edge)
    # show the im
    cv.ShowImage(win_name, col_edge)
def on_trackbar(self, position):
    """Trackbar callback: recompute the edge overlay using the slider
    `position` as the Canny low threshold (high threshold = 3x low),
    then hand the threshold on to self.process_image."""
    cv.Smooth(self.source_image, self.edge, cv.CV_BLUR, 3, 3, 0)
    cv.Not(self.source_image, self.edge)
    # run the edge dector on gray scale
    cv.Canny(self.source_image, self.edge, position, position * 3, 3)
    # reset
    cv.SetZero(self.col_edge)
    # copy edge points (original colors, masked by the edge image)
    cv.Copy(self.source_color, self.col_edge, self.edge)
    # show the im
    cv.ShowImage(win_name, self.col_edge)
    self.process_image(position)
def addText(self, frame, textTop, textBottom):
    """Return a copy of `frame` extended vertically with black text
    bands: `textTop` drawn above the image, `textBottom` below it."""
    frame_w, frame_h = cv.GetSize(frame)
    pad = 8  ## add space for text notations
    metrics = cv.GetTextSize(textTop, self._font)
    line_h = metrics[1]
    # allocate room for two text lines above and below the frame
    annotated = cv.CreateImage(
        (frame_w, frame_h + 4 * line_h + 2 * pad),
        frame.depth, frame.channels)
    cv.Set(annotated, 0)
    # paste the original frame below the top text band
    cv.SetImageROI(annotated, (0, 2 * line_h + pad, frame_w, frame_h))
    cv.Copy(frame, annotated)
    cv.ResetImageROI(annotated)
    ## write text
    cv.PutText(annotated, textTop, (5, 2 * line_h + pad / 2),
               self._font, self._fontcolor)
    cv.PutText(annotated, textBottom,
               (5, int(frame_h + 4 * line_h + 1.5 * pad)),
               self._font, self._fontcolor)
    return annotated
def findImage(img):
    """Locate a colored marker in `img` via HSV range thresholding.

    Returns (annotated image, threshold mask) when the marker is found,
    otherwise None."""
    #Set up storage for images
    frame_size = cv.GetSize(img)
    img2 = cv.CreateImage(frame_size, 8, 3)
    tmp = cv.CreateImage(frame_size, 8, cv.CV_8U)
    h = cv.CreateImage(frame_size, 8, 1)

    #copy original image to do work on
    cv.Copy(img, img2)

    #altering the image a bit for smoother processing
    cv.Smooth(img2, img2, cv.CV_BLUR, 3)
    cv.CvtColor(img2, img2, cv.CV_BGR2HSV)

    #make sure temp is empty
    cv.Zero(tmp)

    #detection based on HSV value
    #30,100,90 lower limit on pic 41,255,255 on pic
    #cv.InRangeS(img2,cv.Scalar(25,100,87),cv.Scalar(50,255,255),tmp)
    #Range for green plate dot in my Living room
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(65,95,90),tmp)
    #classroom
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(70,110,70),tmp)
    #Kutztowns Gym
    cv.InRangeS(img2, cv.Scalar(65, 100, 112), cv.Scalar(85, 107, 143), tmp)

    # clean up the mask: dilate to merge nearby blobs, erode to shrink back
    elmt_shape = cv.CV_SHAPE_ELLIPSE
    pos = 3
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos, elmt_shape)
    cv.Dilate(tmp, tmp, element, 6)
    cv.Erode(tmp, tmp, element, 2)

    # single channel copy for contour scanning
    cv.Split(tmp, h, None, None, None)
    storage = cv.CreateMemStorage()
    scan = sc.FindContours(h, storage)
    xyImage = drawCircles(scan, img)

    if xyImage != None:
        return (xyImage, tmp)
    else:
        return None