def getDepth(self, image, image2):
    grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1)
    cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY)
    grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1)
    cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY)
    [mat_w, mat_h] = self.size
    r = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1)
    r2 = cv.CreateMat(mat_h, mat_w, cv.CV_8UC1)
    print type(r)
    print type(image)
    print type(self.map1x)
    print cv.GetSize(r)
    print cv.GetSize(self.map1x)

    # rectify both views with the precomputed undistort/rectify maps
    cv.Remap(grayScaleFullImage, r, self.map1x, self.map1y)
    cv.Remap(grayScaleFullImage2, r2, self.map2x, self.map2y)
    cv.ShowImage("win3", r)
    cv.ShowImage("win4", r2)

    # stereo_match sample that ships with OpenCV;
    # disparity range is tuned for the 'aloe' image pair
    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp
    stereo = cv2.StereoSGBM(minDisparity=min_disp,
                            numDisparities=num_disp,
                            SADWindowSize=window_size,
                            uniquenessRatio=10,
                            speckleWindowSize=100,
                            speckleRange=32,
                            disp12MaxDiff=1,
                            P1=8 * 3 * window_size ** 2,
                            P2=32 * 3 * window_size ** 2,
                            fullDP=False)

    print 'computing disparity...'
    disp = stereo.compute(np.asarray(r), np.asarray(r2)).astype(np.float32) / 16.0

    print 'generating 3d point cloud...'
    points = cv2.reprojectImageTo3D(disp, np.asarray(self.Q))
    colors = cv2.cvtColor(np.asarray(r), cv2.COLOR_GRAY2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]

    # The resulting .ply file can be easily viewed using MeshLab (http://meshlab.sourceforge.net)
    out_fn = 'out.ply'
    write_ply(out_fn, out_points, out_colors)
    print '%s saved' % out_fn

    cv2.imshow('disparity', (disp - min_disp) / num_disp)
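# A minimal modern-API sketch of the same disparity step, assuming the rectified
# pair is already available as NumPy arrays rect_left/rect_right (hypothetical
# names); in OpenCV >= 3 the matcher is built with cv2.StereoSGBM_create instead
# of the cv2.StereoSGBM constructor used above.
import cv2
import numpy as np

def compute_disparity(rect_left, rect_right, min_disp=16, num_disp=96, window_size=3):
    # Same tuning as above: P1/P2 penalize small/large disparity changes
    matcher = cv2.StereoSGBM_create(minDisparity=min_disp,
                                    numDisparities=num_disp,
                                    blockSize=window_size,
                                    P1=8 * 3 * window_size ** 2,
                                    P2=32 * 3 * window_size ** 2,
                                    disp12MaxDiff=1,
                                    uniquenessRatio=10,
                                    speckleWindowSize=100,
                                    speckleRange=32)
    # SGBM returns fixed-point disparities scaled by 16
    return matcher.compute(rect_left, rect_right).astype(np.float32) / 16.0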
def collectCheckboardPoints(self):
    self.pointsArray1 = np.zeros((nimages, num_pts, 2))
    self.pointsArray2 = np.zeros((nimages, num_pts, 2))
    cv.NamedWindow("camera")
    cv.NamedWindow("camera2")
    i = 0
    while True:
        frame = cv.QueryFrame(self.video1)
        image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels)
        cv.Copy(frame, image)
        cv.ShowImage("camera", frame)
        grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1)
        cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY)

        frame2 = cv.QueryFrame(self.video2)
        image2 = cv.CreateImage(cv.GetSize(frame2), cv.IPL_DEPTH_8U, frame2.nChannels)
        cv.Copy(frame2, image2)
        cv.ShowImage("camera2", frame2)
        grayScaleFullImage2 = cv.CreateImage((image2.width, image2.height), 8, 1)
        cv.CvtColor(image2, grayScaleFullImage2, cv.CV_BGR2GRAY)

        found, points = cv.FindChessboardCorners(grayScaleFullImage, dims,
                                                 cv.CV_CALIB_CB_ADAPTIVE_THRESH)
        if found != 0:
            print "found chess board " + str(np.shape(points))
            cv.DrawChessboardCorners(image, dims, points, found)
            cv.ShowImage("win2", image)
            cv.WaitKey(2)

        found2, points2 = cv.FindChessboardCorners(grayScaleFullImage2, dims,
                                                   cv.CV_CALIB_CB_ADAPTIVE_THRESH)
        if found2 != 0:
            print "found chess board2"
            cv.DrawChessboardCorners(image2, dims, points2, found2)
            cv.ShowImage("win3", image2)
            cv.WaitKey(2)

        # only keep the frame when the board was found in both views
        if found and found2:
            self.pointsArray1[i, :] = points
            self.pointsArray2[i, :] = points2
            i = i + 1

        if i == nimages:
            self.size = cv.GetSize(image)
            break

        if cv.WaitKey(10) == 27:
            break

    # window names must match the ones created above
    cv.DestroyWindow("camera")
    cv.DestroyWindow("camera2")
def show_frame(self):
    color_image = cv.QueryFrame(self.capture)
    color_image1 = cv.CreateImage(cv.GetSize(color_image), 8, 3)
    grey_image = cv.CreateImage(cv.GetSize(color_image), cv.IPL_DEPTH_8U, 1)
    moving_average = cv.CreateImage(cv.GetSize(color_image), cv.IPL_DEPTH_32F, 3)
    grey = cv.CreateImage(cv.GetSize(color_image), 8, 3)
    HSV = cv.CreateImage(cv.GetSize(color_image), 8, 3)
    red = cv.CreateImage(cv.GetSize(color_image), 8, 3)

    cv.CvtColor(color_image, grey, cv.CV_RGB2HLS)
    cv.CvtColor(color_image, HSV, cv.CV_RGB2HSV)
    cv.Not(color_image, red)

    cv.ShowImage(self.window1, color_image)
    cv.ShowImage(self.window2, grey)
    cv.ShowImage(self.window3, HSV)
    cv.ShowImage(self.window4, red)
    cv.MoveWindow(self.window1, 30, 120)
    cv.MoveWindow(self.window2, 430, 120)
    cv.MoveWindow(self.window3, 430, 470)
    cv.MoveWindow(self.window4, 30, 470)

    # drain any bytes waiting on the serial link from the Arduino
    while self.arduino.inWaiting() > 0:
        self.data += self.arduino.read(1)
def repeat():
    global capture  # declare as globals since we are assigning to them now
    global camera_index
    global done

    frame = cv.QueryFrame(capture)
    cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)
    imgHsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, imgHsv, cv.CV_BGR2HSV)

    imgRGBA = cv.CreateImage(cv.GetSize(frame), 8, 4)
    cv.CvtColor(frame, imgRGBA, cv.CV_BGR2RGBA)
    cv.Smooth(imgRGBA, imgRGBA, cv.CV_GAUSSIAN, 3, 3)
    (filteredImg, offsetX, offsetY) = parallelSumRed(imgRGBA, 640, 480)  # 3D array

    d = numpy.sqrt(offsetX * offsetX + offsetY * offsetY)
    if d != 0:
        print("Distance = " + str(c1 / d + c2) + "cm")
        print("OffsetX = " + str(offsetX) + "; OffsetY = " + str(offsetY))
        print("")

    imgRGB = cv.CreateImage(cv.GetSize(frame), 8, 3)
    imgRGBA = cv.fromarray(numpy.reshape(filteredImg, (480, 640, 4)))
    if offsetX != 0 or offsetY != 0:
        # mark the detected centroid and draw crosshair lines through it
        cv.Rectangle(imgRGBA, (320 + offsetX - 6, 240 + offsetY - 6),
                     (320 + offsetX + 6, 240 + offsetY + 6), (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (0, 240 + offsetY), (639, 240 + offsetY), (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (320 + offsetX, 0), (320 + offsetX, 479), (255, 0, 255, 255), 1, 8)

    cv.ShowImage(HSVWindow, imgRGBA)
    cv.ShowImage(original, frame)
    cv.SetMouseCallback(original, onMouseMove, [cv.CV_EVENT_MOUSEMOVE,
                                                numpy.asarray(cv.GetMat(imgHsv)),
                                                numpy.asarray(cv.GetMat(frame))])

    c = cv.WaitKey(10)
    if str(c) == "27":  # ESC pressed
        print("Thank You!")
        done = True
    if str(c) == "99":  # 'c' for calibration
        calibration(int(input("How many data points: ")))
def _mixImageSelfMaskHue(self, wipeSettings, level, image1, image2, mixMat):
    # compare the hue planes of the two images and build a mask where the
    # hue difference exceeds the level-dependent threshold
    cv.CvtColor(image1, mixMat, cv.CV_RGB2HSV)
    cv.Split(mixMat, self._mixMixMask1, None, None, None)
    cv.CvtColor(image2, mixMat, cv.CV_RGB2HSV)
    cv.Split(mixMat, self._mixMixMask2, None, None, None)
    cv.Sub(self._mixMixMask2, self._mixMixMask1, self._mixImageMask)
    cv.CmpS(self._mixImageMask, 255 - int(level * 254), self._mixImageMask, cv.CV_CMP_GT)
    return self._mixImageAlphaMask(wipeSettings, level, image1, image2,
                                   self._mixImageMask, mixMat)
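# A minimal NumPy/cv2 sketch of the same hue-difference mask, assuming 8-bit
# RGB inputs; the function and variable names here are illustrative, not from
# the original mixer class.
import cv2
import numpy as np

def hue_diff_mask(image1, image2, level):
    hue1 = cv2.cvtColor(image1, cv2.COLOR_RGB2HSV)[:, :, 0]
    hue2 = cv2.cvtColor(image2, cv2.COLOR_RGB2HSV)[:, :, 0]
    diff = cv2.subtract(hue2, hue1)        # saturating subtraction, like cv.Sub
    threshold = 255 - int(level * 254)     # same level-to-threshold mapping as above
    return (diff > threshold).astype(np.uint8) * 255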
def run(self):
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = False
    while True:
        frame = cv.QueryFrame(self.capture)
        cv.Flip(frame, frame, 0)

        # Convert to HSV and keep the hue plane
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, frame, cv.CV_RGB2BGR)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CalcArrBackProject([self.hue], backproject, hist)

        # Run the cam-shift
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject,
                                                                  self.track_window, crit)
            self.track_window = rect

        # If mouse is pressed, highlight the current selected rectangle
        # and recompute the histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

        if not backproject_mode:
            cv.ShowImage("CamShiftDemo", frame)
        else:
            cv.ShowImage("CamShiftDemo", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
        elif c == ord("b"):
            backproject_mode = not backproject_mode
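# A minimal sketch of one iteration of the same tracker with the modern cv2 API,
# assuming track_window is an (x, y, w, h) tuple and roi_hist is a hue histogram
# built from the selected region; names are illustrative.
import cv2

def camshift_step(frame_bgr, track_window, roi_hist):
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    backproject = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    track_box, track_window = cv2.CamShift(backproject, track_window, crit)
    # track_box is a rotated rectangle suitable for cv2.ellipse
    cv2.ellipse(frame_bgr, track_box, (0, 0, 255), 2)
    return track_window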
def OnPaint(self, evt):
    if not self.timer.IsRunning():
        dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap, wx.BUFFER_VIRTUAL_AREA)
        dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
        return

    # Capture the frame
    frame = cv.QueryFrame(CAMERA)
    cv.CvtColor(frame, frame, cv.CV_BGR2RGB)
    Img = wx.EmptyImage(frame.width, frame.height)
    Img.SetData(frame.tostring())
    self.bmp = wx.BitmapFromImage(Img)
    width, height = frame.width, frame.height

    # Face detection
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(frame.width / image_scale),
                                cv.Round(frame.height / image_scale)), 8, 1)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)
    listeVisages = cv.HaarDetectObjects(small_img, CASCADE, cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors, haar_flags, min_size)

    # Display the image
    x, y = (0, 0)
    try:
        dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap, wx.BUFFER_VIRTUAL_AREA)
        try:
            dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
        except:
            pass
        dc.Clear()
        dc.DrawBitmap(self.bmp, x, y)

        # Draw a rectangle around each detected face
        if listeVisages:
            for ((x, y, w, h), n) in listeVisages:
                dc.SetBrush(wx.TRANSPARENT_BRUSH)
                dc.SetPen(wx.Pen(wx.Colour(255, 0, 0), 2))
                dc.DrawRectangle(x * image_scale, y * image_scale,
                                 w * image_scale, h * image_scale)

        self.listeVisages = listeVisages
        del dc
        del Img
    except TypeError:
        pass
    except wx.PyDeadObjectError:
        pass
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
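# A minimal modern-API sketch of the same detector, assuming the cascade XML
# path is available; cv2.CascadeClassifier replaces cv.HaarDetectObjects, and
# the caller does the grayscale conversion before detectMultiScale.
import cv2

def detect_and_draw_cv2(img_bgr, cascade_path, scale=1.2, min_neighbors=2):
    cascade = cv2.CascadeClassifier(cascade_path)
    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    faces = cascade.detectMultiScale(gray, scaleFactor=scale,
                                     minNeighbors=min_neighbors,
                                     minSize=(20, 20))
    for (x, y, w, h) in faces:
        cv2.rectangle(img_bgr, (x, y), (x + w, y + h), (0, 0, 255), 3)
    return img_bgr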
def preprocessing(im):
    gray = cv.CreateImage((im.width, im.height), 8, 1)
    out = cv.CreateImage((im.width, im.height), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)
    cv.Threshold(gray, out, 110, 255, cv.CV_THRESH_BINARY_INV)
    return out  # return the inverse-thresholded binary image
def genFeatres(img_list):
    network = slminit()
    filenames = img_list
    index = 0
    faceVectors = []
    for img in filenames:
        entry1 = img
        src1 = cv.LoadImageM(entry1)
        gray_full1 = cv.CreateImage(cv.GetSize(src1), 8, 1)
        grayim1 = cv.CreateImage((200, 200), 8, 1)
        cv.CvtColor(src1, gray_full1, cv.CV_BGR2GRAY)
        cv.Resize(gray_full1, grayim1, interpolation=cv.CV_INTER_CUBIC)
        gray1 = cv.GetMat(grayim1)
        im_array1 = np.asarray(gray1).astype('f')

        # -- compute feature map, shape [height, width, depth]
        f_map1 = slmprop(im_array1, network)
        f_map_dims1 = f_map1.shape

        # flatten the feature map into a single vector
        image_vector = []
        for j in range(f_map_dims1[0]):
            for k in range(f_map_dims1[1]):
                for l in range(f_map_dims1[2]):
                    image_vector.append(f_map1[j][k][l])

        print index
        index = index + 1
        faceVectors.append(np.asarray(image_vector))
    return faceVectors
def run(self):
    first_frame = True
    while True:
        frame = cv.QueryFrame(self.capture)
        if first_frame:
            gray = cv.CreateImage(cv.GetSize(frame), 8, 1)
            prev_gray = cv.CreateImage(cv.GetSize(frame), 8, 1)
            flow = cv.CreateImage(cv.GetSize(frame), 32, 2)
            self.cflow = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
        if not first_frame:
            cv.CalcOpticalFlowFarneback(prev_gray, gray, flow,
                                        pyr_scale=0.5, levels=3, winsize=15,
                                        iterations=3, poly_n=5, poly_sigma=1.2,
                                        flags=0)
            self.draw_flow(flow, prev_gray)
        c = cv.WaitKey(7)
        if c in [27, ord('q'), ord('Q')]:
            break
        prev_gray, gray = gray, prev_gray
        first_frame = False
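# A minimal modern-API sketch of the same dense optical flow step, assuming two
# consecutive grayscale frames as NumPy arrays; cv2.calcOpticalFlowFarneback
# returns an HxWx2 float32 flow field instead of filling a preallocated image.
import cv2

def farneback_flow(prev_gray, gray):
    # args: prev, next, flow, pyr_scale, levels, winsize, iterations,
    #       poly_n, poly_sigma, flags -- same tuning as above
    return cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)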
def create_imagefile(filename, latlon, ground_width, path_obj, mission_obj,
                     width=600, height=600):
    '''create path and mission as an image file'''
    mt = mp_tile.MPTile(service=opts.service)

    map_img = mt.area_to_image(latlon[0], latlon[1], width, height, ground_width)
    while mt.tiles_pending() > 0:
        print("Waiting on %u tiles" % mt.tiles_pending())
        time.sleep(1)
        map_img = mt.area_to_image(latlon[0], latlon[1], width, height, ground_width)

    # a function to convert from (lat, lon) to (px, py) on the map
    pixmapper = functools.partial(pixel_coords, ground_width=ground_width,
                                  mt=mt, topleft=latlon, width=width)
    path_obj.draw(map_img, pixmapper, None)
    if mission_obj is not None:
        mission_obj.draw(map_img, pixmapper, None)

    cv.CvtColor(map_img, map_img, cv.CV_BGR2RGB)
    cv.SaveImage(filename, map_img)
def __init__(self, parent):
    wx.Panel.__init__(self, parent)

    # magic to stop the flickering
    def SetCompositeMode(self, on=True):
        exstyle = win32api.GetWindowLong(self.GetHandle(), win32con.GWL_EXSTYLE)
        if on:
            exstyle |= win32con.WS_EX_COMPOSITED
        else:
            exstyle &= ~win32con.WS_EX_COMPOSITED
        win32api.SetWindowLong(self.GetHandle(), win32con.GWL_EXSTYLE, exstyle)

    SetCompositeMode(self, True)

    storage = cv.CreateMat(orig.width, 1, cv.CV_32FC3)
    self.ImagePro(capture, orig, processed, storage, grid)
    cv.CvtColor(orig, orig, cv.CV_BGR2RGB)  # fix color distortions
    self.bmp = wx.BitmapFromBuffer(640, 300, orig.tostring())
    sbmp = wx.StaticBitmap(self, -1, bitmap=self.bmp)  # display the resulting image

    self.playTimer = wx.Timer(self, self.TIMER_PLAY_ID)
    wx.EVT_TIMER(self, self.TIMER_PLAY_ID, self.onNextFrame)
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    if fps != 0:
        self.playTimer.Start(1000 / fps)  # every X ms
    else:
        self.playTimer.Start(1000 / 15)  # assuming 15 fps
def __init__(self, threshold=8, doRecord=True, showWindows=True):
    self.writer = None
    self.font = None
    self.doRecord = doRecord   # whether or not to record the moving object
    self.show = showWindows    # whether or not to show the two windows
    self.frame = None

    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.QueryFrame(self.capture)  # take a frame to init the recorder
    if doRecord:
        self.initRecorder()

    # gray frame at t-1
    self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)
    cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)

    # will hold the thresholded result
    self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)

    # gray frame at t
    self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)

    self.width = self.frame.width
    self.height = self.frame.height
    self.nb_pixels = self.width * self.height
    self.threshold = threshold
    self.isRecording = False
    self.trigger_time = 0  # timestamp of the last detection

    if showWindows:
        cv.NamedWindow("Image")
        cv.CreateTrackbar("Detection threshold: ", "Image", self.threshold,
                          100, self.onChange)
def detect_and_draw(img, face_cascade):
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    image_scale = img.width / smallwidth
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    faces = cv.HaarDetectObjects(small_img, face_cascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)
    if opencv_preview and faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            if verbose:
                print "Face at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]

    return True if faces else False
def hs_histogram(src):
    # Convert to HSV
    hsv = cv.CreateImage(cv.GetSize(src), 8, 3)
    cv.CvtColor(src, hsv, cv.CV_BGR2HSV)

    # Extract the H and S planes
    h_plane = cv.CreateMat(src.rows, src.cols, cv.CV_8UC1)
    s_plane = cv.CreateMat(src.rows, src.cols, cv.CV_8UC1)
    cv.Split(hsv, h_plane, s_plane, None, None)
    planes = [h_plane, s_plane]

    h_bins = 30
    s_bins = 32
    hist_size = [h_bins, s_bins]
    # hue varies from 0 (~0 deg red) to 180 (~360 deg red again)
    h_ranges = [0, 180]
    # saturation varies from 0 (black-gray-white) to 255 (pure spectrum color)
    s_ranges = [0, 255]
    ranges = [h_ranges, s_ranges]
    scale = 10
    hist = cv.CreateHist([h_bins, s_bins], cv.CV_HIST_ARRAY, ranges, 1)
    cv.CalcHist([cv.GetImage(i) for i in planes], hist)
    (_, max_value, _, _) = cv.GetMinMaxHistValue(hist)

    hist_img = cv.CreateImage((h_bins * scale, s_bins * scale), 8, 3)
    for h in range(h_bins):
        for s in range(s_bins):
            bin_val = cv.QueryHistValue_2D(hist, h, s)
            intensity = cv.Round(bin_val * 255 / max_value)
            cv.Rectangle(hist_img,
                         (h * scale, s * scale),
                         ((h + 1) * scale - 1, (s + 1) * scale - 1),
                         cv.RGB(intensity, intensity, intensity),
                         cv.CV_FILLED)
    return hist_img
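# A minimal modern-API sketch of the same 2D hue/saturation histogram, assuming
# a BGR input array; cv2.calcHist replaces the CreateHist/CalcHist pair above.
import cv2

def hs_histogram_cv2(src_bgr, h_bins=30, s_bins=32):
    hsv = cv2.cvtColor(src_bgr, cv2.COLOR_BGR2HSV)
    # channels 0 (hue) and 1 (saturation), with their respective value ranges
    hist = cv2.calcHist([hsv], [0, 1], None, [h_bins, s_bins], [0, 180, 0, 256])
    return cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)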
def capture():
    """
    Use the Intel training set to capture the faces in the video.
    Most of this is standard OpenCV framework code.
    """
    j = 0
    # load the cascade once, outside the loop
    cascade = cv.Load('haarcascade_frontalface_alt2.xml')
    g = os.walk("origin")
    for path, d, filelist in g:
        for filename in filelist:
            img = cv.LoadImage(os.path.join(path, filename))
            image_size = cv.GetSize(img)
            greyscale = cv.CreateImage(image_size, 8, 1)
            cv.CvtColor(img, greyscale, cv.CV_BGR2GRAY)
            storage = cv.CreateMemStorage(0)
            cv.EqualizeHist(greyscale, greyscale)
            faces = cv.HaarDetectObjects(greyscale, cascade, storage, 1.2, 2,
                                         cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))
            for (x, y, w, h), n in faces:
                j += 1
                cv.SetImageROI(img, (x, y, w, h))
                cv.SaveImage("captured/face" + str(j) + ".png", img)
def detectFace(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / imageScale),
                                cv.Round(img.height / imageScale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                 haarScale, minNeighbors, haarFlags, minSize)
    if faces:
        print "\tDetected ", len(faces), " object(s)"
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * imageScale), int(y * imageScale))
            pt2 = (int((x + w) * imageScale), int((y + h) * imageScale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
        return img
    else:
        return False
def cannyGradient(self, image, t1=20, t2=250):
    '''Returns the Canny edge map'''
    # Check whether inputs are correct
    if self.image_check(image) < 0:
        return -1

    # Convert the image to grayscale if it is not already single-channel
    gsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    if image.channels > 1:
        temp = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(image, temp, cv.CV_BGR2GRAY)
        gsimage = temp
    else:
        gsimage = image

    # Get the edges from the image
    edges = cv.CreateImage(cv.GetSize(gsimage), cv.IPL_DEPTH_8U, 1)
    # Warning: threshold1 and threshold2 should be selected by experiment
    cv.Canny(gsimage, edges, threshold1=t1, threshold2=t2)

    if self.visualize:
        while True:
            cv.NamedWindow("Original")
            cv.ShowImage("Original", gsimage)
            cv.NamedWindow("Edges")
            cv.ShowImage("Edges", edges)
            c = cv.WaitKey(5)
            if c > 0:
                break
        cv.DestroyAllWindows()
    return edges
def _get_circles(img, board, pattern):
    """
    Get circle centers for a symmetric or asymmetric grid
    """
    w, h = cv.GetSize(img)
    mono = cv.CreateMat(h, w, cv.CV_8UC1)
    cv.CvtColor(img, mono, cv.CV_BGR2GRAY)

    flag = cv2.CALIB_CB_SYMMETRIC_GRID
    if pattern == Patterns.ACircles:
        flag = cv2.CALIB_CB_ASYMMETRIC_GRID
    mono_arr = numpy.array(mono)
    (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows),
                                        flags=flag)

    # In the symmetric case, findCirclesGrid does not detect the target if it's
    # turned sideways. So we try again with dimensions swapped - not so efficient.
    # TODO Better to add as second board? Corner ordering will change.
    if not ok and pattern == Patterns.Circles:
        (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols),
                                            flags=flag)

    # For some reason findCirclesGrid returns centers as [[x y]] instead of
    # (x y) like FindChessboardCorners
    if corners is not None:
        corners = [(x, y) for [[x, y]] in corners]

    return (ok, corners)
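# A minimal usage sketch for findCirclesGrid, assuming a BGR image of the
# standard 4x11 asymmetric circle grid; the file name and board geometry are
# illustrative, not from the original calibrator.
import cv2

frame = cv2.imread('board.png')  # hypothetical input image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ok, centers = cv2.findCirclesGrid(gray, (4, 11),
                                  flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
if ok:
    # drawChessboardCorners also renders circle-grid centers
    cv2.drawChessboardCorners(frame, (4, 11), centers, ok)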
def initGrab(self):
    image = ImageGrab.grab(self.geometry)
    cv_im = cv.CreateImageHeader(image.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, image.tostring())
    cv.CvtColor(cv_im, cv_im, cv.CV_RGB2BGR)

    fourcc = cv.CV_FOURCC('D', 'I', 'V', 'X')
    fps = 25
    width, height = cv.GetSize(cv_im)
    self.writer = cv.CreateVideoWriter('out3.avi', fourcc, fps,
                                       (int(width), int(height)), 1)
    cv.WriteFrame(self.writer, cv_im)
    self.frames_count = 1

    timer = QtCore.QTimer()
    time_interval = 1000 / 25
    timer.setInterval(time_interval)
    timer.timeout.connect(self.grabFrame)
    timer.start()
    self.timer = timer

    self.stopTimer = QtCore.QTimer()
    self.stopTimer.setInterval(self.total_time)
    self.stopTimer.timeout.connect(self.stopCapture)
    self.stopTimer.setSingleShot(True)
    self.stopTimer.start()
def normalize(self, image):
    # Check whether inputs are correct
    if self.image_check(image) < 0:
        return -1

    # Change the image to grayscale, then equalize its histogram
    gsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    newgsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(image, gsimage, cv.CV_RGB2GRAY)
    cv.EqualizeHist(gsimage, newgsimage)

    if self.visualize:
        while True:
            cv.NamedWindow("Normal")
            cv.ShowImage("Normal", gsimage)
            cv.WaitKey(5)
            cv.NamedWindow("Histogram Equalized")
            cv.ShowImage("Histogram Equalized", newgsimage)
            if cv.WaitKey(5) == 1048603:  # ESC (27) with high modifier bits set
                break
        cv.DestroyAllWindows()
    return newgsimage
def initGrabQt(self):
    image_qt = QtGui.QPixmap.grabWidget(self.view).toImage()
    image_qt_size = (image_qt.size().width(), image_qt.size().height())
    cv_im_4chan = cv.CreateImageHeader(image_qt_size, cv.IPL_DEPTH_8U, 4)
    cv_im = cv.CreateImage(image_qt_size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im_4chan, image_qt.bits().asstring(image_qt.numBytes()))
    cv.CvtColor(cv_im_4chan, cv_im, cv.CV_RGBA2RGB)

    fourcc = cv.CV_FOURCC('D', 'I', 'V', 'X')
    fps = 25
    width, height = cv.GetSize(cv_im)
    self.writer = cv.CreateVideoWriter('out3.avi', fourcc, fps,
                                       (int(width), int(height)), 1)
    cv.WriteFrame(self.writer, cv_im)

    timer = QtCore.QTimer()
    time_interval = 1000 / 25
    timer.setInterval(time_interval)
    timer.timeout.connect(self.grabFrameQt)
    timer.start()
    self.timer = timer

    self.stopTimer = QtCore.QTimer()
    self.stopTimer.setInterval(self.total_time)
    self.stopTimer.timeout.connect(self.stopCapture)
    self.stopTimer.setSingleShot(True)
    self.stopTimer.start()
def _mixImageSelfMask(self, wipeSettings, level, image1, image2, mixMat, whiteMode):
    # Build a mask from image2's own luminance: in white mode keep everything
    # below near-white; otherwise keep everything above near-black
    cv.CvtColor(image2, self._mixImageMask, cv.CV_BGR2GRAY)
    if whiteMode:
        cv.CmpS(self._mixImageMask, 250, self._mixImageMask, cv.CV_CMP_LT)
    else:
        cv.CmpS(self._mixImageMask, 5, self._mixImageMask, cv.CV_CMP_GT)
    return self._mixImageAlphaMask(wipeSettings, level, image1, image2,
                                   self._mixImageMask, mixMat)
def url_jpg_contours(url):
    position = 100
    filedata = urllib2.urlopen(url).read()
    imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
    cv.SetData(imagefiledata, filedata, len(filedata))
    im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)

    # convert to grayscale and find edges
    gray_im = cv.CreateImage((im.width, im.height), 8, 1)
    edge_im = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY)
    cv.Canny(gray_im, edge_im, position, position * 3, 3)
    cv.SetZero(col_edge)

    # copy edge points
    cv.Copy(im, col_edge, edge_im)
    edge_im_array = np.asarray(edge_im[:])
    ret, edge_im_array = cv2.threshold(edge_im_array, 127, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(edge_im_array, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # scale pixel coordinates down to small lat/lng offsets
    scale = 10000.0
    points = []
    for contour in contours:
        for i in contour:
            for j in i:
                lng_offset = j[0] / scale
                lat_offset = j[1] / scale
                points.append([lng_offset, lat_offset])
    return points
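# A minimal modern-API sketch of the same decode/Canny/contours pipeline,
# assuming the JPEG bytes are already in memory; note that cv2.findContours
# returns (contours, hierarchy) in OpenCV 2 and 4, but (image, contours,
# hierarchy) in OpenCV 3.
import cv2
import numpy as np

def jpg_bytes_to_contours(filedata, threshold=100):
    im = cv2.imdecode(np.frombuffer(filedata, dtype=np.uint8), cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, threshold, threshold * 3, apertureSize=3)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    return contours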
def initialize_motion_detection(self):
    """Initialize objects used for motion detection."""
    self.camera = cv.CaptureFromCAM(0)
    #cv.NamedWindow(self.windowName, cv.CV_WINDOW_AUTOSIZE)

    # lower the resolution of the camera
    height = 120
    width = 160
    cv.SetCaptureProperty(self.camera, cv.CV_CAP_PROP_FRAME_WIDTH, width)
    cv.SetCaptureProperty(self.camera, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    # set variables containing video information
    self.colorFrame = cv.QueryFrame(self.camera)
    self.imageHeight = self.colorFrame.height
    self.imageWidth = self.colorFrame.width
    self.numPixels = self.imageHeight * self.imageWidth
    depth = self.colorFrame.depth
    imageSize = cv.GetSize(self.colorFrame)

    # create image structures for processing
    self.previousGrayFrame = cv.CreateImage(imageSize, depth, 1)
    self.currentGrayFrame = cv.CreateImage(imageSize, depth, 1)
    self.resultImage = cv.CreateImage(imageSize, depth, 1)
    cv.CvtColor(self.colorFrame, self.previousGrayFrame, cv.CV_RGB2GRAY)
    self.previousGrayFrame = self.reduce_image_noise(self.previousGrayFrame)
def processImage(self, curframe):
    cv.Smooth(curframe, curframe)  # remove false positives

    if not self.absdiff_frame:
        # The first time, initialize difference, temp and moving_average
        self.absdiff_frame = cv.CloneImage(curframe)
        self.previous_frame = cv.CloneImage(curframe)
        # Convert because RunningAvg works on 32F images
        cv.Convert(curframe, self.average_frame)
    else:
        cv.RunningAvg(curframe, self.average_frame, 0.05)  # compute the running average

    cv.Convert(self.average_frame, self.previous_frame)  # convert back to 8U frame

    # moving_average - curframe
    cv.AbsDiff(curframe, self.previous_frame, self.absdiff_frame)

    # Convert to gray, otherwise thresholding is not possible
    cv.CvtColor(self.absdiff_frame, self.gray_frame, cv.CV_RGB2GRAY)
    cv.Threshold(self.gray_frame, self.gray_frame, 50, 255, cv.CV_THRESH_BINARY)

    cv.Dilate(self.gray_frame, self.gray_frame, None, 15)  # to get object blobs
    cv.Erode(self.gray_frame, self.gray_frame, None, 10)
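# A minimal modern-API sketch of the same running-average motion mask, assuming
# BGR frames as NumPy arrays and a float32 accumulator carried between calls;
# function and variable names are illustrative.
import cv2
import numpy as np

def motion_mask(frame, average):
    frame = cv2.GaussianBlur(frame, (3, 3), 0)    # remove false positives
    cv2.accumulateWeighted(frame, average, 0.05)  # running average, like cv.RunningAvg
    diff = cv2.absdiff(frame, cv2.convertScaleAbs(average))
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
    mask = cv2.dilate(mask, None, iterations=15)  # merge into object blobs
    return cv2.erode(mask, None, iterations=10)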
def collect_costs_info(cost_info, counts, frame, mask, frec, max_rec, idx):
    # Convert the incoming frame to HSV color space and copy the color of
    # each pixel to the corresponding location in cost_info
    hsvIm = cv.fromarray(frame)
    cv.CvtColor(hsvIm, hsvIm, cv.CV_RGB2HSV)
    frame_hsv = np.asarray(hsvIm)

    dispX = int(np.round(frec.left - max_rec.left))
    dispY = int(np.round(frec.top - max_rec.top))
    blndMask = np.zeros(counts.shape, dtype=int)

    cost_info[dispY:int(dispY + np.round(frec.height())):,
              dispX:int(dispX + np.round(frec.width())):, idx, :3] \
        = frame_hsv[:, :, :3]
    blndMask[dispY:int(dispY + np.round(frec.height())):,
             dispX:int(dispX + np.round(frec.width())):] \
        = mask[:, :]

    blndMask = blndMask > 0
    counts[blndMask] = counts[blndMask] + 1
    return cost_info
def getthresholdedimg(im):
    # This function takes an RGB image, converts it to HSV for easier colour
    # detection, and thresholds the blue region as white and all other regions
    # as black. It returns that binary image.
    global imghsv
    imghsv = cv.CreateImage(cv.GetSize(im), 8, 3)

    # Convert image from RGB to HSV
    cv.CvtColor(im, imghsv, cv.CV_BGR2HSV)

    # create an image for blue, and a blank image to which thresholds are added
    imgblue = cv.CreateImage(cv.GetSize(im), 8, 1)
    imgthreshold = cv.CreateImage(cv.GetSize(im), 8, 1)

    # determine the HSV colour threshold for blue:
    # cv.InRangeS(src, lowerbound, upperbound, dst)
    # 55/155 original; a lower bound around 105 seems to be needed to
    # eliminate flag detection
    cv.InRangeS(imghsv, cv.Scalar(55, 100, 100), cv.Scalar(155, 255, 255), imgblue)

    # add the colour threshold to the blank 'threshold' image
    cv.Add(imgthreshold, imgblue, imgthreshold)
    return imgthreshold
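# A minimal modern-API sketch of the same colour threshold, assuming a BGR
# frame; cv2.inRange replaces cv.InRangeS and returns the binary mask directly.
import cv2
import numpy as np

def get_thresholded_img(im_bgr):
    hsv = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2HSV)
    lower = np.array([55, 100, 100], dtype=np.uint8)   # same bounds as above
    upper = np.array([155, 255, 255], dtype=np.uint8)
    return cv2.inRange(hsv, lower, upper)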
def get_image(camera, filename=None):
    im = cv.QueryFrame(camera)

    # take greyscale and compute the RMS value
    im2 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_32F, 3)
    cv.Convert(im, im2)
    gray = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_32F, 1)
    cv.CvtColor(im2, gray, cv.CV_RGB2GRAY)
    gray_mat = cv.GetMat(gray)
    img = numpy.asarray(gray_mat)
    power = numpy.sqrt(numpy.mean(img ** 2))

    # save file
    if filename:
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2, cv.CV_AA)
        cv.PutText(im, filename, (DATE_X, DATE_Y), font, cv.RGB(255, 255, 0))
        filename = os.path.join(DIR_PREFIX, filename + '.jpg')
        print filename
        cv.SaveImage(filename, im)
        del font
    else:
        filename = ''

    del im, im2, gray, img, gray_mat
    return (power, filename)