def main():
    # Initialization
    highgui.cvNamedWindow("Guardian", 1)
    signal.signal(signal.SIGINT, handler)

    # Stage
    #robot = playerc.playerc_client(None, "localhost", 6665)
    # Corobot
    robot = playerc.playerc_client(None, "corobot-w.wifi.wpi.edu", 6665)
    robot.connect()
    p2dproxy = playerc.playerc_position2d(robot, 0)
    p2dproxy.subscribe(playerc.PLAYERC_OPEN_MODE)
    p2dproxy.get_geom()
    robot.read()

    while True:
        image = highgui.cvQueryFrame(camera)  # `camera` is a module-level capture
        detectObject(image)
        p2dproxy.set_cmd_vel(speed[0], 0, speed[1], 0)  # `speed` is a module-level pair
        draw_gui(image)
        highgui.cvShowImage("Guardian", image)
        if highgui.cvWaitKey(20) != -1:
            break

    highgui.cvDestroyWindow("Guardian")
    p2dproxy.set_cmd_vel(0, 0, 0, 0)
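# The snippet above registers `handler` for SIGINT and relies on module-level
# globals (camera, speed, detectObject, draw_gui) defined elsewhere. A minimal
# sketch of what such a handler might look like -- hypothetical, not the
# original implementation:
def handler(signum, stackframe):
    # close the GUI and exit cleanly on Ctrl+C
    highgui.cvDestroyWindow("Guardian")
    sys.exit(0)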
def run(self, images, display=True, verbose=False, debug=False):
    left_detection, left_intensity_motion_activations, left_image, left_combined_masks = \
        self.left_detector.detect(images[0])
    self.record(left_detection, left_image, left_intensity_motion_activations)
    right_detection, right_intensity_motion_activations, right_image, right_combined_masks = \
        self.right_detector.detect(images[1])
    self.record(right_detection, right_image, right_intensity_motion_activations)

    if debug:
        motion, intensity = self.left_detector.get_motion_intensity_images()
        show_processed(left_image, [left_combined_masks, motion, intensity],
                       left_detection, left_intensity_motion_activations,
                       self.left_detector)
    elif display:
        draw_detection(left_image, left_detection)
        hg.cvShowImage('video', left_image)

    if left_detection is not None and right_detection is not None:
        if self.expecting_correct_labels and self.expected_class == 0:
            print 'EmbodiedLaserDetector: output suppressed, classified positive in both cameras.'
        else:
            return self.triangulate(left_detection, right_detection)
    return None
def imagesc(self, im, clims=None):
    """ Display a normalized version of the image """
    if self.currentWindow == -1:
        self.display()

    # don't normalize multichannel image
    #if(im.nChannels>1):
    #    if(im.depth!=cv.IPL_DEPTH_8U):
    #        im2 = cvCreateImage(cvSize(im.width, im.height), cv.IPL_DEPTH_8U, im.nChannels)
    #        cvScale(im, im2)
    #        im = im2
    #    cvShowImage(self.currentWindowName, im)
    #    return self.currentWindow

    # normalize image
    if clims:
        [minv, maxv] = clims
    else:
        [minv, maxv] = cvMinMaxLoc(im)
    if maxv != minv:
        s = 255.0 / (maxv - minv)
        shift = 255 * (-minv) / (maxv - minv)
    else:
        s = 1.0
        shift = -maxv
    # allocate an 8-bit destination of the same size for the scaled copy
    im2 = cvCreateImage(cvSize(im.width, im.height), cv.IPL_DEPTH_8U, im.nChannels)
    cvConvertScale(im, im2, s, shift)
    cvShowImage(self.currentWindowName, im2)
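# Hypothetical usage of imagesc(): the wrapper class name and test file are
# assumptions; the call pattern follows the signature above.
win = ImageWindow()                # assumed owner of display()/imagesc()
img = cvLoadImage('input.png', 0)  # load as grayscale
win.imagesc(img)                   # auto-scale between the image min and max
win.imagesc(img, clims=[0, 128])   # or pin the display range explicitly
cvWaitKey(0)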
def display_array(iar):
    left = ut.ros2cv(iar.images[0])
    right = ut.ros2cv(iar.images[1])
    hg.cvShowImage('channel 1', left)
    hg.cvShowImage('channel 2', right)
    hg.cvWaitKey(5)
def main(): print "FaceIn! an OpenCV Python Face Recognition Program" highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE) highgui.cvMoveWindow ('Camera', 10, 10) device = 0 #use first device found capture = highgui.cvCreateCameraCapture (device) frame = highgui.cvQueryFrame (capture) frame_size = cv.cvGetSize (frame) fps = 30 while 1: frame = highgui.cvQueryFrame (capture) detectFace(frame) # display the frames to have a visual output highgui.cvShowImage ('Camera', frame) # handle events k = highgui.cvWaitKey (5) if k % 0x100 == 27: # user has press the ESC key, so exit quit()
def main(): print "FaceIn! an OpenCV Python Face Recognition Program" highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE) highgui.cvMoveWindow('Camera', 10, 10) device = 0 #use first device found capture = highgui.cvCreateCameraCapture(device) frame = highgui.cvQueryFrame(capture) frame_size = cv.cvGetSize(frame) fps = 30 while 1: frame = highgui.cvQueryFrame(capture) detectFace(frame) # display the frames to have a visual output highgui.cvShowImage('Camera', frame) # handle events k = highgui.cvWaitKey(5) if k % 0x100 == 27: # user has press the ESC key, so exit quit()
def on_trackbar(position):
    # create the image for drawing the contours that were found
    contours_image = cv.cvCreateImage(cv.cvSize(_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours

    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look more funny
        _contours = contours.h_next.h_next.h_next

    # first, clear the image where we will draw contours
    cv.cvSetZero(contours_image)

    # draw contours in red and green
    cv.cvDrawContours(contours_image, _contours, _red, _green, levels, 3,
                      cv.CV_AA, cv.cvPoint(0, 0))

    # finally, show the image
    highgui.cvShowImage("contours", contours_image)
def main(): usage = "%prog [options] <imgfile>" version = "%prog 0.2\n Longbin Chen, [email protected]" oparser = optparse.OptionParser(usage=usage, version=version) oparser.add_option('-d', '--display', action="store_true", dest = 'display', default = False, help = 'display the image') oparser.add_option('-m', '--drawnumber', action="store_true", dest = 'drawnumber', default = False, help = 'display the point numbers') oparser.add_option('-n', '--number', dest = 'num', type='int',default = 200 , help = 'the number of feature points') oparser.add_option('-t', '--threshold', dest = 'threshold', type='int',default = 100 , help = 'the threshold for image binarification') oparser.add_option('-o', '--output', dest = 'output', default = None, help = 'output file') oparser.add_option('-s', '--save', dest = 'save', default = None, help = 'save the img file') (options, args) = oparser.parse_args(sys.argv) if len(args) != 2: oparser.parse_args([sys.argv[0], "--help"]) sys.exit(1) ct = ExtractMSS() ct.GetContour(args[1], options) if (options.display): ct.start = options.threshold ct.DrawKeyPoints() highgui.cvNamedWindow ("contour", 1) highgui.cvShowImage ("contour", ct.drawimg) highgui.cvWaitKey (0) if (options.output): ct.mss.save(options.output) if (options.save): highgui.cvSaveImage(options.save, ct.drawimg)
def show_image(window_name, img, wait=False):
    hg.cvStartWindowThread()
    RESIZABLE = 0
    hg.cvNamedWindow(window_name, RESIZABLE)
    hg.cvShowImage(window_name, img)
    if wait:
        print 'show_image: press any key to continue..'
        hg.cvWaitKey()
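# Hypothetical usage of show_image(): display non-blocking, then again
# blocking for a key (the file name is an assumption).
img = hg.cvLoadImage('frame.png')
show_image('preview', img)               # returns immediately
show_image('preview', img, wait=True)    # blocks until a key is pressed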
def show(fr, width, height, name):
    # rebuild a grayscale image from a flat vector of pixel values
    image = cv.cvCreateImage(cv.cvSize(width, height), 8, 1)
    l = 0
    for j in range(0, image.width):
        for i in range(0, image.height):
            cv.cvSet2D(image, i, j, int(fr[l][0]))
            l += 1
    highgui.cvShowImage(name, image)
    highgui.cvWaitKey(1000 / 29)  # ~29 fps
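# Hypothetical usage of show(): fr is expected to be a flat sequence of
# single-element pixel values (fr[l][0]) laid out column by column,
# width * height entries in total.
frame_vector = [[128]] * (320 * 240)  # a uniform gray frame, for illustration
show(frame_vector, 320, 240, 'playback')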
def image(self, im): """ Display image as is -- probably not what you'd expect for FP or integer images """ if(self.currentWindow==-1): self.display() cvShowImage(self.currentWindowName,im) return self.currentWindow
def image(self, im): """ Display image as is -- probably not what you'd expect for FP or integer images """ if (self.currentWindow == -1): self.display() cvShowImage(self.currentWindowName, im) return self.currentWindow
def main(argv):
    # Frames per second
    fps = 20
    tux_pos = 5
    tux_pos_min = 0.0
    tux_pos_max = 9.0

    try:
        opts, args = getopt.getopt(argv, "f:", ["framerate="])
    except getopt.GetoptError:
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-f", "--framerate"):
            fps = int(arg)

    camera = highgui.cvCreateCameraCapture(0)
    highgui.cvNamedWindow('Camera', 1)

    while True:
        im = highgui.cvQueryFrame(camera)
        if im is None:
            break
        # mirror
        opencv.cv.cvFlip(im, None, 1)

        # positions = face.detect(im, 'haarcascade_data/haarcascade_profileface.xml')
        positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')
        # if not positions:
        #     positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')

        # display webcam image
        highgui.cvShowImage('Camera', im)

        # Division of the screen to count as "walking" motion to trigger tux
        image_size = opencv.cvGetSize(im)
        motion_block = image_size.width / 9

        if positions:
            # pick the smallest (most distant) face
            mp = None
            for position in positions:
                if not mp or mp['width'] > position['width']:
                    mp = position
            pos = (mp['x'] + (mp['width'] / 2)) / motion_block
            print "tux pos: %f" % tux_pos
            print "pos: %f" % pos
            if pos != tux_pos:
                if tux_pos > pos:
                    move_tux_right(tux_pos - pos)
                elif tux_pos < pos:
                    move_tux_left(pos - tux_pos)
                tux_pos = pos

        if highgui.cvWaitKey(fps) >= 0:
            highgui.cvDestroyWindow('Camera')
            sys.exit(0)
def display(vec, name):
    patch, context = reconstruct_input(vec)
    patch = scale_image(patch, 5)
    context = scale_image(context, 5)
    hg.cvSaveImage(name + '_patch.png', patch)
    hg.cvSaveImage(name + '_context.png', context)
    hg.cvShowImage('image', patch)
    hg.cvShowImage('context', context)
    hg.cvWaitKey()
def getFilter(frameWidth, frameHeight):
    cvNamedWindow("Filtred")

    cvCreateTrackbar("hmax", "Filtred", getHlsFilter('hmax'), 180, trackBarChangeHmax)
    cvCreateTrackbar("hmin", "Filtred", getHlsFilter('hmin'), 180, trackBarChangeHmin)
    #cvCreateTrackbar("lmax", "Filtred", hlsFilter['lmax'], 255, trackBarChangeLmax)
    #cvCreateTrackbar("lmin", "Filtred", hlsFilter['lmin'], 255, trackBarChangeLmin)
    cvCreateTrackbar("smax", "Filtred", getHlsFilter('smax'), 255, trackBarChangeSmax)
    cvCreateTrackbar("smin", "Filtred", getHlsFilter('smin'), 255, trackBarChangeSmin)

    cvSetMouseCallback("Filtred", mouseClick, None)

    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    filtredFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    key = -1
    while key == -1:
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            sys.exit(1)
        frame = cvQueryFrame(CAM)

        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)

        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame)
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)

        cvSetZero(mask)
        cvAnd(ThHFrame, ThSFrame, mask)

        cvSetZero(filtredFrame)
        cvCopy(frame, filtredFrame, mask)

        cvShowImage("Filtred", filtredFrame)

        key = cvWaitKey(10)
        if key == 'r':
            key = -1
            resetHlsFilter()

    cvDestroyWindow("Filtred")
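# pixelInRange() is not shown in this file. It is assumed to binarize src into
# dst, keeping pixels inside [minv, maxv] and wrapping around the channel's
# range when minv > maxv (useful for red hues). A sketch under that assumption:
def pixelInRange(src, minv, maxv, range_min, range_max, dst):
    if minv <= maxv:
        cvInRangeS(src, cvScalar(minv), cvScalar(maxv + 1), dst)
    else:
        # wrapped interval: [minv, range_max] union [range_min, maxv]
        tmp = cvCloneImage(dst)
        cvInRangeS(src, cvScalar(minv), cvScalar(range_max + 1), tmp)
        cvInRangeS(src, cvScalar(range_min), cvScalar(maxv + 1), dst)
        cvOr(tmp, dst, dst)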
def seek_onChange(pos, capture, windowName):
    '''Callback for the seek trackbar'''
    print 'Seeking to frame: %d' % pos
    # Set the pointer to frame pos and grab the frame
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_POS_FRAMES,
                                 pos * 3600 - 1)
    frame = highgui.cvQueryFrame(capture)
    # Display the frame on the window
    highgui.cvShowImage(windowName, frame)
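# seek_onChange() takes extra arguments, while the old cvCreateTrackbar
# callback only receives the position. A sketch of the wiring, assuming a
# clip that is navigated in 3600-frame chunks:
total_chunks = 10   # hypothetical chunk count for the loaded clip
highgui.cvNamedWindow('playback', 1)
highgui.cvCreateTrackbar('seek', 'playback', 0, total_chunks,
                         lambda pos: seek_onChange(pos, capture, 'playback'))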
def run(self): """ Consume images from the webcam at 25fps. If visualize is True, show the result in the screen. """ if self.visualize: highgui.cvNamedWindow('DucksboardFace') while self.running: self.image = highgui.cvQueryFrame(self.camera) if self.visualize: highgui.cvShowImage('DucksboardFace', self.image) highgui.cvWaitKey(1000 / 25)
def main():
    # ctrl+c to end
    global h, s, v, h2, v2, s2, d, e

    highgui.cvNamedWindow("Camera 1", 1)
    highgui.cvNamedWindow("Orig", 1)
    highgui.cvCreateTrackbar("H", "Camera 1", h, 256, tb_h)
    highgui.cvCreateTrackbar("S", "Camera 1", s, 256, tb_s)
    highgui.cvCreateTrackbar("V", "Camera 1", v, 256, tb_v)
    highgui.cvCreateTrackbar("H2", "Camera 1", h2, 256, tb_h2)
    highgui.cvCreateTrackbar("S2", "Camera 1", s2, 256, tb_s2)
    highgui.cvCreateTrackbar("V2", "Camera 1", v2, 256, tb_v2)
    highgui.cvCreateTrackbar("Dilate", "Camera 1", d, 30, tb_d)
    highgui.cvCreateTrackbar("Erode", "Camera 1", e, 30, tb_e)

    cap = highgui.cvCreateCameraCapture(1)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_WIDTH, IMGW)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_HEIGHT, IMGH)

    c = 0
    t1 = tdraw = time.clock()
    t = 1
    font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)

    while c != 0x27:
        image = highgui.cvQueryFrame(cap)
        if not image:
            print "capture failed"
            break

        thresh = cv.cvCreateImage(cv.cvSize(IMGW, IMGH), 8, 1)
        cv.cvSetZero(thresh)
        cv.cvCvtColor(image, image, cv.CV_RGB2HSV)
        cv.cvInRangeS(image, (h, s, v, 0), (h2, s2, v2, 0), thresh)

        result = cv.cvCreateImage(cv.cvSize(IMGW, IMGH), 8, 3)
        cv.cvSetZero(result)
        cv.cvOr(image, image, result, thresh)

        for i in range(1, e):
            cv.cvErode(result, result)
        for i in range(1, d):
            cv.cvDilate(result, result)
        # floodfill objects back in, allowing threshold differences outwards

        t2 = time.clock()
        if t2 > tdraw + 0.3:
            t = t2 - t1
            tdraw = t2
        cv.cvPutText(result, "FPS: " + str(1 / (t)), (0, 25), font, (255, 255, 255))
        t1 = t2

        highgui.cvShowImage("Orig", image)
        highgui.cvShowImage("Camera 1", result)
        c = highgui.cvWaitKey(10)
def show_processed(image, masks, detection, blobs, detector):
    masker = Mask(image)
    splitter = SplitColors(image)
    r, g, b = splitter.split(image)
    thresholded_image = masker.mask(masks[0], r, g, b)
    draw_detection(thresholded_image, detection)
    hg.cvShowImage('thresholded', thresholded_image)

    draw_detection(image, detection)
    draw_blobs(image, blobs)

    make_visible_binary_image(masks[0])
    draw_detection(masks[0], detection)
    make_visible_binary_image(masks[1])
    make_visible_binary_image(masks[2])

    hg.cvShowImage("video", image)
    hg.cvShowImage('motion', masks[1])
    hg.cvShowImage('intensity', masks[2])

    key = hg.cvWaitKey(10)
    if detector is not None:
        if key == 'T':  # down
            detector.intensity_filter.thres_high = detector.intensity_filter.thres_high - 5
            print 'detector.intensity_filter.thres =', detector.intensity_filter.thres_high
        if key == 'R':  # up
            detector.intensity_filter.thres_high = detector.intensity_filter.thres_high + 5
            print 'detector.intensity_filter.thres =', detector.intensity_filter.thres_high
        if key == ' ':
            hg.cvWaitKey()
def main(): """ Just the test This method is a god resource on how to handle the results """ filename = sys.argv[1] image = highgui.cvLoadImage (filename) print "DO NOT EXPECT THE RUNNING TIME OF THIS TEST TO BE REPRESENTATIVE!" print "" print "THRESHOLDS AND EVERYTHING ELSE ARE HARDCODED!" cutRatios = [0.6667, lib.PHI, 0.6] settings = Settings(cutRatios) # Run the analysis with the above settings comps = naiveMethod.analyzeImage(image, settings) # This is just for drawing the results # The below methods can probably be combined but don't bother # {{{ # Get and draw the cuts cuts = {} for ratio in settings.cutRatios: cuts[str(ratio)] = lib.findMeans(cv.cvGetSize(image), ratio) for ratio in cuts: lib.drawLines(image, None, cuts[ratio], lib.getRandomColor()) # Get and draw the components for ratio in comps: for cut in comps[ratio]: lib.drawBoundingBoxes(image, comps[ratio][cut]) # }}} winname = "Failure" highgui.cvNamedWindow (winname, highgui.CV_WINDOW_AUTOSIZE) while True: highgui.cvShowImage (winname, image) c = highgui.cvWaitKey(0) if c == 'q': print "Exiting ..." print "" sys.exit(0)
def showImage(image, name):
    """Helper method for displaying an image"""
    winname = name
    highgui.cvNamedWindow(winname, highgui.CV_WINDOW_AUTOSIZE)
    while True:
        highgui.cvShowImage(winname, image)
        c = highgui.cvWaitKey(0)
        if c == 'q':
            print "Exiting ..."
            print ""
            sys.exit(0)
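# Hypothetical usage of showImage(): load a result from disk and block until
# 'q' is pressed (the file name is an assumption).
img = highgui.cvLoadImage('result.png')
showImage(img, 'analysis result')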
def on_trackbar(position):
    # The next two calls probably have no effect (their results are
    # overwritten by cvCanny below)
    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)  # smooth the image
    cv.cvNot(gray, edge)  # bitwise inversion of the array elements

    # run the Canny edge detector on the gray scale image
    cv.cvCanny(gray, edge, position, position * 3, 3)

    # reset: clear the array
    cv.cvSetZero(col_edge)

    # copy edge points; the edge image acts as the copy mask
    cv.cvCopy(image, col_edge, edge)

    # show the image
    highgui.cvShowImage(win_name, col_edge)
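# on_trackbar() above assumes module-level images prepared during setup. A
# sketch of that setup, mirroring the classic OpenCV edge demo (the file name
# is an assumption):
image = highgui.cvLoadImage('fruits.jpg')
gray = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
edge = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
col_edge = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 3)
cv.cvCvtColor(image, gray, cv.CV_BGR2GRAY)

win_name = "edge"
highgui.cvNamedWindow(win_name, 1)
highgui.cvCreateTrackbar("threshold", win_name, 1, 100, on_trackbar)
on_trackbar(1)      # draw once before any slider movement
highgui.cvWaitKey(0)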
def recognize_face():
    try:
        argsnum = len(sys.argv)
        print "args:", argsnum
        #if argsnum < 5:
        #    print "usage:python pyfaces.py imgname dirname numofeigenfaces threshold "
        #    sys.exit(2)
        #imgname = sys.argv[1]
        #dirname = sys.argv[2]
        #egfaces = int(sys.argv[3])
        #thrshld = float(sys.argv[4])

        capture = hg.cvCreateCameraCapture(0)
        hg.cvNamedWindow("Snapshot")
        imgname = 'sample.png'
        dirname = 'images'
        egfaces = 5
        thrshld = 0.3

        # show the live feed until 'c' is pressed, then keep that frame
        while 1:
            frame = hg.cvQueryFrame(capture)
            hg.cvShowImage("Snapshot", frame)
            key = hg.cvWaitKey(5)
            if key == 'c' or key == 'C':
                hg.cvDestroyWindow("Snapshot")
                hg.cvSaveImage(imgname, frame)
                global_frame = frame
                break

        pyf = PyFaces(imgname, dirname, egfaces, thrshld, frame)
        # if pyfaces returns false then save this image into images folder
        hg.cvReleaseCapture(capture)
        return pyf.getFileName()
    except Exception, detail:
        print detail
        print "usage:python pyfaces.py imgname dirname numofeigenfaces threshold "
def compareImages(img1, img2, name1, name2):
    # Do NOT save images to disk in this method
    winname1 = name1
    winname2 = name2
    highgui.cvNamedWindow(winname1, highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow(winname2, highgui.CV_WINDOW_AUTOSIZE)
    while True:
        highgui.cvShowImage(winname1, img1)
        highgui.cvShowImage(winname2, img2)
        c = highgui.cvWaitKey(0)
        if c == 'q':
            print "Exiting ..."
            print ""
            sys.exit(0)
def main(): usage = "%s [options] <imgfile> " % (sys.argv[0]) version = "%prog 0.2\n Longbin Chen, [email protected]" oparser = optparse.OptionParser(usage=usage, version=version) oparser.add_option('-d', '--display', action="store_true", dest = 'display', default = False, help = 'display the image') oparser.add_option('-c','--contour', action="store_true", dest = 'contour', default = False, help = 'show object contour') oparser.add_option('-i','--image', action="store_true", dest = 'image', default = False, help = 'show original images') oparser.add_option('-n', '--number', dest = 'num', type='int', default = 200 , help = 'the number of feature points') oparser.add_option('-x','--enlarge', dest = 'enlarge', default = 1.0 , type = float, help = 'resize images, default:1.0') oparser.add_option('-o', '--output', dest = 'output', default = None, help = 'output file') oparser.add_option('-p', '--pointfile', dest = 'pointfile', default = None, help = 'use pointfile ') oparser.add_option('-r', '--harris', dest = 'harris', default = False, action = "store_true", help = 'use harris detector') oparser.add_option('-s', '--save', dest = 'save', default = None, help = 'save the img file') (options, args) = oparser.parse_args(sys.argv) if len(args) != 2: oparser.parse_args([sys.argv[0], "--help"]) sys.exit(1) if (options.pointfile == None and options.harris == None): print >> sys.stderr, "either of pointfile and harris can be valid" sys.exit(1) highgui.cvNamedWindow ("Corner1", 1) ct = Linker(options.contour, options.image, options.enlarge, options.num) if (options.pointfile): ct.LoadPoints(options.pointfile) ct.LinkPoints(args[1]) else: ct.HarrisPoints(args[1]) ct.LinkPoints(args[1]) highgui.cvShowImage ("Corner1", ct.drawimg) highgui.cvWaitKey (0) if (options.save): highgui.cvSaveImage(options.save, ct.drawimg) if (options.output): f = open(options.output, "w") f.write(OUT.getvalue()) f.close() OUT.close()
def startChroma(background, frameWidth, frameHeight):
    #cvNamedWindow("Original")
    cvNamedWindow("Chroma")

    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    transparency = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    key = -1
    while key == -1:
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            sys.exit(1)
        frame = cvQueryFrame(CAM)

        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)

        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame)
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)

        cvAnd(ThHFrame, ThSFrame, mask)
        cvCopy(background, frame, mask)

        cvShowImage("Chroma", frame)
        key = cvWaitKey(10)

    cvDestroyWindow("Chroma")
def opencvSnap(dev, size):
    """
    An example use of the "camera" taking a single picture frame using
    opencv's cvMat as the return method.
    """
    # First lets take a picture using opencv, and display it using opencv...
    cvWin = hg.cvNamedWindow("Opencv Rendering and Capture", 0)

    print("Opening device %s, with video size (%s,%s)" % (dev, size[0], size[1]))

    # creates the camera of the specified size and in RGB colorspace
    cam = Camera(dev, size, "RGB")
    a = cam.get_image()
    hg.cvShowImage('Opencv Rendering and Capture', a)

    # close the capture stream to avoid problems later; should see the camera turn off
    hg.cvReleaseCapture(cam.capture)
    del cam

    # Wait for any key then clean up
    print("Press any key to continue")
    k = hg.cvWaitKey()
    hg.cvDestroyWindow("Opencv Rendering and Capture")
def display_images(image_list, max_x=1200, max_y=1000, save_images=False):
    """
    Display a list of OpenCV images tiled across the screen
    with maximum width of max_x and maximum height of max_y

    save_images - will save the images (with timestamp)
    """
    curtime = time.localtime()
    date_name = time.strftime('%Y_%m_%d_%I%M%S', curtime)

    loc_x, loc_y = 0, 0
    wins = []
    for i, im in enumerate(image_list):
        if save_images:
            if im.nChannels == 1 and im.depth == cv.IPL_DEPTH_32F:
                clr = cv.cvCreateImage(cv.cvSize(im.width, im.height),
                                       cv.IPL_DEPTH_8U, 1)
                cv.cvConvertScale(im, clr, 255.0)
                im = clr
            highgui.cvSaveImage('image%d_' % i + date_name + '.png', im)

        window_name = 'image %d' % i
        wins.append((window_name, im))
        highgui.cvNamedWindow(window_name, highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvMoveWindow(window_name, loc_x, loc_y)
        loc_x = loc_x + im.width
        if loc_x > max_x:
            loc_x = 0
            loc_y = loc_y + im.height
            if loc_y > max_y:
                loc_y = 0

    while True:
        for name, im in wins:
            highgui.cvShowImage(name, im)
        keypress = highgui.cvWaitKey(10)
        if keypress == '\x1b':
            break
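# Hypothetical usage of display_images(): tile two images across the screen
# (file names are assumptions); press ESC to dismiss.
imgs = [highgui.cvLoadImage('left.png'), highgui.cvLoadImage('right.png')]
display_images(imgs, max_x=1200, max_y=1000, save_images=False)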
def visualize(eigens):
    l1 = eigens[:, :, 0]
    l2 = eigens[:, :, 1]
    m1 = np.min(l1)
    m2 = np.min(l2)
    r1 = np.max(l1) - m1
    r2 = np.max(l2) - m2
    if r1 == 0:
        r1 = 1
    if r2 == 0:
        r2 = 1
    l1cv = ut.np2cv(np.array((1 - ((l1 - m1) / r1)) * 255, dtype='uint8'))
    l2cv = ut.np2cv(np.array((1 - ((l2 - m2) / r2)) * 255, dtype='uint8'))
    hg.cvNamedWindow('eigen value 1', 1)
    hg.cvNamedWindow('eigen value 2', 1)
    hg.cvShowImage('eigen value 1', l1cv)
    hg.cvShowImage('eigen value 2', l2cv)
    while True:
        k = hg.cvWaitKey(33)
        if k == ' ':
            return
        if k == 'x':
            exit()
def main():
    ct = Corner()
    usage = "%s [options] <imgfile>" % (sys.argv[0])
    version = "%prog 0.2\n Longbin Chen, [email protected]"
    oparser = optparse.OptionParser(usage=usage, version=version)
    oparser.add_option('-d', '--display', action="store_true", dest='display',
                       default=False, help='display the image')
    oparser.add_option('-n', '--number', dest='num', type='int', default=200,
                       help='the number of feature points')
    oparser.add_option('-o', '--output', dest='output', default=None,
                       help='output file')
    oparser.add_option('-s', '--save', dest='save', default=None,
                       help='save the img file')
    (options, args) = oparser.parse_args(sys.argv)

    if len(args) != 2:
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)

    ct.GetCorner(args[1], options.num)
    if options.display:
        ct.DrawKeyPoints()
        highgui.cvNamedWindow("Corner1", 1)
        highgui.cvShowImage("Corner1", ct.drawimg)
        highgui.cvWaitKey(0)
    if options.save:
        highgui.cvSaveImage(options.save, ct.drawimg)
def process(self, take_new_image=True):
    """We will take a snapshot, optionally do some arbitrary process (eg in
    numpy/scipy) then display it. If a frame is given use that instead of
    taking a new image.
    """
    try:
        if take_new_image:
            logging.debug("capturing an image")
            self.snapshot = cv.cvCloneMat(hg.cvQueryFrame(self.camera))
        if self.processFunction is not None:
            logging.debug("Sending image to process function")
            res = self.processFunction(self.snapshot)
            logging.debug("Received result from processing function")
            assert isinstance(res, cv.CvMat), "Not CvMat"
            self.snapshot = res
        if self.show:
            hg.cvShowImage(self.title, self.snapshot)
    except Exception, e:
        # If something goes wrong make sure we close the window
        logging.error("Error in processing image: %s" % e)
        hg.cvDestroyWindow(self.title)
        raise SystemExit
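# A sketch of a processFunction that process() above could call: it must
# accept and return a cv.CvMat. This inverting filter is an illustration,
# not part of the original code:
def invert_filter(mat):
    out = cv.cvCloneMat(mat)
    cv.cvNot(mat, out)   # per-element bitwise inversion
    return out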
def main():
    if len(sys.argv) < 2:
        print "throw an image my way"
        sys.exit(-1)

    print "I'm testing"

    class Phony():
        pass

    tmp = Phony()
    setattr(tmp, 'id', 1)
    setattr(tmp, 'location', sys.argv[1])
    test = Painting(tmp)
    print test.getWidth()
    print test.getHeight()

    winname = "Test"
    highgui.cvNamedWindow(winname, highgui.CV_WINDOW_AUTOSIZE)
    while True:
        highgui.cvShowImage(winname, test.getImage())
        c = highgui.cvWaitKey(0)
        if c == 'q':
            sys.exit(0)
#if not writer:
#    print "Error opening writer"
#    sys.exit(1)

print "starting loop"
while 1:  # do forever
    # 1. capture the current image (cvQueryFrame grabs and retrieves in one call)
    frame = highgui.cvQueryFrame(capture)
    #frame = highgui.cvGrabFrame(capture)
    #img = highgui.cvRetrieveFrame(capture)
    #cvGrabFrame(capture)             # capture a frame
    #img = cvRetrieveFrame(capture)   # retrieve the captured frame
    #cvWriteFrame(writer, img)        # add the frame to the file

    highgui.cvShowImage('Camera', frame)

    # handle events
    #k = highgui.cvWaitKey()
    #print
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            highgui.cvReleaseVideoWriter(writer)
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture(capture)
            pygame.quit()
            #sys.exit()
            break
    keyinput = pygame.key.get_pressed()
cv.cvFlip(frame, None, 1)
cv.cvCvtColor(frame, my_grayscale, cv.CV_RGB2GRAY)
cv.cvThreshold(my_grayscale, my_grayscale, 128, 255, cv.CV_THRESH_BINARY)
if not blob_overlay:
    # Convert black-and-white version back into three-color representation
    cv.cvCvtColor(my_grayscale, frame, cv.CV_GRAY2RGB)

myblobs = CBlobResult(my_grayscale, mask, 100, True)
myblobs.filter_blobs(10, 10000)
blob_count = myblobs.GetNumBlobs()

for i in range(blob_count):
    my_enumerated_blob = myblobs.GetBlob(i)
    # print "%d: Area = %d" % (i, my_enumerated_blob.Area())
    my_enumerated_blob.FillBlob(frame, hsv2rgb(i * 180.0 / blob_count), 0, 0)

# we can now display the images
highgui.cvShowImage('Blob View', frame)

# handle events
k = highgui.cvWaitKey(10)
if k == '\x1b':
    # user has pressed the ESC key, so exit
    break
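# hsv2rgb() above is assumed to map a hue (in degrees) to an RGB tuple used
# to color each blob. A minimal sketch using the standard library:
import colorsys

def hsv2rgb(hue_degrees):
    # full saturation and value; returns (r, g, b) in 0..255
    r, g, b = colorsys.hsv_to_rgb((hue_degrees % 360) / 360.0, 1.0, 1.0)
    return (int(r * 255), int(g * 255), int(b * 255))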
def main(args):
    global capture
    global hmax, hmin

    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Saturation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Saturation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)

    highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255, change_brightness)
    highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin)
    highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax)
    highgui.cvCreateTrackbar("smin Trackbar", "Saturation", smin, 255, change_smin)
    highgui.cvCreateTrackbar("smax Trackbar", "Saturation", smax, 255, change_smax)
    highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin)
    highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax)

    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)

    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)

    hsv = cv.cvCreateImage(frameSize, 8, 3)
    mask = cv.cvCreateImage(frameSize, 8, 1)
    hue = cv.cvCreateImage(frameSize, 8, 1)
    saturation = cv.cvCreateImage(frameSize, 8, 1)
    value = cv.cvCreateImage(frameSize, 8, 1)
    laser = cv.cvCreateImage(frameSize, 8, 1)

    while 1:
        frame = highgui.cvQueryFrame(capture)

        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv, hsv_min, hsv_max, mask)
        cv.cvSplit(hsv, hue, saturation, value, None)

        # threshold each channel in place
        cv.cvInRangeS(hue, hmin, hmax, hue)
        cv.cvInRangeS(saturation, smin, smax, saturation)
        cv.cvInRangeS(value, vmin, vmax, value)
        #cv.cvInRangeS(hue, 0, 180, hue)

        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)

        cenX, cenY = averageWhitePoints(laser)
        #print cenX, cenY
        draw_target(frame, cenX, cenY)
        #draw_target(frame, 200, 1)

        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Hue', hue)
        highgui.cvShowImage('Saturation', saturation)
        highgui.cvShowImage('Value', value)
        highgui.cvShowImage('Laser', laser)

        k = highgui.cvWaitKey(10)
        if k == " ":
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture(capture)
            sys.exit()
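# averageWhitePoints() is assumed to return the centroid of the white pixels
# of a binary image. A simple (and slow) sketch using per-pixel access:
def averageWhitePoints(img):
    sx = sy = count = 0
    for y in range(img.height):
        for x in range(img.width):
            if cv.cvGet2D(img, y, x)[0] > 0:   # nonzero -> counted as white
                sx += x
                sy += y
                count += 1
    if count == 0:
        return 0, 0
    return sx / count, sy / count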
def run(exposure, video=None, display=False, debug=False):
    if display:
        hg.cvNamedWindow("video", 1)
        hg.cvMoveWindow("video", 0, 0)

    if debug:
        hg.cvNamedWindow('right', 1)
        hg.cvMoveWindow("right", 800, 0)
        hg.cvNamedWindow("thresholded", 1)
        hg.cvNamedWindow('motion', 1)
        hg.cvNamedWindow('intensity', 1)
        hg.cvMoveWindow("thresholded", 800, 0)
        hg.cvMoveWindow("intensity", 0, 600)
        hg.cvMoveWindow("motion", 800, 600)

    if video is None:
        #video = cam.VidereStereo(0, gain=96, exposure=exposure)
        video = cam.StereoFile('measuring_tape_red_left.avi',
                               'measuring_tape_red_right.avi')

    frames = video.next()
    detector = LaserPointerDetector(frames[0], LaserPointerDetector.SUN_EXPOSURE,
                                    use_color=False, use_learning=True)
    detector_right = LaserPointerDetector(frames[1], LaserPointerDetector.SUN_EXPOSURE,
                                          use_color=False, use_learning=True,
                                          classifier=detector.classifier)
    stereo_cam = cam.KNOWN_CAMERAS['videre_stereo2']

    # warm the detectors up on a few frames before the main loop
    for i in xrange(10):
        frames = video.next()
        detector.detect(frames[0])
        detector_right.detect(frames[1])

    lt = cv.cvCreateImage(cv.cvSize(640, 480), 8, 3)
    rt = cv.cvCreateImage(cv.cvSize(640, 480), 8, 3)
    for l, r in video:
        start_time = time.time()
        #l = stereo_cam.camera_left.undistort_img(l)
        #r = stereo_cam.camera_right.undistort_img(r)
        cv.cvCopy(l, lt)
        cv.cvCopy(r, rt)
        l = lt
        r = rt
        undistort_time = time.time()

        _, _, right_cam_detection, stats = detector_right.detect(r)
        if debug:
            draw_blobs(r, stats)
            draw_detection(r, right_cam_detection)
            hg.cvShowImage('right', r)

        image, combined, left_cam_detection, stats = detector.detect(l)
        detect_time = time.time()

        if debug:
            motion, intensity = detector.get_motion_intensity_images()
            show_processed(l, [combined, motion, intensity],
                           left_cam_detection, stats, detector)
        elif display:
            #draw_blobs(l, stats)
            draw_detection(l, left_cam_detection)
            hg.cvShowImage('video', l)
            hg.cvWaitKey(10)

        if right_cam_detection is not None and left_cam_detection is not None:
            x = np.matrix(left_cam_detection['centroid']).T
            xp = np.matrix(right_cam_detection['centroid']).T
            result = stereo_cam.triangulate_3d(x, xp)
            print '3D point located at', result['point'].T,
            print 'distance %.2f error %.3f' % (np.linalg.norm(result['point']),
                                                result['error'])
        triangulation_time = time.time()

        diff = time.time() - start_time
        print 'Main: Running at %.2f fps, took %.4f s' % (1.0 / diff, diff)
#! /usr/bin/env python
import opencv
from opencv import highgui

cap = highgui.cvCreateFileCapture("../c/tree.avi")
img = highgui.cvQueryFrame(cap)
print "Got frame of dimensions (", img.width, " x ", img.height, " )"

highgui.cvNamedWindow("win", highgui.CV_WINDOW_AUTOSIZE)
highgui.cvShowImage("win", img)
highgui.cvMoveWindow("win", 200, 200)
highgui.cvWaitKey(0)
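# The script above displays only the first frame. A sketch of playing the
# whole clip at roughly 30 fps; ESC aborts early:
while True:
    img = highgui.cvQueryFrame(cap)
    if img is None:
        break
    highgui.cvShowImage("win", img)
    if highgui.cvWaitKey(33) == '\x1b':
        break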
cv.cvCalcBackProject(hue, backproject, obj_hist)
cv.cvAnd(backproject, mask, backproject)
niter, track_comp, track_box = cv.cvCamShift(
    backproject, track_window,
    cv.cvTermCriteria(cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1),
    track_comp, track_box)
track_window = track_comp.rect
#if backproject_mode:
#    cvCvtColor(backproject, image, CV_GRAY2BGR)
if not frame.origin:
    track_box.angle = -track_box.angle
cv.cvEllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

# we can now display the images
highgui.cvShowImage('Camera', frame)
highgui.cvShowImage('Histogram', histimg)

# handle events
k = highgui.cvWaitKey(10)
if k == '\x1b':
    # user has pressed the ESC key, so exit
    break

highgui.cvReleaseCapture(capture)
def callback_image(im):
    t = time.time()
    cvim = pyrob.util.ros2cv(im)
    hg.cvShowImage('left', cvim)
    hg.cvWaitKey(5)
    print 'total', time.time() - t
# -*- coding:utf8 -*-
import opencv
from opencv import highgui as hg

capture = hg.cvCreateCameraCapture(0)
hg.cvNamedWindow("Snapshot")

frames = []
for i in range(10):
    frame = hg.cvQueryFrame(capture)
    frames.append(opencv.cvClone(frame))
    hg.cvShowImage("Snapshot", frame)
    hg.cvWaitKey(1000)

hg.cvNamedWindow("hello")
for i in range(10):
    hg.cvShowImage("hello", frames[i])
    hg.cvWaitKey(1000)

"""
import copy
dst = copy.copy(frames[1])
opencv.cvSub(frames[2], frames[1], dst)
hg.cvShowImage("Snapshot", dst)
from IPython.Shell import IPShellEmbed
IPShellEmbed()()
hg.cvWaitKey(10000)
"""
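# A sketch of the frame-differencing idea from the commented block above,
# using cvAbsDiff (order-independent) instead of cvSub:
diff = opencv.cvCloneImage(frames[0])
opencv.cvAbsDiff(frames[2], frames[1], diff)
hg.cvShowImage("Snapshot", diff)
hg.cvWaitKey(0)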