def __init__(self, device=0):
    self.__capture = cv.CaptureFromCAM(device)
    self.working = True
    if not self.__capture:
        print "Camera not working"
        self.working = False
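# A minimal sketch of a stricter liveness check, assuming only the cv module:
# CaptureFromCAM() often returns a capture object even when no camera exists,
# so querying an actual frame is the more reliable test than truthiness.
def camera_is_working(device=0):
    capture = cv.CaptureFromCAM(device)
    return capture is not None and cv.QueryFrame(capture) is not None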
import cv

capture = cv.CaptureFromCAM(-1)  # -1 selects the first available camera, usually /dev/video0 on Linux
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)

im = cv.QueryFrame(capture)
cv.SaveImage("/var/lib/cloud9/Dish-Detector/sink-latest.jpg", im)

edges = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)

# convert the image to grayscale
cv.CvtColor(im, edges, cv.CV_BGR2GRAY)

# edge detect it, then smooth the edges
#cv.Canny(edges, edges, 5, 300, 3)
#cv.Smooth(edges, edges, cv.CV_GAUSSIAN, 5, 5)
thresh = 100
cv.Canny(edges, edges, thresh, thresh / 2, 3)
cv.Smooth(edges, edges, cv.CV_GAUSSIAN, 3, 3)
cv.SaveImage("/var/lib/cloud9/Dish-Detector/sink-latest-edges.jpg", edges)

# create storage for Hough circles
storage = cv.CreateMat(640, 1, cv.CV_32FC3)

# find the circles; most of these parameters are magic numbers that work
# well enough for where the camera is installed
#cv.HoughCircles(edges, storage, cv.CV_HOUGH_GRADIENT, 2, 50, 5, 300)
cv.HoughCircles(edges, storage, cv.CV_HOUGH_GRADIENT, 2, edges.width / 10, thresh, 350, 0, 0)

f = open("/var/lib/cloud9/Dish-Detector/sink-empty.txt", "r")
drains = []
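# Each row that HoughCircles writes into `storage` is a 3-channel float
# holding (center x, center y, radius). A minimal sketch of reading the
# detections back out, mirroring the access pattern used elsewhere in this
# collection:
for i in range(storage.rows):
    x, y, radius = storage[i, 0]
    print "circle at (%d, %d) with radius %d" % (int(x), int(y), int(radius))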
import cv
import numpy as np
import serial

# initialization of camera and serial port connection
capture = cv.CaptureFromCAM(3)
ser = serial.Serial("/dev/ttyUSB1")
ser.baudrate = 115200

# s - straight; b - backwards; f - forward; l - left; r - right
direction = "s"
ser.write("b")
ser.write("b")
ser.write(direction)

frame = cv.QueryFrame(capture)
length = frame.width
mid = [round(length / 2 - 0.5), round(length / 2 + 0.5)]

loopCounter = 0
while True:
    loopCounter = loopCounter + 1
    leftCount = 0
    rightCount = 0
    frame = cv.QueryFrame(capture)
    k = cv.WaitKey(10)
    if loopCounter > 20:
        # "confirm" the direction from time to time in case of a possible error
        ser.write(direction)
        print "send direction: ", direction
def main(debug=False, fromCam=False):
    # threshold for Canny edge detection
    thresh = 200

    # min and max radius for the plughole
    plugradius = [0, 0]

    # coordinates of the plughole [coord, +-tolerance]
    sinkx = [0, 0]
    sinky = [0, 0]

    # load the sink definitions from the settings file
    f = open("/var/lib/cloud9/Dish-Detector/settings", "r")
    for line in f:
        tok = line.split("=")
        if tok[0] == "radius":
            plugradius = [int(p) for p in tok[1].split(",")]
        elif tok[0] == "x":
            sinkx = [int(p) for p in tok[1].split(",")]
        elif tok[0] == "y":
            sinky = [int(p) for p in tok[1].split(",")]
    print "sink at: " + str(sinkx[0]) + ":" + str(sinky[0])

    # get an image from our source
    im = None
    if fromCam:
        capture = cv.CaptureFromCAM(-1)  # -1 selects the first available camera, usually /dev/video0 on Linux
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
        im = cv.QueryFrame(capture)
    else:
        im = cv.LoadImage(sys.argv[1])

    # work out the brightness of the image
    bright = getBrightness(im)
    print "image brightness = ", bright

    # if it's too dark, shut the alarms up for the night
    if bright < 30:
        #alarm = alarms()
        #alarm.stopAllAlarms()
        print "Stopping all alarms as it's night time; the alarm count will continue in the morning"
        exit()

    # create grayscale and edge storage
    gray = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)
    edges = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)

    # convert the image to grayscale
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)

    # edge detect it, then smooth the edges
    cv.Canny(gray, edges, thresh, thresh / 2, 3)
    cv.Smooth(edges, gray, cv.CV_GAUSSIAN, 3, 3)

    # create storage for Hough circles
    storage = cv.CreateMat(640, 1, cv.CV_32FC3)

    # find the circles; most of these parameters are magic numbers that work
    # well enough for where the camera is installed
    cv.HoughCircles(gray, storage, cv.CV_HOUGH_GRADIENT, 2, gray.width / 18, thresh, 300, 0, 0)

    # how many circles have we detected?
    detectedCircles = 0

    # for each circle detected...
    for i in range(storage.rows):
        val = storage[i, 0]  # each row is a 3-channel float: (x, y, radius)
        radius = int(val[2])
        center = (int(val[0]), int(val[1]))
        print "circular feature at: " + str(center), "size: ", str(radius)

        sinkFound = False
        # try to classify this circle as the plughole
        if sinkx[0] - sinkx[1] < center[0] < sinkx[0] + sinkx[1]:
            if sinky[0] - sinky[1] < center[1] < sinky[0] + sinky[1]:
                # plugradius is now min/max
                if plugradius[0] < radius < plugradius[1]:
                    print "..probably the PLUGHOLE"
                    cv.Circle(im, center, radius, (255, 0, 255), 3, 8, 0)
                    sinkFound = True
                else:
                    print "..PH failed radius check"
            else:
                print "..PH failed Y check"
        else:
            print "..PH failed X check"

        if not sinkFound:
            print "..probably some unwashed dish"
            detectedCircles = detectedCircles + 1
            cv.Circle(im, center, radius, (0, 0, 255), 3, 8, 0)

    print "detected circles: ", detectedCircles

    # create our alarm object to trigger annoyances
    #alarm = alarms()

    if detectedCircles > 0:
        # read the last status from the file and update it to now; also consult
        # the Table-o-Annoyance(tm) to see if we set off an alarm/explosion
        # (lots of this could be tidied up)
        f = open("status", "r")
        stat = f.readline().strip()
        f.close()
        if stat == "clean":
            print "last status was: " + stat + ", changing it to DIRTY"
            f = open("status", "w")
            f.write("DIRTY")
            f.close()
            #alarm.doAlarm(0)
        else:
            # just update the dish counter
            print "updating dish counter"
            f = open("dishcount", "r")
            ct = int(f.readline().strip())
            f.close()
            ct = ct + 1
            f = open("dishcount", "w")
            f.write(str(ct))
            f.close()
    else:
        print "Last status was dirty and now we're CLEAN"
        f = open("status", "w")
        f.write("clean")
        f.close()
        print "resetting dishcount"
        f = open("dishcount", "w")
        f.write("0")
        f.close()
        # kill ALL the alarms \o/
        #alarm.stopAllAlarms()

    # if debugging, display each stage of the process in a cv window;
    # useful when configuring things
    if debug:
        cv.NamedWindow('Circles')
        cv.ShowImage('Circles', im)
        cv.WaitKey(0)
        cv.ShowImage('Circles', edges)
        cv.WaitKey(0)
    else:
        cv.SaveImage("/var/lib/cloud9/Dish-Detector/dishes.jpg", im)
import cv
import datetime

imageWidth = 1024
imageHeight = 480

cv.NamedWindow("window1", cv.CV_WINDOW_AUTOSIZE)
camera_index = 0
capture = cv.CaptureFromCAM(camera_index)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, imageWidth)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, imageHeight)

font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 0.5, 0.5, 0, 1, cv.CV_AA)

while True:
    frame = cv.QueryFrame(capture)
    cv.Rectangle(frame, (0, 0), (imageWidth, 15), (255, 255, 255), cv.CV_FILLED, 8, 0)
    timeStampString = datetime.datetime.now().strftime("%A %Y-%m-%d %I:%M %p")
    cv.PutText(frame, timeStampString, (10, 10), font, (0, 0, 0))
    cv.ShowImage("window1", frame)
    command = cv.WaitKey(10)
    if command == ord('q'):
        print("Ending program")
        break
    elif command == ord('s'):
        print("Saving image")
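# The 's' branch above is truncated before the actual save. A minimal,
# self-contained sketch of a handler it could call; the snapshot filename
# pattern is hypothetical, not from the original:
def save_snapshot(frame):
    # e.g. snapshot-20140101-120000.jpg (hypothetical naming scheme)
    fileName = datetime.datetime.now().strftime("snapshot-%Y%m%d-%H%M%S.jpg")
    cv.SaveImage(fileName, frame)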
# Make three windows; these are referred to by name
cv.NamedWindow("cam_live", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("cam_other", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("cam_histo", cv.CV_WINDOW_AUTOSIZE)

CAM_CFG = {
    'camidx': 0,
    'frameidx': 0,
    'handler': None,
    'buf': [None] * 10,
    'roi': roi
}

# Initialize camera handler
CAM_CFG['handler'] = cv.CaptureFromCAM(CAM_CFG['camidx'])
rawframe = cv.QueryFrame(CAM_CFG['handler'])
frame = cv.GetSubRect(rawframe, roi)

# Make fake calibration frames from random data
darkarr = (np.random.random(cv.GetDims(frame)) * 10.0 / 256).astype(np.float32)
darkframe = array2cv(darkarr)
lastframe = cv.CloneImage(darkframe)
camframe = cv.CloneImage(darkframe)
diffframe = cv.CloneImage(darkframe)

# Make a real flat field
print "Taking 100 flats..."
frame = cv.GetSubRect(cv.QueryFrame(CAM_CFG['handler']), CAM_CFG['roi'])
cv.ConvertScale(frame, camframe, scale=1.0 / 256)
#!/usr/bin/env python
import sys
import math
import optparse
import cv
import serial
import time

cam = cv.CaptureFromCAM(0)
cv.SetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_WIDTH, 960)
# the original set CV_CAP_PROP_FRAME_WIDTH twice; the second call was
# clearly meant to set the height
cv.SetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)

img = cv.QueryFrame(cam)
width, height = cv.GetSize(img)

if __name__ == "__main__":
    while True:
        line = [(width / 2, 0), (width / 2, height), (0, height / 2), (width, height / 2)]
        cv.PolyLine(img, [line], False, (255, 0, 0), 2, 8)
        key = cv.WaitKey(10) % 0x100
        cv.ShowImage('source', img)
        img = cv.QueryFrame(cam)
        if key == 27:
            break
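# Drivers are free to ignore SetCaptureProperty, so the frames may not really
# be 960x720. A minimal sketch of checking what the camera actually delivered,
# using only calls from the legacy cv API:
actual_w = int(cv.GetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_WIDTH))
actual_h = int(cv.GetCaptureProperty(cam, cv.CV_CAP_PROP_FRAME_HEIGHT))
print "requested 960x720, camera delivers %dx%d" % (actual_w, actual_h)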
def colorEdgeDetector():
    # Declare as globals since we are assigning to them now
    global capture
    global camera_index  # the original declared "capture_index", which is never used below

    # Initialize the tesseract OCR engine
    api = tesseract.TessBaseAPI()
    api.Init(".", "eng", tesseract.OEM_DEFAULT)
    api.SetPageSegMode(tesseract.PSM_SINGLE_LINE)

    # Size of the peeking window around a candidate plate
    xDiff = 120.0
    yDiff = 40.0
    rectArea = int(xDiff * 2) * int(yDiff * 2)

    # Capture the current frame
    frame = cv.QueryFrame(capture)

    # Scratch images used to manipulate the frame
    threshold_frame = cv.CreateImage(cv.GetSize(frame), 8, 1)
    hsv_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)

    while True:
        # Copy the original frame to display later
        original_frame = cv.CloneImage(frame)

        # Use a Sobel filter to detect edges
        cv.Smooth(frame, frame, cv.CV_BLUR, 3, 3)
        cv.Sobel(frame, frame, 2, 0, 5)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3, 3)

        # Convert the frame to HSV
        cv.CvtColor(frame, hsv_frame, cv.CV_BGR2HSV)

        # Remove all pixels that aren't a teal color: RGB(0, 180, 170) / HSV(170, 75, 100)
        cv.InRangeS(hsv_frame, (70, 150, 150), (100, 255, 255), threshold_frame)

        # Get moments to see if what was found is a license plate or not
        moments = cv.Moments(cv.GetMat(threshold_frame, 1), 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        if area > 60000:
            # Determine the x and y coordinates of the center of the object
            x = cv.GetSpatialMoment(moments, 1, 0) / area
            y = cv.GetSpatialMoment(moments, 0, 1) / area

            # Retrieve the candidate license plate and test its characters
            if int(x - xDiff) > 0 and int(y - yDiff) > 0 and \
                    (int(x - xDiff) + int(xDiff * 2)) < 640 and \
                    (int(y - yDiff) + int(yDiff * 2)) < 480:
                candidate = cv.GetSubRect(original_frame,
                                          (int(x - xDiff), int(y - yDiff),
                                           int(xDiff * 2), int(yDiff * 2)))
                candidateImg = cv.CreateImage(cv.GetSize(candidate), 8, 3)
                cv.Convert(candidate, candidateImg)
                candidateGrey = cv.CreateImage(cv.GetSize(candidate), 8, 1)
                cv.CvtColor(candidateImg, candidateGrey, cv.CV_RGB2GRAY)
                tesseract.SetCvImage(candidateImg, api)
                text = api.GetUTF8Text()
                print "License Plate Characters:", text
                """
                # Regex and this don't seem to work
                if text.isalnum():
                    print "License Plate Characters:", text
                """

            # Draw a circle on the center of the object
            cv.Circle(original_frame, (int(x), int(y)), 2, (255, 255, 255), 10)

            # Rectangle around the candidate region
            cv.Rectangle(original_frame, (int(x - xDiff), int(y - yDiff)),
                         (int(x + xDiff), int(y + yDiff)), cv.CV_RGB(0, 0, 255), 1)

        # Display the image
        cv.ShowImage("CS201 - Tyler Boraski - Final Project", original_frame)

        # Capture a new frame
        frame = cv.QueryFrame(capture)

        # If the wrong camera index is initialized, press "n" to cycle through camera indexes.
        c = cv.WaitKey(10)
        if c == ord('n'):  # WaitKey returns an int, so compare against ord('n'), not the string "n"
            camera_index += 1  # try the next camera index
            capture = cv.CaptureFromCAM(camera_index)
            if not capture:  # if the next camera index didn't work, reset to 0
                camera_index = 0
                capture = cv.CaptureFromCAM(camera_index)

        # If "Esc" is pressed, the program will end
        esc = cv.WaitKey(7) % 0x100
        if esc == 27:
            quit()
def callback(data):
    print "data: " + data.data
    global height
    global width
    global capture
    global capture1
    global fps
    global cam
    global check_fps_set
    global sub

    string = str(data.data)
    b = string.split(',')
    height = str(b[2])
    width = str(b[3])
    prev_cam = cam
    cam = str(b[0])
    fps = str(b[1])

    if prev_cam == str(2):
        #pass
        sub.unregister()

    if cam == str(0):
        capture = None
        capture1 = None
        capture = cv.CaptureFromCAM(0)
        #sub.unregister()
    if cam == str(1):
        capture = None
        capture1 = None
        capture = cv.CaptureFromCAM(1)
        #sub.unregister()
    '''if cam == str(2):
        capture = None
        capture1 = None
        capture = cv.CaptureFromCAM(2)
        sub.unregister()'''
    if cam == str(3):
        capture = None
        capture1 = None
        capture = cv.CaptureFromCAM(3)
        #sub.unregister()
    if cam == str(2):
        capture = None
        capture1 = None
        sub = rospy.Subscriber('/image_raw/compressed', CompressedImage, callback1, queue_size=1)
        check_fps_set = 1
        return
    if cam == str(4):
        capture = None
        capture1 = None
        #sub.unregister()
        capture = cv.CaptureFromCAM(0)
        capture1 = cv.CaptureFromCAM(1)
        check_fps_set = 1
        return

    check_fps_set = 1
    #print "prev_cam: " + str(prev_cam) + " new cam: " + str(cam)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, int(width))
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, int(height))
def __init__(self):
    self.capture = cv.CaptureFromCAM(0)
    self.measureDisplay = True
    self.saveSamples = True
    self.NoOfFrames = 3
import cv  # needed for CaptureFromCAM below
from sensor_msgs.msg import Image
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import time
import os
from subprocess import call
import re

cam_no = [7, 6, 5, 4]
List = [str(i) for i in cam_no]
height = str(640)
width = str(480)
fps = str(30)
cam = "0"
capture = cv.CaptureFromCAM(int(List[0]))
# capture = cv.CaptureFromCAM(1)
capture1 = None
sub = None
frame = None
prev_cam = '0'
check_fps_set = 0

def callback1(ros_data):
    #pub = rospy.Publisher('/chatter', Image)
    #bridge = CvBridge()
    #print 'received image data of type: %s' % ros_data.format
    global sub
    global frame
    global cam
import cv

CAMERA_INDEX = 0

# CODEC = cv.CV_FOURCC('D','I','V','3')  # MPEG 4.3
# CODEC = cv.CV_FOURCC('M','P','4','2')  # MPEG 4.2
# CODEC = cv.CV_FOURCC('M','J','P','G')  # Motion JPEG
# CODEC = cv.CV_FOURCC('U','2','6','3')  # H263
# CODEC = cv.CV_FOURCC('I','2','6','3')  # H263I
# CODEC = cv.CV_FOURCC('F','L','V','1')  # FLV
CODEC = cv.CV_FOURCC('P', 'I', 'M', '1')  # MPEG-1
CODEC = cv.CV_FOURCC('D', 'I', 'V', 'X')  # MPEG-4 (this last assignment wins)

# Initialize the camera for video capture
capture = cv.CaptureFromCAM(CAMERA_INDEX)

# Initialize the video writer to write the file
writer = cv.CreateVideoWriter(
    '/Users/sean/Desktop/out.avi',  # Filename
    CODEC,                          # Codec for compression
    25,                             # Frames per second
    (640, 480),                     # Width / Height tuple
    True                            # Color flag
)

# Capture 25 frames and write each one to the file
for i in range(0, 25):
    print 'frame #:', i
    frame = cv.QueryFrame(capture)
    cv.ShowImage("w1", frame)
    cv.WriteFrame(writer, frame)
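# CreateVideoWriter can silently produce a broken file if the frames written
# do not match the (640, 480) size declared above. A minimal sketch of
# checking for that mismatch before recording, assuming the capture object
# from the snippet above:
frame = cv.QueryFrame(capture)
if cv.GetSize(frame) != (640, 480):
    print "camera delivers", cv.GetSize(frame), "- adjust the writer size to match"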
def on_module_selected(self):
    if not self.loaded:
        print "Loading User module"
        self.face_button = PictureChooserButton(num_cols=4, button_picture_size=48, menu_pictures_size=96)
        self.face_button.set_alignment(0.0, 0.5)
        self.face_button.set_tooltip_text(_("Click to change your picture"))
        self.face_photo_menuitem = Gtk.MenuItem.new_with_label(_("Take a photo..."))
        self.face_photo_menuitem.connect('activate', self._on_face_photo_menuitem_activated)
        self.face_browse_menuitem = Gtk.MenuItem.new_with_label(_("Browse for more pictures..."))
        self.face_browse_menuitem.connect('activate', self._on_face_browse_menuitem_activated)

        face_dirs = ["/usr/share/cinnamon/faces"]
        for face_dir in face_dirs:
            if os.path.exists(face_dir):
                pictures = sorted(os.listdir(face_dir))
                for picture in pictures:
                    path = os.path.join(face_dir, picture)
                    self.face_button.add_picture(path, self._on_face_menuitem_activated)

        self.realname_entry = EditableEntry()
        self.realname_entry.connect("changed", self._on_realname_changed)
        self.realname_entry.set_tooltip_text(_("Click to change your name"))

        table = Gtk.Table.new(3, 2, False)
        table.set_row_spacings(8)
        table.set_col_spacings(15)
        self.sidePage.add_widget(table)

        label_picture = Gtk.Label.new(_("Picture:"))
        label_picture.set_alignment(1, 0.5)
        label_picture.get_style_context().add_class("dim-label")
        table.attach(label_picture, 0, 1, 0, 1)

        password_mask = Gtk.Label.new(u'\u2022\u2022\u2022\u2022\u2022\u2022')
        password_mask.set_alignment(0.0, 0.5)
        self.password_button = Gtk.Button()
        self.password_button.add(password_mask)
        self.password_button.set_relief(Gtk.ReliefStyle.NONE)
        self.password_button.set_tooltip_text(_("Click to change your password"))
        self.password_button.connect('activate', self._on_password_button_clicked)
        self.password_button.connect('released', self._on_password_button_clicked)

        label_name = Gtk.Label.new(_("Name:"))
        label_name.set_alignment(1, 0.5)
        label_name.get_style_context().add_class("dim-label")
        table.attach(label_name, 0, 1, 1, 2)

        label_name = Gtk.Label.new(_("Password:"))
        label_name.set_alignment(1, 0.5)
        label_name.get_style_context().add_class("dim-label")
        table.attach(label_name, 0, 1, 2, 3)

        box = Gtk.Box()
        box.pack_start(self.face_button, False, False, 0)
        table.attach(box, 1, 2, 0, 1,
                     xoptions=Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL)
        table.attach(self.realname_entry, 1, 2, 1, 2,
                     xoptions=Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL)
        table.attach(self.password_button, 1, 2, 2, 3,
                     xoptions=Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL)

        current_user = GLib.get_user_name()
        self.accountService = AccountsService.UserManager.get_default().get_user(current_user)
        self.accountService.connect('notify::is-loaded', self.load_user_info)

        self.face_button.add_separator()

        # Probe for a webcam: grab a few frames and see if any come back non-empty
        webcam_detected = False
        try:
            import cv
            capture = cv.CaptureFromCAM(-1)
            for i in range(10):
                img = cv.QueryFrame(capture)
                if img is not None:
                    webcam_detected = True
        except Exception, detail:
            print detail

        if webcam_detected:
            self.face_button.add_menuitem(self.face_photo_menuitem)
            self.face_button.add_menuitem(self.face_browse_menuitem)
        else:
            self.face_button.add_menuitem(self.face_browse_menuitem)
def main():
    color_tracker_window = "output"
    thresh_window = "thresh"
    capture = cv.CaptureFromCAM(-1)
    cv.NamedWindow(color_tracker_window, 1)
    cv.MoveWindow(color_tracker_window, 0, 0)
    cv.NamedWindow(thresh_window, 1)
    cv.MoveWindow(thresh_window, 700, 0)
    imgScrible = None
    storage = None
    global posX
    global posY

    fido.init_servos()
    fido.set_servo(fido.NECK, NECK_DOWN)
    head_x = fido.get_servo_position(fido.HEAD)
    neck_y = fido.get_servo_position(fido.NECK)
    jaw_pos = fido.get_servo_position(fido.JAW)

    #frame = cv.QueryFrame(capture)
    #imgThresh = GetThresholdedImage(frame)

    for f in xrange(2000):
        frame = cv.QueryFrame(capture)
        #cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 9, 9)  # pre-smoothing improves the Hough detector
        #imgScrible = cv.CreateImage(cv.GetSize(frame), 8, 3)
        imgThresh = GetThresholdedImage(frame)

        #if storage is None:
        #    storage = cv.CreateMat(imgThresh.width, 1, cv.CV_32FC3)
        #try:
        #    cv.HoughCircles(imgThresh, storage, cv.CV_HOUGH_GRADIENT, 1, imgThresh.height/4, 50, 20, 10, 240)
        #    circles = np.asarray(storage)
        #except Error, e:
        #    print e
        #    circles = None

        # find the largest circle
        #maxRadius = 0
        #x = 0
        #y = 0
        #found = False
        #if circles is not None:
        #    for i in range(len(circles)):
        #        circle = circles[i]
        #        if circle[2] > maxRadius:
        #            found = True
        #            maxRadius = circle[2]
        #            x = circle[0]
        #            y = circle[1]

        #cvShowImage('Camera', frame)
        #if found:
        #    posX = x
        #    posY = y
        #    print 'ball detected at position: ', x, ',', y, ' with radius: ', maxRadius
        #else:
        #    print 'no ball'

        mat = cv.GetMat(imgThresh)

        # calculate the moments
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)

        # lastX and lastY store the previous positions
        lastX = posX
        lastY = posY

        # look for a big enough blob
        if area > 20000:
            # calculate the coordinates of the centroid
            posX = int(moment10 / area)
            posY = int(moment01 / area)
            print str(datetime.now()), ' x: ' + str(posX) + ' y: ' + str(posY) + \
                ' area: ' + str(area) + ' head_x: ' + str(head_x) + \
                ' neck_y: ' + str(neck_y) + ' jaw_pos: ' + str(jaw_pos)

            # drive the servos to track the movement of the blob
            if lastX > 0 and lastY > 0 and posX > 0 and posY > 0:
                #cv.Circle(imgThresh, (posX, posY), maxRadius, cv.Scalar(0, 0, 255), 3, 8, 0)
                #cv.Line(imgScrible, (posX, posY), (lastX, lastY), cv.Scalar(0, 0, 255), 5)
                if posX < CENTER_X - 10:
                    error_x = (posX - CENTER_X) / MAX_X * (HEAD_RIGHT - HEAD_LEFT)
                    desired_x = int(error_x) / 4 + head_x
                    head_x = desired_x
                    if head_x < HEAD_LEFT:
                        head_x = HEAD_LEFT
                    fido.set_servo(fido.HEAD, head_x)
                elif posX > CENTER_X + 10:
                    new_x = (posX - CENTER_X) / MAX_X * (HEAD_RIGHT - HEAD_LEFT)
                    head_x = int(new_x) / 4 + head_x
                    if head_x > HEAD_RIGHT:
                        head_x = HEAD_RIGHT
                    fido.set_servo(fido.HEAD, head_x)

                if posY < CENTER_Y - 10:
                    new_y = (posY - CENTER_Y) / MAX_Y * (NECK_UP - NECK_DOWN)
                    neck_y = neck_y - (int(new_y) / 8)
                    if neck_y > NECK_UP:
                        neck_y = NECK_UP
                    fido.set_servo(fido.NECK, neck_y)
                elif posY > CENTER_Y + 10:
                    new_y = (posY - CENTER_Y) / MAX_Y * (NECK_UP - NECK_DOWN)
                    neck_y = neck_y - (int(new_y) / 8)
                    if neck_y < NECK_DOWN:
                        neck_y = NECK_DOWN
                    fido.set_servo(fido.NECK, neck_y)

            # open the jaw in proportion to the blob area
            jaw_pos = int((float(area) - 60000.0) / 1000000.0 *
                          (fido.JAW_OPEN - fido.JAW_CLOSED_EMPTY) + fido.JAW_CLOSED_EMPTY)
            jaw_pos = max(min(jaw_pos, fido.JAW_OPEN), fido.JAW_CLOSED_EMPTY)
            fido.set_servo(fido.JAW, jaw_pos)

        # add the layers and store the result in the frame:
        # frame -> the camera stream; imgScrible -> the line tracking the movement of the blob
        #cv.Add(frame, imgScrible, frame)
        cv.ShowImage(thresh_window, imgThresh)
        cv.ShowImage(color_tracker_window, frame)
        c = cv.WaitKey(10)
        if c != -1:
            break

    print "max frames reached, exiting"
def __init__(self):
    self.camera = cv.CaptureFromCAM(-1)
downUpdateTrackbarPos = False
listenToSlider = False

# this function will be called when the trackbar is updated
def onTrackbarSlide(pos):
    global downUpdateTrackbarPos  # without this, the assignments below create a local
    if listenToSlider:
        downUpdateTrackbarPos = True
        cv.SetCaptureProperty(g_capture, cv.CV_CAP_PROP_POS_FRAMES, pos)
        downUpdateTrackbarPos = False

if __name__ == '__main__':
    cv.NamedWindow("Example3", cv.CV_WINDOW_AUTOSIZE)
    if len(sys.argv) == 1:
        g_capture = cv.CaptureFromCAM(0)
        useTrackbar = False
    else:
        g_capture = cv.CreateFileCapture(sys.argv[1])
        useTrackbar = True
        frames = long(cv.GetCaptureProperty(g_capture, cv.CV_CAP_PROP_FRAME_COUNT))
        if frames == 0:
            print "No video"
            loop = False
        else:
            # create the trackbar and attach the function "onTrackbarSlide" to it
            loop = True
            cv.CreateTrackbar("Position", "Example3", g_slider_pos, frames, onTrackbarSlide)
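# The listenToSlider/downUpdateTrackbarPos flags guard against a feedback loop
# when the program itself moves the slider. A minimal sketch of the other
# direction (advancing the trackbar as playback proceeds), assuming the legacy
# cv.SetTrackbarPos binding and the names defined above:
frame_pos = int(cv.GetCaptureProperty(g_capture, cv.CV_CAP_PROP_POS_FRAMES))
listenToSlider = False  # suppress onTrackbarSlide while we move the slider ourselves
cv.SetTrackbarPos("Position", "Example3", frame_pos)
listenToSlider = True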
    # threshold the image
    image_threshed = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    cv.InRangeS(image_hsv, MIN_THRESH, MAX_THRESH, image_threshed)
    return image_threshed

if FOLLOW:
    try:
        panTilt = Serial(SERIAL_DEVICE, 9600)
        panTilt.write('r')
    except:
        print 'Serial() failed'
        exit(1)

# initialize camera feed
capture = cv.CaptureFromCAM(MY_CAMERA)
if not capture:
    print "Could not initialize camera feed!"
    exit(1)

# create display windows
cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('threshed', cv.CV_WINDOW_AUTOSIZE)

# initialize position arrays
positions_x, positions_y = [0] * SMOOTHNESS, [0] * SMOOTHNESS

# read from the camera
while 1:
    image = cv.QueryFrame(capture)
    if not image:
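# The positions_x / positions_y buffers of length SMOOTHNESS in the (truncated)
# snippet above suggest a moving-average smoother for the tracked point. A
# minimal sketch under that assumption; this helper is hypothetical, not from
# the original:
def smooth(positions, new_value):
    positions.pop(0)
    positions.append(new_value)
    return sum(positions) / len(positions)  # integer division is fine for pixel coords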
def __init__(self):
    cv.NamedWindow(color_tracker_window, 1)
    self.capture = cv.CaptureFromCAM(0)
def __init__(self):
    self.capture = cv.CaptureFromCAM(0)
    # the original used curly quotes around the window names, which is a syntax error
    cv.NamedWindow("Target", 1)
    cv.NamedWindow("Threshold1", 1)
    cv.NamedWindow("Threshold2", 1)
    cv.NamedWindow("hsv", 1)
#!/usr/bin/env python
import roslib
roslib.load_manifest('kec_line')
import sys
import rospy
import cv
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

pub = rospy.Publisher('cam', Image)
rospy.init_node('camera_driver')
cam = cv.CaptureFromCAM(201)
bridge = CvBridge()
#r = rospy.Rate(1)

# run the camera at 10Hz to mimic the laser
while not rospy.is_shutdown():
    frame = cv.QueryFrame(cam)
    print frame.width, frame.height
    pub.publish(bridge.cv_to_imgmsg(frame, "bgr8"))
    #r.sleep()
    #cv.WaitKey(10)
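# The commented-out rospy.Rate lines above hint at the intended throttling. A
# minimal sketch of the same loop with the 10Hz rate limit actually applied:
r = rospy.Rate(10)
while not rospy.is_shutdown():
    frame = cv.QueryFrame(cam)
    pub.publish(bridge.cv_to_imgmsg(frame, "bgr8"))
    r.sleep()  # sleep whatever remains of the 100ms budget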
def __init__(self):
    self.logger = Logger.Logger(self.__class__.__name__).get()
    self.logger.debug("initializing camera")
    cam = cv.CaptureFromCAM(self.KAMERA_NR)
def run_real_time_recognition(para_path, Labels):
    # each entry: status, pos, radius, color, text, text pos, font color, ..., ...
    # states:
    #  0 -> waiting to be hovered
    #  1 -> hovered, waiting to be selected (clicked)
    #  2 -> selected, waiting to be unselected (clicked)
    status_dictionary = {}
    start_time = 0
    status_dictionary['b1'] = [False, (530, 70), 60, (255, 255, 0), 'Record', (490, 70), (0, 0, 0), [], False]
    status_dictionary['b2'] = [False, (380, 70), 60, (0, 255, 0), 'Select', (350, 70), (0, 0, 0), [], False]
    status_dictionary['b3'] = [False, (240, 70), 60, (0, 255, 255), 'Billard', (210, 70), (0, 0, 0), [], False]
    status_dictionary['b4'] = [False, (100, 270), 90, (255, 255, 255), 'Drag Me', (70, 270), (0, 0, 0), [], False]

    global depth, ir, rgb
    count = 0
    # frame_size = (480, 640)

    # set up the web cam
    capture = cv.CaptureFromCAM(0)
    fourcc = cv.CV_FOURCC('X', 'V', 'I', 'D')
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FPS, 25)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    # neural-net configuration
    resize_row = 20
    resize_width = 20
    weights = loadmat(para_path)
    T1 = weights['Theta1']
    T2 = weights['Theta2']
    history_prediction = []   # for smoothing and other purposes
    history_gesture_pos = []  # for smoothing and other purposes

    # recording
    record_st = False
    rgb_writer = cv.CreateVideoWriter("recording.avi", cv.CV_FOURCC('X', 'V', 'I', 'D'),
                                      5, (640, 480), True)

    # capture IR, RGB and depth frames
    while True:
        # web cam feed (300, 400, 3)
        rgb_ipl = cv.QueryFrame(capture)
        # depth and IR feed
        (depth, _), (ir, _) = get_depth(), get_video(format=2)
        ir = (ir > 150).astype(float)
        ir = ir * 255
        ir_ipl = resize_ir_callibrate_with_rgb(ir)
        new_rgb_ipl = cv.CreateImage(cv.GetSize(rgb_ipl), 8, 3)

        # Billard mode
        yo = rgb_ipl
        f = iplimage_to_numpy_color(yo)
        green_mono = f[:, :, 1]
        #image = cv.fromarray(np.array(green_mono[:, :]))
        #cv.ShowImage('G', image)
        rgb_np, threshold_np, contour_list = billard_extract_and_draw_countour(f, 20, green_mono, 120, 0)
        image = cv.fromarray(np.array(rgb_np))
        #print contour_list
        maxx = (0, 0, 0, 0)
        for pos in contour_list:
            if pos[1] > maxx[1]:
                maxx = pos
        #print maxx
        for item in contour_list:
            if maxx != item:
                cv.Line(image, (maxx[0] + maxx[2] / 2, maxx[1] + maxx[3] / 2),
                        (item[0] + item[2] / 2, item[1] + item[3] / 2),
                        (0, 255, 0), thickness=1, lineType=8, shift=0)
        #cv.ShowImage('G Threshold', image)
        new_rgb_ipl = cvMat_to_iplimage_color(image)
        #cv.ShowImage('G Threshold', new_rgb_ipl)

        # hand segmentation
        rgb_np, ir_np, contour_list, history_gesture_pos = real_time_extract_and_draw_countour(
            ir_ipl, rgb_ipl, 20000, history_gesture_pos)

        # gesture recognition
        if contour_list:
            ir_ipl, rgb_ipl, history_prediction = real_time_gesture_recognition_and_labeling(
                ir_np, rgb_np, contour_list, T1, T2, Labels, history_prediction, False)
            # update button status
            status_dictionary, start_time = update_button_status(
                contour_list, history_prediction, Labels, status_dictionary,
                history_gesture_pos, False, start_time)
        draw_menu_button(ir_ipl, rgb_ipl, status_dictionary, start_time)

        # resize for full-screen display
        """
        rgb_np = iplimage_to_numpy_color(rgb_ipl)
        rgb_np = imresize(rgb_np, (800, 1066))
        image = cv.fromarray(np.array(rgb_np))
        cv.ShowImage('rgb', image)
        """
        if status_dictionary['b3'][0]:
            opacity = 0.4
            cv.AddWeighted(new_rgb_ipl, opacity, rgb_ipl, 1 - opacity, 0, rgb_ipl)
        if status_dictionary['b1'][0]:
            cv.WriteFrame(rgb_writer, rgb_ipl)
        else:
            record_status = False
        cv.ShowImage('rgb', rgb_ipl)
        cv.ShowImage('ir', ir_ipl)
        c = cv.WaitKey(5)
        if c == 27:  # break if the user presses Esc
            break
def __init__(self): self.capture = cv.CaptureFromCAM(0) cv.NamedWindow("Object", 1)
        if not new_apenom:
            def_nia = nia
            continue
        new_apenom = new_apenom.upper()
        new_apenom = new_apenom.strip()
    else:
        new_email = get_text(None,
                             first_row[0].decode('utf-8') + '\nDNI: ' + nia +
                             '\nNIA: ' + first_row[1] + '\nEnter email:',
                             first_row[2])

    cur.close()
    db.close()

    capture = cv.CaptureFromCAM(webcam)
    # cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 1280)
    # cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 960)
    cv.NamedWindow(nia)
    cv.MoveWindow(nia, xcam, ycam)
    storage = cv.CreateMemStorage()
    cascade = cv.Load('/usr/share/webcam-ds-common/haarcascade_frontalface_alt.xml')

    # get current frame limits
    face_max_x = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    face_max_y = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
import cv
import os

# Configuration:
filtro_de_gauss = 3
tolerancia = 40
filtro_de_dilatacao = 4
filtro_de_erosao = 2
resolucao_largura = 640
resolucao_altura = 480

captura = cv.CaptureFromCAM(1)
cv.SetCaptureProperty(captura, cv.CV_CAP_PROP_FRAME_WIDTH, resolucao_largura)
cv.SetCaptureProperty(captura, cv.CV_CAP_PROP_FRAME_HEIGHT, resolucao_altura)

cv.NamedWindow("Webcam", 1)
cv.NamedWindow("Mascara", 0)
cv.NamedWindow("Binario", 0)
cv.NamedWindow("Regiao de Interesse", 1)
cv.MoveWindow("Regiao de Interesse", 1000, 480)
cv.MoveWindow("Mascara", 0, 500)
cv.MoveWindow("Binario", 400, 500)

arquivo = open('Treino.txt', 'a')
mascara = cv.CreateImage((resolucao_largura, resolucao_altura), 8, 3)
cinza = cv.CreateImage((resolucao_largura, resolucao_altura), 8, 1)

while True:
    print("Please take a photo of the static background of your video.")
    print("Press the space key.")
    if cv.WaitKey(0) % 0x100 == 32:
def init_globals():
    """ sets up the data we need in the global dictionary D """
    # get D so that we can change values in it
    global D

    D.goal = (0, 0)
    D.target_vector = (0, 0)
    D.heading_vector = 0.0

    # our publishers, D.K_PUB
    D.K_PUB = rospy.Publisher('text_data', String)
    D.V_PUB = rospy.Publisher('visual_data', String)
    D.G_PUB = rospy.Publisher('goal_data', String)
    rospy.Subscriber('heading_data', String, heading_callback)

    # put threshold values into D
    D.thresholds = {
        'low_red': 160, 'high_red': 255,
        'low_green': 0, 'high_green': 141,
        'low_blue': 108, 'high_blue': 255,
        'low_hue': 113, 'high_hue': 204,
        'low_sat': 130, 'high_sat': 255,
        'low_val': 106, 'high_val': 255
    }

    # Set up the windows containing the image from the camera,
    # the altered image, and the threshold sliders.
    cv.NamedWindow('image')
    cv.MoveWindow('image', 0, 0)

    cv.NamedWindow('threshold')
    THR_WIND_OFFSET = 640
    if D.half_size:
        THR_WIND_OFFSET /= 2
    cv.MoveWindow('threshold', THR_WIND_OFFSET, 0)

    cv.NamedWindow('sliders')
    SLD_WIND_OFFSET = 1280
    if D.half_size:
        SLD_WIND_OFFSET /= 2
    cv.MoveWindow('sliders', SLD_WIND_OFFSET, 0)

    # Create the sliders within the 'sliders' window
    cv.CreateTrackbar('low_red', 'sliders', D.thresholds['low_red'], 255,
                      lambda x: change_slider('low_red', x))
    cv.CreateTrackbar('high_red', 'sliders', D.thresholds['high_red'], 255,
                      lambda x: change_slider('high_red', x))
    cv.CreateTrackbar('low_green', 'sliders', D.thresholds['low_green'], 255,
                      lambda x: change_slider('low_green', x))
    cv.CreateTrackbar('high_green', 'sliders', D.thresholds['high_green'], 255,
                      lambda x: change_slider('high_green', x))
    cv.CreateTrackbar('low_blue', 'sliders', D.thresholds['low_blue'], 255,
                      lambda x: change_slider('low_blue', x))
    cv.CreateTrackbar('high_blue', 'sliders', D.thresholds['high_blue'], 255,
                      lambda x: change_slider('high_blue', x))
    cv.CreateTrackbar('low_hue', 'sliders', D.thresholds['low_hue'], 255,
                      lambda x: change_slider('low_hue', x))
    cv.CreateTrackbar('high_hue', 'sliders', D.thresholds['high_hue'], 255,
                      lambda x: change_slider('high_hue', x))
    cv.CreateTrackbar('low_sat', 'sliders', D.thresholds['low_sat'], 255,
                      lambda x: change_slider('low_sat', x))
    cv.CreateTrackbar('high_sat', 'sliders', D.thresholds['high_sat'], 255,
                      lambda x: change_slider('high_sat', x))
    cv.CreateTrackbar('low_val', 'sliders', D.thresholds['low_val'], 255,
                      lambda x: change_slider('low_val', x))
    cv.CreateTrackbar('high_val', 'sliders', D.thresholds['high_val'], 255,
                      lambda x: change_slider('high_val', x))

    # Set the method to handle mouse button presses
    cv.SetMouseCallback('image', onMouse, None)

    # We have not created our "scratchwork" images yet
    D.created_images = False

    # Variable for key presses
    D.last_key_pressed = 255

    # Create a connection to the Kinect
    D.bridge = cv_bridge.CvBridge()

    # camera for missile launcher
    D.camera = cv.CaptureFromCAM(-1)
def mainLoop():
    input_video_fn = get_input_video_filename()
    print 'input video filename:', input_video_fn

    # Set up the window objects and environment
    proc_win_name = "Processing window"
    cam_win_name = "Capture from camera"
    proc_win = cv.NamedWindow(proc_win_name, 1)
    cam_win = cv.NamedWindow(cam_win_name, 1)
    if input_video_fn:
        cam = cv.CaptureFromFile(input_video_fn)
    else:
        cam = cv.CaptureFromCAM(0)
    cv.SetMouseCallback(proc_win_name, handle_mouse)
    cv.SetMouseCallback(cam_win_name, handle_mouse)

    msdelay = 3
    initHueThreshold = 42
    initIntensityThreshold = 191
    skin_detector = skin.SkinDetector()
    skin_detector.setHueThreshold(initHueThreshold)
    skin_detector.setIntensityThreshold(initIntensityThreshold)
    cv.CreateTrackbar('hueThreshold', proc_win_name, initHueThreshold, 255,
                      skin_detector.setHueThreshold)
    cv.CreateTrackbar('intensityThreshold', proc_win_name, initIntensityThreshold, 255,
                      skin_detector.setIntensityThreshold)

    session = ImageProcessSession(skin_detector)
    ga = gesture.GestureAnalyzer()
    grammar = Grammar()
    gfn = get_grammar_filename()
    if not gfn:
        print 'usage: python GestureLock.py -g grammar_file.gmr'
        exit(0)
    answer_grammer = read_grammar(gfn)
    im_orig_writer = ImageWriter(output_folder=get_output_folder())
    im_contour_writer = ImageWriter(output_folder='out2')
    prev = []
    while True:
        k = cv.WaitKey(msdelay)
        k = chr(k) if k > 0 else 0
        if handle_keyboard(k) < 0:
            break
        bgrimg = cv.QueryFrame(cam)
        if not bgrimg:
            break
        im_orig_writer.write(bgrimg)
        cv.Flip(bgrimg, None, 1)
        contours = session.process(bgrimg)

        img = cv.CreateImage((bgrimg.width, bgrimg.height), 8, 3)
        # only evaluate the grammar when a gesture was actually recognized,
        # since `ges` is undefined otherwise
        if contours:
            ges, area, depth = ga.recognize(contours)
            x, y, r, b = im.find_max_rectangle(contours)
            cv.Rectangle(img, (x, y), (r, b), im.color.RED)
            cv.DrawContours(img, contours, im.color.RED, im.color.GREEN, 1, thickness=3)
            print ges

            currentInput = grammar.instantGes(ges)
            print currentInput

            if len(prev) >= 2:
                for i, g in enumerate(currentInput):
                    im.puttext(prev[0], str(g), 30, 70 + 40 * i)
                im_contour_writer.write(prev[0])
                prev.append(img)
                prev.pop(0)
            else:
                prev.append(img)
            if grammar == answer_grammer:
                for i, g in enumerate(currentInput):
                    im.puttext(prev[0], str(g), 30, 70 + 40 * i)
                im_contour_writer.write(prev[0])
                im.puttext(prev[0], 'AUTHENTICATED!', 30, 70 + 40 * len(currentInput))
                im_contour_writer.write(prev[0])
                print 'AUTHENTICATED!!!!'
                break
        cv.ShowImage(proc_win_name, img)
def capture():
    """main function"""
    # parse cmd line options
    parser = OptionParser()
    parser.add_option("-0", "--file0", dest="file0", help="path of target file0")
    parser.add_option("-1", "--file1", dest="file1", help="path of target file1")
    (options, args) = parser.parse_args()
    file0 = "out0.avi"
    file1 = "out1.avi"
    if options.file0:
        file0 = options.file0
    if options.file1:
        file1 = options.file1
    print "[INFO ] output file0: " + str(file0)
    print "[INFO ] output file1: " + str(file1)

    # init cams
    cam0 = cv.CaptureFromCAM(0)
    cam1 = cv.CaptureFromCAM(1)

    # check if cams are initialized correctly
    if not cam0:
        print "[ERROR] Could not init cam0"
        return
    if not cam1:
        print "[ERROR] Could not init cam1"
        return

    # skip the first frames since they are normally garbage...
    print "[INFO ] Skipping first 10 frames..."
    for i in xrange(10):
        frame0 = cv.QueryFrame(cam0)
        frame1 = cv.QueryFrame(cam1)

    # init some vars
    # TODO: calc fps and image size correctly and output on console
    writer0 = cv.CreateVideoWriter(file0, cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, (320, 240))
    writer1 = cv.CreateVideoWriter(file1, cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, (320, 240))

    # create some windows to output frames
    cv.NamedWindow("cam0", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow("cam1", cv.CV_WINDOW_AUTOSIZE)

    print "[INFO ] Starting recording..."
    print "[INFO ] To quit press q or esc..."
    while True:
        # save the frames we want...
        frame0 = cv.QueryFrame(cam0)
        frame1 = cv.QueryFrame(cam1)

        # check if frames are valid
        if not frame0:
            print "[ERROR] could not query frame from cam0"
            continue
        if not frame1:
            print "[ERROR] could not query frame from cam1"
            continue

        # write frames to video files
        cv.WriteFrame(writer0, frame0)
        cv.WriteFrame(writer1, frame1)

        # output frames...
        cv.ShowImage("cam0", frame0)
        cv.ShowImage("cam1", frame1)

        key = cv.WaitKey(100)
        if key == 113 or key == 27:  # q or Esc
            break

    # destroy stuff...
    print "[INFO ] destroying opencv objects..."
    cv.DestroyWindow("cam0")
    cv.DestroyWindow("cam1")
    del writer0
    del writer1
    del cam0
    del cam1
    print "[INFO ] everything done... bye..."
def __init__(self, device=-1):
    """Given a device number, provides access to that device; -1, the
    default, means choose any. Unfortunately there is no way of querying
    the devices to find out what each one is."""
    self.vid = cv.CaptureFromCAM(device)
    self.frame = None
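# A minimal sketch of a companion grab method for the class above, caching the
# result as the self.frame attribute suggests; the method name `fetch` is
# hypothetical, not from the original:
def fetch(self):
    """Grab the next frame from the device and cache it."""
    self.frame = cv.QueryFrame(self.vid)
    return self.frame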
def __init__(self, index=0, **k):
    self.cam = cv.CaptureFromCAM(index)
    super(OpenCVCapture, self).__init__(**k)