def scanFaces(src):
    count = 0
    frames = []
    frames.append(src)  # original image
    c = cv.CloneImage(src)
    cv.Flip(c, None, 0)
    frames.append(c)  # flipped copy (around the x-axis)
    dst = cv.CreateImage((src.height, src.width), src.depth, src.channels)
    cv.Transpose(src, dst)
    cv.Flip(dst, None, 0)
    frames.append(dst)  # rotated 90 degrees counter-clockwise
    c2 = cv.CloneImage(src)
    cv.Flip(c2, None, 0)
    dst = cv.CreateImage((src.height, src.width), src.depth, src.channels)
    cv.Transpose(c2, dst)
    frames.append(dst)  # rotated 90 degrees clockwise
    for img in frames:
        count += ifFace(img, (img.width, img.height))
    return count >= 15

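# The snippet above builds its rotated frames with the classic old-API idiom:
# cv.Transpose followed by cv.Flip. A minimal standalone sketch of that idiom
# (the file name "input.jpg" is a placeholder):
import cv  # or: import cv2.cv as cv

src = cv.LoadImage("input.jpg")
# 90 degrees clockwise: transpose, then mirror around the y-axis (flipMode=1)
cw = cv.CreateImage((src.height, src.width), src.depth, src.nChannels)
cv.Transpose(src, cw)
cv.Flip(cw, cw, 1)
# 90 degrees counter-clockwise: transpose, then flip around the x-axis (flipMode=0)
ccw = cv.CreateImage((src.height, src.width), src.depth, src.nChannels)
cv.Transpose(src, ccw)
cv.Flip(ccw, ccw, 0)
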
def Color_callibration(capture):
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please put your color in the circular area. Press ESC to start calibration:'
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Circle(image, (200, 300), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Starting calibration... Analyzing the object...'
    for i in range(0, 100):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_MEDIAN, 3, 0)
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        cv.CvtColor(image, imagehsv, cv.CV_BGR2YCrCb)
        # sample the pixel at (x=200, y=300); Get2D takes (row, col)
        vals = cv.Get2D(imagehsv, 300, 200)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
        cv.PutText(image,
                   " " + str(vals[0]) + "," + str(vals[1]) + "," + str(vals[2]),
                   (200, 300), font, (55, 25, 255))
        for j in range(0, 3):
            if vals[j] < mini[j]:
                mini[j] = vals[j]
            if vals[j] > maxi[j]:
                maxi[j] = vals[j]
        cv.Circle(image, (200, 300), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Analysis complete'
    # widen the sampled range, then clamp to [0, 255]
    mini[0] -= 35
    mini[1] -= 15
    mini[2] -= 15
    maxi[0] += 35
    maxi[1] += 15
    maxi[2] += 15
    for i in range(0, 3):
        if mini[i] < 0:
            mini[i] = 0
        if maxi[i] > 255:
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    bgr = (mini, maxi)
    return bgr

def getMat(self):
    hdc = win32gui.GetWindowDC(self.hwnd)
    dc_obj = win32ui.CreateDCFromHandle(hdc)
    memorydc = dc_obj.CreateCompatibleDC()
    data_bitmap = win32ui.CreateBitmap()
    data_bitmap.CreateCompatibleBitmap(dc_obj, self.width, self.height)
    memorydc.SelectObject(data_bitmap)
    memorydc.BitBlt((0, 0), (self.width, self.height), dc_obj,
                    (self.dx, self.dy), win32con.SRCCOPY)
    bmpheader = struct.pack("LHHHH", struct.calcsize("LHHHH"),
                            self.width, self.height, 1, 24)
    c_bmpheader = ctypes.create_string_buffer(bmpheader)
    # each DIB scanline is padded to a 4-byte boundary:
    # stride = (width * 3 + 3) & -4 for a 24-bit image
    stride = (self.width * 3 + 3) & -4
    c_bits = ctypes.create_string_buffer(" " * (stride * self.height))
    res = ctypes.windll.gdi32.GetDIBits(memorydc.GetSafeHdc(),
                                        data_bitmap.GetHandle(), 0,
                                        self.height, c_bits, c_bmpheader,
                                        win32con.DIB_RGB_COLORS)
    win32gui.DeleteDC(hdc)
    win32gui.ReleaseDC(self.hwnd, hdc)
    memorydc.DeleteDC()
    win32gui.DeleteObject(data_bitmap.GetHandle())
    cv_im = cv.CreateImageHeader((self.width, self.height), cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, c_bits.raw, stride)
    # DIBs are stored bottom-up, so flip around the x-axis
    cv.Flip(cv_im, None, 0)
    mat = cv.GetMat(cv_im)
    return numpy.asarray(mat)

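# For reference, the 4-byte scanline alignment that GetDIBits expects can be
# computed with a small helper (a minimal sketch; the helper name is ours):
def dib_stride(width, bytes_per_pixel=3):
    # round width * bytes_per_pixel up to the next multiple of 4
    return (width * bytes_per_pixel + 3) & -4

buffer_size = dib_stride(641) * 480  # 641 px * 3 = 1923 bytes -> padded to 1924
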
def capture_image(capture, mapx=None, mapy=None):
    img = cv.CloneImage(cv.QueryFrame(capture))
    # flip
    if FLIP_IMAGE:
        img2 = cv.CloneImage(img)
        cv.Flip(img, img2, 0)
        cv.Flip(img2, img, 1)
    # undistort if calibration matrices were given
    if mapx is not None and mapy is not None:
        udimg = cv.CloneImage(img)
        cv.Remap(img, udimg, mapx, mapy)
        img = udimg
    return img

def run(self):
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = False
    while True:
        frame = cv.QueryFrame(self.capture)
        cv.Flip(frame, frame, 0)

        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, frame, cv.CV_RGB2BGR)  # swap channel order first
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.CvtColor(frame, hsv, cv.CV_RGB2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

        # Run the cam-shift
        cv.CalcArrBackProject([self.hue], backproject, hist)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect

        # If the mouse is pressed, highlight the currently selected rectangle
        # and recompute the histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

        if not backproject_mode:
            cv.ShowImage("CamShiftDemo", frame)
        else:
            cv.ShowImage("CamShiftDemo", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
        elif c == ord("b"):
            backproject_mode = not backproject_mode

def rotate_camera(currImage, _90_degrees_steps_anti_clockwise):
    if _90_degrees_steps_anti_clockwise != 2:
        image_size = cv.GetSize(currImage)
        print image_size, currImage.depth, currImage.nChannels
        rotatedImage = cv.CreateImage(image_size, currImage.depth, 3)  # YUV: 8, 3 / GRAY: 8, 1
    else:
        rotatedImage = cv.CloneImage(currImage)
    if _90_degrees_steps_anti_clockwise != 2:
        print currImage, rotatedImage
        # NOTE: cv.Transpose is disabled, so the 90-degree cases below only
        # mirror the image; a true rotation would also need the transpose.
        #cv.Transpose(currImage, rotatedImage)
    if _90_degrees_steps_anti_clockwise == 3:
        cv.Flip(currImage, rotatedImage, 1)
    elif _90_degrees_steps_anti_clockwise == 1:
        cv.Flip(currImage, rotatedImage, 0)
    elif _90_degrees_steps_anti_clockwise == 2:
        cv.Flip(rotatedImage, rotatedImage, -1)
    return rotatedImage

def get_rotated(self, im=None):
    '''
    Return a list of the four possible image rotations
    '''
    l = []
    tmp_im = self.image if im is None else im
    l.append(tmp_im)  # add the image without rotation
    for i in range(3):
        im_rot = cv.CreateImage((tmp_im.height, tmp_im.width), tmp_im.depth,
                                tmp_im.channels)
        cv.Transpose(tmp_im, im_rot)
        cv.Flip(im_rot, im_rot, flipMode=1)  # transpose + mirror = 90 degrees clockwise
        l.append(im_rot)
        tmp_im = im_rot
    return l

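# A hypothetical usage sketch for get_rotated (the `detector` instance and its
# loaded `image` attribute are assumptions, not part of the snippet above):
for i, rotated in enumerate(detector.get_rotated()):
    cv.SaveImage("rotation_%d.png" % i, rotated)  # 0, 90, 180, 270 degrees clockwise
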
def camshift(x, y, w, h, selection):
    print "Performing camshift with x:{} y:{} w:{} h:{}".format(x, y, w, h)
    print selection
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    while True:
        print "entered loop"
        frame = cv.QueryFrame(cap)
        cv.Flip(frame, frame, 1)

        # switch to HSV and keep the hue plane
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, hue, None, None, None)

        # compute the back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CalcArrBackProject([hue], backproject, hist)

        # run the camshift; termination criteria: 10 iterations, or movement
        # of less than 1 pixel
        print "Selection"
        #pdb.set_trace()
        print selection
        crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        (iters, (area, value, rect), track_box) = cv.CamShift(backproject, selection, crit)
        print "rect"
        print rect
        if rect[0] > 0 and rect[1] > 0:
            selection = rect
            print "SelectionNew"
            print selection
        print "track_box"
        print track_box

        # draw the surrounding ellipse
        cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

        # draw the image
        cv.ShowImage("CamShift", frame)
        if cv.WaitKey(1) & 0xFF == ord('q'):
            break

def findImageEx(self, source, x, y, width, height):
    hdc = win32gui.GetWindowDC(self.hwnd)
    dc_obj = win32ui.CreateDCFromHandle(hdc)
    memorydc = dc_obj.CreateCompatibleDC()
    data_bitmap = win32ui.CreateBitmap()
    data_bitmap.CreateCompatibleBitmap(dc_obj, self.width, self.height)
    memorydc.SelectObject(data_bitmap)
    memorydc.BitBlt((0, 0), (self.width, self.height), dc_obj,
                    (self.dx, self.dy), win32con.SRCCOPY)
    bmpheader = struct.pack("LHHHH", struct.calcsize("LHHHH"),
                            self.width, self.height, 1, 24)
    c_bmpheader = ctypes.create_string_buffer(bmpheader)
    # each DIB scanline is padded to a 4-byte boundary:
    # stride = (width * 3 + 3) & -4 for a 24-bit image
    stride = (self.width * 3 + 3) & -4
    c_bits = ctypes.create_string_buffer(" " * (stride * self.height))
    res = ctypes.windll.gdi32.GetDIBits(memorydc.GetSafeHdc(),
                                        data_bitmap.GetHandle(), 0,
                                        self.height, c_bits, c_bmpheader,
                                        win32con.DIB_RGB_COLORS)
    win32gui.DeleteDC(hdc)
    win32gui.ReleaseDC(self.hwnd, hdc)
    memorydc.DeleteDC()
    win32gui.DeleteObject(data_bitmap.GetHandle())
    cv_im = cv.CreateImageHeader((self.width, self.height), cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, c_bits.raw, stride)
    # DIBs are stored bottom-up, so flip around the x-axis
    cv.Flip(cv_im, None, 0)
    im_region = cv.GetSubRect(cv_im, (x, y, width, height))
    #cv.SaveImage('aaak.bmp', im_region)
    template_source = cv.LoadImage(source)

    # result size, from the MatchTemplate manual
    region_w, region_h = cv.GetSize(im_region)
    tmpl_w, tmpl_h = cv.GetSize(template_source)
    result_width = region_w - tmpl_w + 1
    result_height = region_h - tmpl_h + 1
    result = cv.CreateImage((result_width, result_height), cv.IPL_DEPTH_32F, 1)
    cv.MatchTemplate(im_region, template_source, result, cv.CV_TM_CCOEFF_NORMED)
    minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(result)
    #print minVal, maxVal, minLoc, maxLoc
    minLoc2 = minLoc[0] + x, minLoc[1] + y
    maxLoc2 = maxLoc[0] + x, maxLoc[1] + y
    return minVal, maxVal, minLoc2, maxLoc2

def _updateFrame(self, dt):
    print "p",
    # prepare frame
    timeStamp = time.time()
    self._midiListner.getData(False)
    self._mediaPool.updateVideo(timeStamp)
    self._multiprocessLogger.handleQueuedLoggs()
    self.checkAndUpdateFromConfiguration()
    updateConfig = self._guiServer.processGuiRequests()
    if updateConfig == True:
        self._configCheckCounter = self._configCheckEveryNRound + 1

    # show frame
    mixedImage = self._mediaMixer.getImage()
    cv.Flip(mixedImage, mixedImage, 0)
    # cv.CvtColor(mixedImage, mixedImage, cv.CV_BGR2RGB)
    rawData = mixedImage.tostring()
    self._pygletImage.set_data("BGR", self._internalResolutionX * 3, rawData)

def retrieveCam(self):
    if self.capture:
        frame_copy = None
        frame = cv.QueryFrame(self.capture)
        if not frame:
            return 0
        if not frame_copy:
            frame_copy = cv.CreateImage((frame.width, frame.height),
                                        cv.IPL_DEPTH_8U, frame.nChannels)
        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(frame, frame_copy)
        else:
            cv.Flip(frame, frame_copy, 0)
        face_set, img_rectangle = self.detect_and_draw(frame_copy)
        return face_set, img_rectangle
    else:
        return [0, 0]
    # dead code: never reached, both branches above return first
    #self.capture = cv.CreateCameraCapture(2)
    #return self.capture

def _run_camera(self, camera_position):
    cascade = cv.Load("haarcascades/haarcascade_frontalface_alt2.xml")
    capture = cv.CreateCameraCapture(self.camera_number[camera_position])
    if self.show_main_view[camera_position]:
        cv.NamedWindow("result" + str(camera_position), 1)
    if capture:
        frame_copy = None
        prev_t, now_t = time.time(), 0
        while self.should_camera_be_able_to_run:
            frame = cv.QueryFrame(capture)
            if not frame:
                print "not frame"
            else:
                # flip only once we know the grab succeeded
                if self.flip_image_verticaly[camera_position]:
                    cv.Flip(frame, frame)
                now_t = time.time()
                fps = 1 / (now_t - prev_t)
                prev_t = now_t
                print fps
                self.detect_and_draw(frame, cascade, camera_position)
                #cv.WaitKey(1)
                #continue
                #if self.show_main_view[camera_position]:
                cv.ShowImage("result" + str(camera_position), frame)
                #if not frame_copy:
                #    frame_copy = cv.CreateImage((frame.width, frame.height),
                #                                cv.IPL_DEPTH_8U, frame.nChannels)
                #if frame.origin == cv.IPL_ORIGIN_TL:
                #    cv.Copy(frame, frame_copy)
                #else:
                #    cv.Flip(frame, frame_copy, 0)
                #if cascade:
                #    self.detect_and_draw(frame, cascade, camera_position)
                #else:
                #    image = cv.LoadImage(input_name, 1)
                #    cv.WaitKey(0)
    try:
        cv.DestroyWindow("result" + str(camera_position))
    except:
        print "could not destroy window"

cv.NamedWindow("Tracker", 1) if cap: frame_copy = None while (True): # Capture frame-by-frame frame = cv.QueryFrame(cap) if not frame: cv.WaitKey(0) break if not frame_copy: frame_copy = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_8U, frame.nChannels) if frame.origin == cv.IPL_ORIGIN_TL: cv.Flip(frame, frame, -1) # Our operations on the frame come here gray = cv.CreateImage((frame.width, frame.height), 8, 1) small_img = cv.CreateImage((cv.Round( frame.width / image_scale), cv.Round(frame.height / image_scale)), 8, 1) # convert color input image to grayscale cv.CvtColor(frame, gray, cv.CV_BGR2GRAY) # scale input image for faster processing cv.Resize(gray, small_img, cv.CV_INTER_LINEAR) cv.EqualizeHist(small_img, small_img)
vc = cv2.VideoCapture("/media/bat/DATA/images/2013/06/27/00176.MTS")
width, height = 640, 480
writer = cv2.VideoWriter(filename="outputVideo.avi",
                         fourcc=cv.CV_FOURCC('M', 'J', 'P', 'G'),
                         fps=15, frameSize=(width, height))

if vc.isOpened():  # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False

while rval:
    cv2.imshow("preview", frame)
    rval, frame = vc.read()
    im = cv.fromarray(frame)
    cv.Flip(im, flipMode=-1)  # flips in place; cv.Flip returns None
    cv.ShowImage('180_rotation', im)
    key = cv2.waitKey(20)
    key -= 0x100000  # corrects a bug in OpenCV...
    if key == 27:  # Esc key to stop
        break
    elif key == 115:  # s key for snapshot
        cv2.imwrite(datetime.datetime.utcnow().strftime("%Yy%mm%dd%Hh%Mm%Ss") + '.jpg',
                    frame)
    writer.write(frame)

cv2.destroyAllWindows()

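# The snippet above mixes the cv2 (NumPy-based) and legacy cv APIs. The round
# trip between the two looks like this (a minimal sketch; the array contents
# are placeholders):
import cv2
import cv2.cv as cv
import numpy as np

arr = np.zeros((480, 640, 3), np.uint8)  # cv2-style image
mat = cv.fromarray(arr)                  # legacy CvMat header over the same data
cv.Flip(mat, mat, flipMode=-1)           # in-place 180-degree rotation
arr2 = np.asarray(mat)                   # back to a NumPy array (shares data)
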
def run(self):
    logging.debug(' starting run ')
    global samecolorclient
    global capture
    global centroidList  #abh
    global lock  #abh
    global lock2  #abh
    global lock3  #abh
    global lock4  #abh
    mydata = threading.local()
    #window1 = " Color Detection"
    mydata.window2 = str(self.name) + " Threshold"
    #cv.NamedWindow(window1, 0)
    lock4.acquire()  #abh
    cv.NamedWindow(mydata.window2, 0)
    lock4.release()  #abh
    mydata.centroidold = [0, 0]
    mydata.flag = 0
    mydata.roi = [100, 22, 390, 390]
    #mydata.roi = [95, 40, 380, 350]
    while True:
        lock2.acquire()  #abh
        lock4.acquire()  #abh
        mydata.color_image = cv.QueryFrame(capture)
        lock4.release()  #abh
        lock2.release()  #abh
        # NOTE: the bare `lock4.acquire` / `lock4.release` statements below
        # lack parentheses, so they are attribute references and therefore
        # no-ops; they are kept as in the original.
        if mydata.flag == 0:
            lock4.acquire  #abh
            mydata.color_image = cv.GetSubRect(mydata.color_image,
                                               (100, 22, 390, 390))
            lock4.release  #abh
        else:
            lock4.acquire  #abh
            mydata.color_image = cv.GetSubRect(
                mydata.color_image,
                (int(mydata.roi[0]), int(mydata.roi[1]),
                 int(mydata.roi[2]), int(mydata.roi[3])))
            lock4.release  #abh
        lock4.acquire  #abh
        cv.Flip(mydata.color_image, mydata.color_image, 1)
        cv.Smooth(mydata.color_image, mydata.color_image, cv.CV_MEDIAN, 3, 0)
        mydata.imghsv = cv.CreateImage(cv.GetSize(mydata.color_image), 8, 3)
        cv.CvtColor(mydata.color_image, mydata.imghsv, cv.CV_BGR2YCrCb)  # convert from BGR to YCrCb
        mydata.imgnew = cv.CreateImage(cv.GetSize(mydata.color_image),
                                       cv.IPL_DEPTH_8U, 1)
        mydata.imgthreshold = cv.CreateImage(cv.GetSize(mydata.color_image), 8, 1)
        lock4.release  #abh
        mydata.c = self.color[0]
        mydata.minc = (float(mydata.c[0]), float(mydata.c[1]), float(mydata.c[2]))
        mydata.c = self.color[1]
        mydata.maxc = (float(mydata.c[0]), float(mydata.c[1]), float(mydata.c[2]))
        lock4.acquire  #abh
        cv.InRangeS(mydata.imghsv, cv.Scalar(*mydata.minc),
                    cv.Scalar(*mydata.maxc), mydata.imgnew)
        cv.Add(mydata.imgnew, mydata.imgthreshold, mydata.imgthreshold)
        cv.Erode(mydata.imgthreshold, mydata.imgthreshold, None, 1)
        cv.Dilate(mydata.imgthreshold, mydata.imgthreshold, None, 4)
        mydata.img2 = cv.CloneImage(mydata.imgthreshold)
        mydata.storage = cv.CreateMemStorage(0)
        mydata.contour = cv.FindContours(mydata.imgthreshold, mydata.storage,
                                         cv.CV_RETR_EXTERNAL,
                                         cv.CV_CHAIN_APPROX_SIMPLE)
        lock4.release  #abh
        mydata.points = []
        while mydata.contour:
            # draw bounding rectangles
            lock4.acquire  #abh
            mydata.bound_rect = cv.BoundingRect(list(mydata.contour))
            lock4.release  #abh
            mydata.contour = mydata.contour.h_next()
            mydata.pt1 = (mydata.bound_rect[0], mydata.bound_rect[1])
            mydata.pt2 = (mydata.bound_rect[0] + mydata.bound_rect[2],
                          mydata.bound_rect[1] + mydata.bound_rect[3])
            mydata.points.append(mydata.pt1)
            mydata.points.append(mydata.pt2)
            lock4.acquire  #abh
            cv.Rectangle(mydata.color_image, mydata.pt1, mydata.pt2,
                         cv.CV_RGB(mydata.maxc[0], mydata.maxc[1], mydata.maxc[2]), 1)
            lock4.release  #abh
            # calculate centroids
            if (mydata.bound_rect[2] * mydata.bound_rect[3]) < 3500:
                lock4.acquire  #abh
                mydata.centroidx = cv.Round((mydata.pt1[0] + mydata.pt2[0]) / 2)
                mydata.centroidy = cv.Round((mydata.pt1[1] + mydata.pt2[1]) / 2)
                lock4.release  #abh
                if mydata.flag == 1:
                    mydata.centroidx = mydata.roi[0] + mydata.centroidx
                    mydata.centroidy = mydata.roi[1] + mydata.centroidy
                mydata.centroidnew = [mydata.centroidx, mydata.centroidy]
                mydata.tmpclient = []
                lock3.acquire()  #abh
                mydata.tmpclient = samecolorclient[self.i]
                lock3.release()  #abh
                mydata.distance = math.sqrt(
                    math.pow(mydata.centroidnew[0] - mydata.centroidold[0], 2) +
                    math.pow(mydata.centroidnew[1] - mydata.centroidold[1], 2))
                for mydata.j in range(len(mydata.tmpclient)):
                    mydata.client_socket = mydata.tmpclient[mydata.j]
                    if mydata.distance >= 1.50:
                        print 'inside 1.50'
                        lock.acquire()  #abh
                        centroidList[colorlist.index(self.color)] = mydata.centroidnew  #abh
                        del mydata.centroidold[:]
                        self.server_socket.sendto(str(centroidList),
                                                  mydata.client_socket)  #abh
                        lock.release()  #abh
                        mydata.centroidold = mydata.centroidnew[:]
                    else:
                        lock.acquire()  #abh
                        centroidList[colorlist.index(self.color)] = mydata.centroidold  #abh
                        self.server_socket.sendto(str(centroidList),
                                                  mydata.client_socket)  #abh
                        lock.release()  #abh
                # re-center the region of interest on the blob and clamp it
                # to the image bounds
                mydata.roi[0] = mydata.centroidx - 50
                mydata.roi[1] = mydata.centroidy - 50
                if mydata.roi[0] < 95:
                    mydata.roi[0] = 95
                if mydata.roi[1] < 40:
                    mydata.roi[1] = 40
                mydata.roi[2] = 100
                mydata.roi[3] = 100
                if (mydata.roi[0] + mydata.roi[2]) > 475:
                    mydata.roi[0] -= (mydata.roi[0] + mydata.roi[2]) - 475
                if (mydata.roi[1] + mydata.roi[3]) > 390:
                    mydata.roi[1] -= (mydata.roi[1] + mydata.roi[3]) - 390
                #del mydata.centroidnew[:]
                mydata.flag = 1
        if mydata.contour is None:
            mydata.flag = 0
        #cv.ShowImage(window1, mydata.color_image)
        lock4.acquire  #abh
        cv.ShowImage(mydata.window2, mydata.img2)
        lock4.release  #abh
        if cv.WaitKey(33) == 27:  # here it was 33 instead of 10
            #cv.DestroyWindow(mydata.window1)
            #cv.DestroyWindow(mydata.window2)
            break

def detectFaces():
    global frame_copy, min_size, image_scale, haar_scale, min_neighbors, haar_flags, cap, cam_pan, cam_tilt
    t0 = cv.GetTickCount()
    frame = cv.QueryFrame(cap)
    if not frame:
        cv.WaitKey(0)
        return False
    if not frame_copy:
        frame_copy = cv.CreateImage((frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.Flip(frame, frame, -1)

    # our operations on the frame come here
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(frame.width / image_scale),
                                cv.Round(frame.height / image_scale)), 8, 1)
    small_img2 = cv.CreateImage((cv.Round(frame.width / image_scale),
                                 cv.Round(frame.height / image_scale)), 8, 1)

    # convert the color input image to grayscale
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    # scale the input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    # flip the image for more convenient camera mounting
    cv.Flip(small_img, small_img2, -1)

    midFace = None

    t1 = cv.GetTickCount()
    if cascade:
        t = cv.GetTickCount()
        # HaarDetectObjects takes about 0.02 s
        faces = cv.HaarDetectObjects(small_img2, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            #lights(50 if len(faces) == 0 else 0, 50 if len(faces) > 0 else 0, 0, 50)
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                # cv.Rectangle(frame, pt1, pt2, cv.RGB(100, 220, 255), 1, 8, 0)

                # get the xy corner co-ords and calculate the midFace location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                midFaceX = x1 + ((x2 - x1) / 2)
                midFaceY = y1 + ((y2 - y1) / 2)
                midFace = (midFaceX, midFaceY)

                # map the midpoint into [-1, 1] relative to the frame centre
                offsetX = midFaceX / float(frame.width / 2)
                offsetY = midFaceY / float(frame.height / 2)
                offsetX -= 1
                offsetY -= 1

                cam_pan -= (offsetX * 5)
                cam_tilt += (offsetY * 5)
                cam_pan = max(0, min(180, cam_pan))
                cam_tilt = max(0, min(180, cam_tilt))

                print(offsetX, offsetY, midFace, cam_pan, cam_tilt, frame.width, frame.height)
                sys.stdout.flush()
                # pan(int(cam_pan - 90))
                # tilt(int(cam_tilt - 90))
                #break

    # print "e" + str((t1 - t0) / 1000000) + "-" + str((cv.GetTickCount() - t1) / 1000000)
    # cv.ShowImage('Tracker', frame)
    if cv.WaitKey(1) & 0xFF == ord('q'):
        return False
    return True

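# Worked example of the pan/tilt mapping above: for a 640x480 frame, a face
# midpoint at midFaceX = 480 gives offsetX = 480 / 320.0 - 1 = 0.5, so the pan
# angle changes by offsetX * 5 = 2.5 degrees; a centred face (midFaceX = 320)
# gives offsetX = 0 and no movement.
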
def processa_video(imagem):
    cv.SetData(imagem_cv, imagem)
    cv.Flip(imagem_cv, imagem_cv, 1)
    cv.ShowImage('Video', imagem_cv)

if capture:
    while True:
        frame = cv.QueryFrame(capture)
        if not frame:
            cv.WaitKey(0)
            break
        if not frame_copy:
            frame_copy = cv.CreateImage((frame.width, frame.height),
                                        cv.IPL_DEPTH_8U, frame.nChannels)
        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(frame, frame_copy)
        else:
            cv.Flip(frame, frame_copy, 0)
        detect_and_draw(frame_copy, cascade, jpg_cnt)
        jpg_cnt += 1
        #print(jpg_cnt)
        if cv.WaitKey(10) >= 0:
            break
else:
    image = cv.LoadImage(input_name, 1)
    detect_and_draw(image, cascade, jpg_cnt)
    jpg_cnt += 1
    cv.WaitKey(0)

cv.DestroyWindow("result")

def get_frame(self):
    try:
        frame = cv.QueryFrame(self.camera)
        if not frame:
            print('Camera error')
            return
        if not self.frame_copy:
            self.frame_copy = cv.CreateImage((frame.width, frame.height),
                                             cv.IPL_DEPTH_8U, frame.nChannels)
        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Flip(frame, frame, -1)

        # our operations on the frame come here
        gray = cv.CreateImage((frame.width, frame.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(frame.width / self.image_scale),
                                    cv.Round(frame.height / self.image_scale)), 8, 1)

        # convert the color input image to grayscale
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

        # scale the input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        midFace = None

        if self.cascade:
            t = cv.GetTickCount()
            # do the Haar detection
            faces = cv.HaarDetectObjects(small_img, self.cascade,
                                         cv.CreateMemStorage(0),
                                         self.haar_scale, self.min_neighbors,
                                         self.haar_flags, self.min_size)
            t = cv.GetTickCount() - t
            if faces:
                if not os.path.isfile('face.jpg'):
                    # save a temporary image if no existing one
                    image = cv2.imencode('.jpeg', np.asarray(frame[:, :]))[1]
                    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
                    cv2.imwrite('face.jpg', image)
                for ((x, y, w, h), n) in faces:
                    # the input was resized, so scale the bounding box of each
                    # face and convert it to two CvPoints
                    pt1 = (int(x * self.image_scale), int(y * self.image_scale))
                    pt2 = (int((x + w) * self.image_scale),
                           int((y + h) * self.image_scale))
                    cv.Rectangle(frame, pt1, pt2, cv.RGB(100, 220, 255), 1, 8, 0)

                    # calculate the midpoint of the face
                    x1, y1 = pt1
                    x2, y2 = pt2
                    midFaceX = x1 + ((x2 - x1) / 2)
                    midFaceY = y1 + ((y2 - y1) / 2)
                    midFace = (midFaceX, midFaceY)

                    # calculate the offset of the camera angle
                    offsetX = midFaceX / float(frame.width / 2)
                    offsetY = midFaceY / float(frame.height / 2)
                    offsetX -= 1
                    offsetY -= 1

                    self.cam_pan -= (offsetX * 5)
                    self.cam_tilt += (offsetY * 5)
                    self.cam_pan = max(0, min(180, self.cam_pan))
                    self.cam_tilt = max(0, min(180, self.cam_tilt))

                    # pan and tilt to the next position
                    pan(int(self.cam_pan - 90))
                    tilt(int(self.cam_tilt - 90))

        # push the processed frame image to Flask
        image = cv2.imencode('.jpeg', np.asarray(frame[:, :]))[1].tostring()
        return image
    except Exception as e:
        print(e)
        return

def run(self):
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = False
    print "hitting run section"
    x = 0
    while True:
        #print x
        #x = x + 1
        frame = cv.QueryFrame(self.capture)
        cv.Flip(frame, frame, 1)

        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

        # Run the cam-shift
        cv.CalcArrBackProject([self.hue], backproject, hist)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            print self.track_window
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect
            print self.track_window
        try:
            # print the center x and y of the tracked ellipse
            coord = track_box[0]
            print "center = {}".format(coord)
            if coord[0] < 320:
                print "move right"
                # ser.write("R")
            elif coord[0] == 320:
                print "do nothing"
            else:
                print "move left"
                # ser.write("L")
        except UnboundLocalError:
            print "track_box is None"

        # If the mouse is pressed, highlight the currently selected rectangle
        # and recompute the histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            print track_box
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

        if not backproject_mode:
            cv.ShowImage("CamShiftDemo", frame)
        else:
            cv.ShowImage("CamShiftDemo", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
        elif c == ord("b"):
            backproject_mode = not backproject_mode

def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert the color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale the input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "time taken for detection = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("video", img)


if __name__ == '__main__':
    parser = OptionParser(usage="usage: %prog [options] [filename|camera_index]")
    parser.add_option("-c", "--cascade", action="store", dest="cascade",
                      type="str", help="Haar cascade file, default %default",
                      default="../data/haarcascades/haarcascade_frontalface_alt.xml")
    (options, args) = parser.parse_args()

    cascade = cv.Load(options.cascade)
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    input_name = args[0]
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("video", 1)

    # size of the video
    width = 160
    height = 120
    if width is None:
        width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)
    if height is None:
        height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    if capture:
        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width, frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)
            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)
            detect_and_draw(frame_copy, cascade)
            if cv.WaitKey(10) >= 0:
                break
    else:
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)

    cv.DestroyWindow("video")

# main
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 5, 5, 0, 3, 8)

# initialize the list of sign images
SignsList = ["a.jpg", "b.jpg", "c.jpg", "d.jpg", "e.jpg", "f.jpg"]
imagesList = {}
for e in SignsList:
    imagesList[e] = cv.LoadImage("signs/" + e, cv.CV_LOAD_IMAGE_GRAYSCALE)

cv.NamedWindow("Input", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("Gesture Space", cv.CV_WINDOW_AUTOSIZE)
matchresult = 1
p_capWebcam = cv.CaptureFromCAM(0)

while 1:
    # capture from the webcam
    p_imgOriginal = cv.QueryFrame(p_capWebcam)
    cv.Flip(p_imgOriginal, p_imgOriginal, 1)
    p_gray = cv.CreateImage(cv.GetSize(p_imgOriginal), 8, 1)
    cv.CvtColor(p_imgOriginal, p_gray, cv.CV_BGR2GRAY)
    # set a fixed region of interest
    cv.SetImageROI(p_gray, (400, 200, 200, 200))
    cv.Threshold(p_gray, p_gray, 100, 255, cv.CV_THRESH_BINARY_INV)
    cv.Rectangle(p_imgOriginal, (400, 200), (600, 400), (255, 0, 0), 4)
    j = 0
    # walk the image list and test each sign against the ROI
    for imageI in imagesList:
        matchresult = compare_2_formes(p_gray, imagesList[imageI])  # comparison
        #print("the match is " + str(matchresult))
        if matchresult < 0.13 and matchresult != 0:

# create the images we need
image = cv.CreateImage(cv.GetSize(frame), 8, 3)
grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
eig = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 1)
temp = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 1)
points = [[], []]

# copy the frame, so we can draw on it
cv.Copy(frame, image)
cv.Flip(image, None, 1)

# create a grey version of the image
cv.CvtColor(image, grey, cv.CV_BGR2GRAY)

if night_mode:
    # night mode: only display the points
    cv.SetZero(image)

if need_to_init:
    # we want to search for all the good points
    points[1] = cv.GoodFeaturesToTrack(

cascade = cv.Load("./haarcascade_frontalface_alt.xml")
if len(args) != 1:
    parser.print_help()
    #sys.exit(1)

capture = cv.CreateCameraCapture(0)
cv.NamedWindow("result", 1)

if capture:
    frame_copy = None
    while True:
        frame = cv.QueryFrame(capture)
        if not frame:
            cv.WaitKey(0)
            break
        # flip only after confirming the grab succeeded
        cv.Flip(frame, frame, flipMode=0)
        if not frame_copy:
            frame_copy = cv.CreateImage((frame.width, frame.height),
                                        cv.IPL_DEPTH_8U, frame.nChannels)
        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(frame, frame_copy)
        else:
            cv.Flip(frame, frame_copy, 0)
        detect_and_draw(frame_copy, cascade)
        if cv.WaitKey(10) >= 0:
            break

# using OpenCV 1.0 functions
camera = cv.CaptureFromCAM(0)

# initializing font, color and variables
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
color = (0, 0, 0)
point1 = (300, 200)
point2 = (400, 300)
flag = 0

while True:
    # grab a frame, apply blur, flip the image
    frame = cv.QueryFrame(camera)
    cv.Smooth(frame, frame, cv.CV_BLUR, 3)
    cv.Flip(frame, frame, 1)

    # draw the rectangle and write the text
    temp1 = cv.CloneImage(frame)
    cv.Rectangle(temp1, point1, point2, color, 1)
    cv.PutText(temp1, "Place in box", (430, 240), font, color)
    cv.PutText(temp1, "then hit q", (430, 260), font, color)

    # take a snapshot after q is pressed
    if cv.WaitKey(10) == 113:  # ord('q')
        flag = 1
        cv.SetImageROI(temp1, (300, 200, 100, 100))
        template = cv.CloneImage(temp1)
        cv.ResetImageROI(temp1)
        cv.DestroyWindow("Image")

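# Hypothetical follow-up: once the template has been grabbed, locating it in a
# later frame would look roughly like this (a sketch; `camera` and `template`
# come from the snippet above, everything else is an assumption):
frame = cv.QueryFrame(camera)
fw, fh = cv.GetSize(frame)
tw, th = cv.GetSize(template)
result = cv.CreateImage((fw - tw + 1, fh - th + 1), cv.IPL_DEPTH_32F, 1)
cv.MatchTemplate(frame, template, result, cv.CV_TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv.MinMaxLoc(result)  # max_loc = best match
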
def processa_profundidade(imagem):
    cv.SetData(depth_cv, imagem)
    cv.Flip(depth_cv, depth_cv, 1)
    cv.ShowImage('Profundidade', depth_cv)

# check that the capture device is OK
if not capture:
    print "Error opening capture device"
    sys.exit(1)

while 1:  # do forever
    # capture the current frame
    frame = cv.QueryFrame(capture)
    if frame is None:
        break

    # mirror
    cv.Flip(frame, None, 1)
    originalImage = frame

    hsvImage = cv.CreateImage(cv.GetSize(originalImage), 8, 3)
    cv.CvtColor(originalImage, hsvImage, cv.CV_BGR2HSV)

    thresholdImage = cv.CreateImage(cv.GetSize(originalImage), 8, 1)
    cv.InRangeS(hsvImage, cv.Scalar(20.74, 75, 75),
                cv.Scalar(30.74, 255, 255), thresholdImage)

    thresholdImageArray = np.asarray(cv.GetMat(thresholdImage))
    thresholdImageArray = cv2.GaussianBlur(thresholdImageArray, (0, 0), 2)
    thresholdImage = cv.fromarray(thresholdImageArray)

def runtracking():
    global rgb_image, hsv_image, hsvmouse, pausecam, hsvgreen, hsvyellow, hsvblue, hsvred, homographycomputed
    global hsvyellowtab, hsvrange
    global homography, pose_flag
    global hsvyellowmin, hsvyellowmax, hsvgreenmin, hsvgreenmax, hsvbluemin, hsvbluemax, hsvredmin, hsvredmax
    global cycloppoint, righteyepoint, lefteyepoint
    global capture, pausecam, size_image
    global yellowmask_image, greenmask_image, redmask_image, bluemask_image
    global p_num, modelepoints, blob_centers
    global rx, ry, rz
    global background

    size_thumb = [size_image[0] / 2, size_image[1] / 2]
    thumbgreen = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbred = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbblue = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbyellow = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)

    cv.NamedWindow("GreenBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("GreenBlobDetection", thumbgreen)
    cv.NamedWindow("YellowBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("YellowBlobDetection", thumbyellow)
    cv.NamedWindow("BlueBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("BlueBlobDetection", thumbblue)
    cv.NamedWindow("RedBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("RedBlobDetection", thumbred)

    rgb_image = cv.QueryFrame(capture)
    cv.NamedWindow("Source", cv.CV_WINDOW_AUTOSIZE)
    cv.SetMouseCallback("Source", getObjectHSV)

    print "Hit ESC key to quit..."

    # infinite processing loop
    while True:
        time.sleep(0.02)
        blobcentergreen = findBlob(rgb_image, hsv_image, greenmask_image,
                                   greenblob_image, hsvrange,
                                   hsvgreenmin, hsvgreenmax, 'g')
        blobcenteryellow = findBlob(rgb_image, hsv_image, yellowmask_image,
                                    yellowblob_image, hsvrange,
                                    hsvyellowmin, hsvyellowmax, 'y')
        blobcenterblue = findBlob(rgb_image, hsv_image, bluemask_image,
                                  blueblob_image, hsvrange,
                                  hsvbluemin, hsvbluemax, 'b')
        blobcenterred = findBlob(rgb_image, hsv_image, redmask_image,
                                 redblob_image, hsvrange,
                                 hsvredmin, hsvredmax, 'r')

        if not pausecam:
            if blobcentergreen is not None:
                cv.Resize(greenblob_image, thumbgreen)
                cv.ShowImage("GreenBlobDetection", thumbgreen)
                # print "green center: %d %d %d" % blobcentergreen
            if blobcenteryellow is not None:
                cv.Resize(yellowblob_image, thumbyellow)
                cv.ShowImage("YellowBlobDetection", thumbyellow)
                # print "yellow center: %d %d %d" % blobcenteryellow
            if blobcenterblue is not None:
                cv.Resize(blueblob_image, thumbblue)
                cv.ShowImage("BlueBlobDetection", thumbblue)
                # print "blue center: %d %d %d" % blobcenterblue
            if blobcenterred is not None:
                cv.Resize(redblob_image, thumbred)
                cv.ShowImage("RedBlobDetection", thumbred)
                # print "red center: %d %d %d" % blobcenterred

        cv.ShowImage("Source", rgb_image)

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
        if c == ord('p') or c == ord('P'):
            pausecam = not pausecam
        if c == ord('y'):
            hsvyellowtab.append(hsvmouse)
            hsvyellowmin = mintab(hsvyellowtab)
            hsvyellowmax = maxtab(hsvyellowtab)
            print "minyellow"
            print hsvyellowmin
            print "maxyellow"
            print hsvyellowmax
        if c == ord('Y'):
            if len(hsvyellowtab) > 0:
                hsvyellowtab.pop(len(hsvyellowtab) - 1)
            if len(hsvyellowtab) != 0:
                hsvyellowmin = mintab(hsvyellowtab)
                hsvyellowmax = maxtab(hsvyellowtab)
            else:
                hsvyellowmin = [255, 255, 255]
                hsvyellowmax = [0, 0, 0]
        if c == ord('g'):
            hsvgreentab.append(hsvmouse)
            hsvgreenmin = mintab(hsvgreentab)
            hsvgreenmax = maxtab(hsvgreentab)
            print "mingreen"
            print hsvgreenmin
            print "maxgreen"
            print hsvgreenmax
        if c == ord('G'):
            if len(hsvgreentab) > 0:
                hsvgreentab.pop(len(hsvgreentab) - 1)
            if len(hsvgreentab) != 0:
                hsvgreenmin = mintab(hsvgreentab)
                hsvgreenmax = maxtab(hsvgreentab)
            else:
                hsvgreenmin = [255, 255, 255]
                hsvgreenmax = [0, 0, 0]
        if c == ord('r'):
            hsvredtab.append(hsvmouse)
            hsvredmin = mintab(hsvredtab)
            hsvredmax = maxtab(hsvredtab)
            print "minred"
            print hsvredmin
            print "maxred"
            print hsvredmax
        if c == ord('R'):
            if len(hsvredtab) > 0:
                hsvredtab.pop(len(hsvredtab) - 1)
            if len(hsvredtab) != 0:
                hsvredmin = mintab(hsvredtab)
                hsvredmax = maxtab(hsvredtab)
            else:
                hsvredmin = [255, 255, 255]
                hsvredmax = [0, 0, 0]
            print "RRR"
            print "min red"
            print hsvredmin
            print "max red"
            print hsvredmax
        if c == ord('b'):
            hsvbluetab.append(hsvmouse)
            hsvbluemin = mintab(hsvbluetab)
            hsvbluemax = maxtab(hsvbluetab)
            print "minblue"
            print hsvbluemin
            print "maxblue"
            print hsvbluemax
        if c == ord('B'):
            if len(hsvbluetab) > 0:
                hsvbluetab.pop(len(hsvbluetab) - 1)
            if len(hsvbluetab) != 0:
                hsvbluemin = mintab(hsvbluetab)
                hsvbluemax = maxtab(hsvbluetab)
            else:
                hsvbluemin = [255, 255, 255]
                hsvbluemax = [0, 0, 0]
        if c == ord('s'):
            # save the current HSV ranges
            f = open("last_range.txt", 'w')
            for hsv in [hsvredmin, hsvredmax, hsvgreenmin, hsvgreenmax,
                        hsvbluemin, hsvbluemax, hsvyellowmin, hsvyellowmax]:
                map(lambda v: f.write(str(int(v)) + ','), hsv)
                f.write('\n')
            f.close()
            print 'saved ranges'
        if c == ord('l'):
            # load the previously saved HSV ranges
            f = open("last_range.txt", 'r')
            lines = f.readlines()
            [hsvredmin, hsvredmax, hsvgreenmin, hsvgreenmax,
             hsvbluemin, hsvbluemax, hsvyellowmin, hsvyellowmax] = map(
                lambda l: map(lambda v: int(v), l.split(',')[:-1]), lines)
            print "loaded ranges:\n"
            print lines
        # if c == ord('R'):
        #     step = 0

        if not pausecam:
            rgb_image = cv.QueryFrame(capture)
            cv.Flip(rgb_image, rgb_image, 1)  # flip left/right

        # after blob center detection we launch the pose estimation
        if (blobcentergreen is not None and blobcenteryellow is not None and
                blobcenterblue is not None and blobcenterred is not None):
            # order is yellow, blue, red, green
            pose_flag = 1
            blob_centers = []
            blob_centers.append((blobcenteryellow[0] - size_image[0] / 2,
                                 blobcenteryellow[1] - size_image[1] / 2))
            blob_centers.append((blobcenterblue[0] - size_image[0] / 2,
                                 blobcenterblue[1] - size_image[1] / 2))
            blob_centers.append((blobcenterred[0] - size_image[0] / 2,
                                 blobcenterred[1] - size_image[1] / 2))
            blob_centers.append((blobcentergreen[0] - size_image[0] / 2,
                                 blobcentergreen[1] - size_image[1] / 2))

            # get the tracking matrix (orientation and position) with the
            # POSIT method, expressed in the tracker (camera) referential
            matrix = find_pose(p_num, blob_centers, modelepoints)

            # We want the tracking result in the world referential, i.e. 60 cm
            # from the middle of the screen, with Y up and Z behind you. The
            # tracker referential sits in the camera referential with the X
            # axis pointing left, the Y axis pointing down, the Z axis
            # pointing behind you, and the camera as origin. We therefore
            # pre-multiply so the tracking results are expressed in the world
            # referential rather than in the tracker (camera) referential.
            pre_tranform_matrix = WordToTrackerTransform(matrix)

            # We do not want to track the origin of the body referential (the
            # upper-right point of the glasses) but the middle of the two eyes
            # in monoscopic mode (cyclops eye), or the left and right eyes in
            # stereoscopic mode. We therefore post-multiply the world tracking
            # result by the referential of the eye in the body (glasses)
            # referential.
            pre_tranform_matrix_post_cylcope_eye = BodyToCyclopsEyeTransform(pre_tranform_matrix)
            poscyclope = [pre_tranform_matrix_post_cylcope_eye[3][0],
                          pre_tranform_matrix_post_cylcope_eye[3][1],
                          pre_tranform_matrix_post_cylcope_eye[3][2]]
            # print "poscylope", poscyclope

            pre_tranform_matrix_post_left_eye = BodyToLeftEyeTransform(pre_tranform_matrix)
            posleft = [pre_tranform_matrix_post_left_eye[3][0],
                       pre_tranform_matrix_post_left_eye[3][1],
                       pre_tranform_matrix_post_left_eye[3][2]]
            # print "posleft", posleft

            pre_tranform_matrix_post_right_eye = BodyToRightEyeTransform(pre_tranform_matrix)
            posright = [pre_tranform_matrix_post_right_eye[3][0],
                        pre_tranform_matrix_post_right_eye[3][1],
                        pre_tranform_matrix_post_right_eye[3][2]]
            # print "posright", posright

            sendPosition("/tracker/head/pos_xyz/cyclope_eye", poscyclope)
            sendPosition("/tracker/head/pos_xyz/left_eye", posleft)
            sendPosition("/tracker/head/pos_xyz/right_eye", posright)

cv.NamedWindow("Real",0) # blank lists to store coordinates of blue blob blue = [] while(1): # captures feed from video in color color_image = cv.QueryFrame(capture) # ?? imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3) # ?? cv.SetZero(imdraw) cv.Flip(color_image,color_image, 1) cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0) # ?? imgbluethresh = getthresholdedimg(color_image) cv.Erode(imgbluethresh, imgbluethresh, None, 3) cv.Dilate(imgbluethresh, imgbluethresh, None, 10) # ?? img2 = cv.CloneImage(imgbluethresh) # ?? storage = cv.CreateMemStorage(0) contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE) # blank list into which points for bounding rectangles around blobs are appended points = [] # this is the new part here. ie use of cv.BoundingRect()
def capture_rgb(self):
    rgb_frame = np.fromstring(self.image_generator.get_raw_image_map_bgr(),
                              dtype=np.uint8).reshape(SCREEN_HEIGHT, SCREEN_WIDTH, 3)
    image = cv.fromarray(rgb_frame)
    cv.Flip(image, None, 1)  # mirror in place (shares the numpy buffer)
    cv.CvtColor(cv.fromarray(rgb_frame), image, cv.CV_BGR2RGB)
    self.game.frame = pygame.image.frombuffer(image.tostring(),
                                              cv.GetSize(image), 'RGB')