def processFrame(self, image):
    # Cut 'target'
    # Not to be misleading here - target is the general-purpose data
    # we will get back from processFrame. It is NOT a target object
    target, frame = algo.processFrame(image, cfg=self.algoConfig)
    # XXX: Cut
    if self.commChan:
        if target is not None:
            self.commChan.UpdateVisionState("Acquired")
            # -== PNP version ==-
            if self.algoConfig["algo"] == "pnp":
                # PNP
                self.algoConfig["state"]["TargetPNP"].poseValue = target
                self.algoConfig["state"]["TargetPNP"].send()
            # -== PID version ==-
            if self.algoConfig["algo"] == "pid":
                # PID
                self.algoConfig["state"]["TargetPID"].valuehOffSet = target
                self.algoConfig["state"]["TargetPID"].send()
        else:
            self.commChan.UpdateVisionState("Searching")
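# --------------------------------------------------------------------
# Illustrative sketch only (not from the original source): one plausible
# shape for the algoConfig dict that processFrame above reads.  The
# _ExampleTarget class, its fields, and the exact keys are assumptions
# standing in for whatever the real state/comms objects provide
# (poseValue, valuehOffSet, send).
class _ExampleTarget:
    def __init__(self):
        self.poseValue = None      # pose estimate consumed by the "pnp" branch
        self.valuehOffSet = None   # horizontal offset consumed by the "pid" branch

    def send(self):
        pass                       # the real object would publish over the comms channel

exampleAlgoConfig = {
    "algo": "pnp",                 # or "pid"
    "state": {
        "TargetPNP": _ExampleTarget(),
        "TargetPID": _ExampleTarget(),
    },
}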
def streamAlgo(self, cam, algoselector):
    global s_first
    logging.info(algoselector + " algo streaming")
    cam.start()
    while True:
        camframe = cam.next()
        target, frame = algo.processFrame(camframe, algoselector,
                                          cfg=s_config["algo"],
                                          display=True, debug=False)
        if target is not None:
            logging.debug(str(target))
        else:
            logging.info("no target")
        if s_comm is not None:
            if target is not None:
                s_comm.UpdateVisionState("Acquired")
                target.send()
            else:
                s_comm.UpdateVisionState("Searching")
        rc, jpg = cv2.imencode('.jpg', frame, s_jpgParam)
        if not rc:
            continue
        if s_first:
            logging.debug("jpg file size: %s" % jpg.size)
            s_first = False
        self.wfile.write(bytes("--jpgboundary\n", "utf-8"))
        self.send_header('Content-type', 'image/jpeg')
        self.send_header('Content-length', jpg.size)
        self.end_headers()
        self.wfile.write(jpg.tobytes())
        time.sleep(0.05)
def streamAlgo(self, cam, algoselector):
    global s_first
    logging.info(algoselector + " algo streaming")
    # Setting the value inside the config
    s_config["algo"]["algo"] = algoselector
    s_config["algo"]["display"] = True
    cam.start()
    cam.startThread()
    while True:
        camframe = cam.imageQueue.get()
        target, frame = algo.processFrame(camframe, cfg=s_config["algo"])
        self.endTime = time.monotonic()
        if target is not None:
            logging.debug(str(target))
        else:
            logging.info("no target")
        if s_comm is not None:
            if target is not None:
                s_comm.UpdateVisionState("Acquired")
                target.send()
            else:
                s_comm.UpdateVisionState("Searching")
        rc, jpg = cv2.imencode('.jpg', frame, s_jpgParam)
        if not rc:
            continue
        if s_first:
            logging.debug("jpg file size: %s" % jpg.size)
            s_first = False
        self.wfile.write(bytes("--jpgboundary\n", "utf-8"))
        self.send_header('Content-type', 'image/jpeg')
        self.send_header('Content-length', jpg.size)
        self.end_headers()
        self.wfile.write(jpg.tobytes())
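# --------------------------------------------------------------------
# Illustrative sketch only (assumption, not from the original source):
# s_jpgParam is handed straight to cv2.imencode above, so a plausible
# definition is an OpenCV encode-parameter list such as a JPEG quality
# setting.  The name s_jpgParamExample and the quality value are made up
# for illustration.
import cv2
s_jpgParamExample = [int(cv2.IMWRITE_JPEG_QUALITY), 60]  # 0-100, higher = larger/better jpg
# rc, jpg = cv2.imencode('.jpg', frame, s_jpgParamExample)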
def processFrame(self, image):
    # Cut 'target'
    # NOTE: Interesting, frame gets dropped on the floor here
    if self.algoConfig["algo"] == "pnp":
        robotPose, yawOffset, frame = algo.processFrame(
            image, cfg=self.algoConfig)
    if self.algoConfig["algo"] == "pid":
        yawOffset, frame = algo.processFrame(image, cfg=self.algoConfig)
    # XXX: Cut
    if self.commChan:
        # -== PNP version ==-
        if self.algoConfig["algo"] == "pnp":
            if robotPose is not None:
                self.commChan.UpdateVisionState("Acquired")
                # PNP
                self.algoConfig["state"]["TargetPNP"].poseValue = robotPose
                self.algoConfig["state"]["TargetPNP"].send()
                # PID
                self.algoConfig["state"]["TargetPID"].valuehOffSet = yawOffset
                self.algoConfig["state"]["TargetPID"].send()
            else:
                self.commChan.UpdateVisionState("Searching")
        # -== PID version ==-
        if self.algoConfig["algo"] == "pid":
            if yawOffset is not None:
                self.commChan.UpdateVisionState("Acquired")
                # PID
                self.algoConfig["state"]["TargetPID"].valuehOffSet = yawOffset
                self.algoConfig["state"]["TargetPID"].send()
            else:
                self.commChan.UpdateVisionState("Searching")
def processFrame(self, image):
    # called on each frame in the video
    logging.debug(" (multi threaded)")
    target, frame = algo.processFrame(image, cfg=self.config["algo"])
    if self.commChan:
        if target is not None:
            self.commChan.UpdateVisionState("Acquired")
            target.send()
        else:
            self.commChan.UpdateVisionState("Searching")
def processFrame(self, image):
    logging.debug(" (multi threaded)")
    target, frame = algo.processFrame(image, algo=self.args.algo,
                                      cfg=self.config["algo"],
                                      display=self.args.display,
                                      debug=self.args.debug)
    if target is not None:
        logging.debug("Target value is: " + str(target))
    if self.commChan:
        if target is not None:
            self.commChan.UpdateVisionState("Acquired")
            target.send()
        else:
            self.commChan.UpdateVisionState("Searching")
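# --------------------------------------------------------------------
# Illustrative sketch only (assumption): the self.args namespace used above
# could come from an argparse setup along these lines.  Argument names match
# the attributes referenced (algo, display, debug); defaults and help text
# are hypothetical.
import argparse
_exampleParser = argparse.ArgumentParser()
_exampleParser.add_argument("--algo", default="pid", help="which vision algorithm to run")
_exampleParser.add_argument("--display", action="store_true", help="show annotated frames in a window")
_exampleParser.add_argument("--debug", action="store_true", help="enable verbose per-frame logging")
exampleArgs = _exampleParser.parse_args([])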
def processFrame(self, image):
    abort = False
    if self.commChan:
        self.target.clock = time.clock()
        self.commChan.SetTarget(self.target)
    frame = algo.processFrame(image, algo=self.args.algo)
    if self.args.display:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q") or key == 27:
            abort = True
        elif key == 255:
            pass
        elif key == 22:
            print("22 --")
        else:
            print(key)
    return abort
def processFrame(self, image):
    abort = False
    dirtyx, frame = algo.processFrame(image, algo=self.args.algo,
                                      display=self.args.display,
                                      debug=self.args.debug)
    if self.args.debug:
        print("Dirtyx is at: ", dirtyx)
        print("self.target.anglex is at: ", self.target.angleX)
    if self.commChan:
        self.target.clock = time.clock()
        if dirtyx > 25:  # Largest angle we expect is 22
            #logging.debug(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
            #logging.debug("Sent a 'searching' to networktables\n")
            self.commChan.updateVisionState("Searching")
        else:
            #logging.debug(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
            #logging.debug("Sent a 'aquired' to networktables\n", )
            self.commChan.updateVisionState("Aquired")
        # sending 'aquired' may be independent of the fact that we send a new target over
        if dirtyx != self.target.angleX:
            if self.args.debug:
                print("self.target.angleX is being changed to...", dirtyx)
            # Fix 0 bug?
            #logging.debug(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
            #logging.debug("DirtyX is currently at:", dirtyx)
            self.target.angleX = dirtyx
            # Not setting dy, because that may mess things up
            self.commChan.SetTarget(self.target)
    if self.args.display:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q") or key == 27:
            abort = True
        elif key == 255:
            pass
        elif key == 22:
            print("22 --")
        else:
            print(key)
    return abort
#args = vars(ap.parse_args())
args = ap.parse_args()

# single threaded --------------------------------------------------
# grab a pointer to the video stream and initialize the FPS counter
print("[INFO] sampling NONTHREADED frames from picam...")
vs = PiVideoStream(framerate=60)  # nb: we don't call start here
fps = FPS().start()

# loop over some frames
while fps._numFrames < args.num_frames:
    # grab the frame from the stream
    frame = vs.next()
    sys.stderr.write('-')
    frame = algo.processFrame(frame)

    # check to see if the frame should be displayed to our screen
    if args.display > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

    # update the FPS counter
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
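# --------------------------------------------------------------------
# Illustrative sketch only (not from the original source): a plausible
# reading of the trailing "do a bit of cleanup" comment.  vs.stop() assumes
# the custom PiVideoStream class exposes a stop() method, which this snippet
# does not show.
cv2.destroyAllWindows()   # close the preview window opened by cv2.imshow
# vs.stop()               # hypothetical: stop the camera stream if supported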