def processNoUSB(self, inframe):
    """Process one camera frame headlessly (no USB video out): compute a
    ball bearing from a placeholder pixel column and report it over serial.

    Fixes: use np.degrees / the true value of pi instead of the truncated
    constant 3.14 (which added avoidable angular error), and remove the
    duplicated commented-out sendSerial line.
    """
    # Get the next camera image (may block until it is captured) as OpenCV BGR.
    # For grayscale use getCvGRAY(); getCvRGB()/getCvRGBA() also supported:
    inimg = inframe.getCvBGR()

    # Start measuring image processing time (input conversion not included):
    self.timer.start()

    # TODO: implement real detection; centerX is currently a hard-coded
    # placeholder pixel column.
    centerX = 400
    # Bearing from pixel offset over the lens focal length (658 px):
    angleOfBall = np.arctan(centerX / 658.0)
    angleDegrees = np.degrees(angleOfBall)
    jevois.sendSerial("Ball" + str(angleDegrees))

    # Get frames/s info from our timer:
    fps = self.timer.stop()

    self.frame += 1
def process(self, inframe, outframe):
    """USB-output path: paste the raw camera frame into a taller YUYV output
    with a 12-pixel message band, run cube detection on a BGR copy, report
    the cube over serial, and overlay fps in the band."""
    # Raw camera frame (jevois image object, not an OpenCV array):
    inimg = inframe.get()
    self.timer.start()
    # OpenCV BGR copy used for detection:
    imgbgr = jevois.convertToCvBGR(inimg)
    h, w, chans = imgbgr.shape
    # Grab the USB output buffer; require h+12 rows so a black band for
    # text fits under the pasted camera image:
    outimg = outframe.get()
    outimg.require("output", w, h + 12, jevois.V4L2_PIX_FMT_YUYV)
    jevois.paste(inimg, outimg, 0, 0)
    jevois.drawFilledRect(outimg, 0, h, outimg.width, outimg.height - h, jevois.YUYV.Black)
    # Camera buffer no longer needed once pasted:
    inframe.done()
    cube = self.detect(imgbgr, outimg)
    # Load camera calibration if needed:
    # if not hasattr(self, 'camMatrix'): self.loadCameraCalibration(w, h)
    if cube is not None:
        jevois.sendSerial(cube.toJson())
    # Write frames/s info from our timer into the message band (NOTE: does not
    # account for output conversion time):
    fps = self.timer.stop()
    jevois.writeText(outimg, fps, 3, h - 10, jevois.YUYV.White, jevois.Font.Font6x10)
    outframe.send()
def process(self, inframe, outframe):
    """Detect the entry and the cylinder in the camera image, publish the
    measurements as a rome frame over serial, and stream the debug image
    when an output sink is attached."""
    bgr = inframe.getCvBGR()
    dbg, re, rc = process_image(bgr)
    entry_area, entry_color, entry_h = re    # entry measurements
    cylinder_area, cylinder_color = rc       # cylinder measurements

    # A missing detection is reported as zero area rather than None:
    if entry_area is None:
        entry_area = 0
    if cylinder_area is None:
        cylinder_area = 0

    payload = dict(
        entry_color=color_to_rome_color(entry_color),
        cylinder_color=color_to_rome_color(cylinder_color),
        entry_height=int(entry_h),
        entry_area=int(entry_area),
        cylinder_area=int(cylinder_area),
    )
    message = rome.Frame('jevois_tm_cylinder_cam', **payload)
    jevois.sendSerial(message.data())

    # Headless mode passes outframe=None; only stream when a sink exists:
    if outframe is not None:
        outframe.sendCvBGR(dbg)
def process(self, inframe, outframe):
    """Draw a demo circle and greeting onto the camera frame, overlay fps,
    send the result over USB, and report completion on serial."""
    # Next camera image as OpenCV BGR (may block until captured):
    img = inframe.getCvBGR()

    # If this module is ever switched to grayscale input, use
    # "height, width = img.shape" instead, otherwise numpy will throw
    # (see the PythonOpenCv module of jevoisbase):
    height, width = img.shape[0], img.shape[1]

    # Start measuring image processing time (input conversion not included):
    self.timer.start()

    # Demo drawings -- see the OpenCV drawing-functions tutorial/reference:
    center = (int(width / 2), int(height / 2))
    cv2.circle(img, center, 100, (255, 0, 0), 3)
    greeting = "Hello JeVois - frame {}".format(self.frame)
    cv2.putText(img, greeting, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 255), 1, cv2.LINE_AA)

    # Overlay frames/s info (output conversion not included):
    fps = self.timer.stop()
    cv2.putText(img, fps, (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 1, cv2.LINE_AA)

    # Convert to the video output format and send over USB:
    outframe.sendCv(img)

    # Serial message (enable display with 'setpar serout All' in the console):
    jevois.sendSerial("DONE frame {}".format(self.frame))
    self.frame += 1
def process(self, inframe, outframe):
    """Find pucks in the camera frame, report them over serial, and stream
    an annotated debug image over USB."""
    frame_bgr = inframe.getCvBGR()

    # Start measuring image processing time (input conversion not included):
    self.timer.start()

    pucks_circles, outimg = self.find_pucks(frame_bgr, with_output=True)
    jevois.sendSerial("{}".format(self.serialize_puck_list(pucks_circles)))

    # Title, then one text line per detected puck (x, y, r, average, color):
    cv2.putText(outimg, "JeVois PuckDetector", (3, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
    for i, data in enumerate(pucks_circles):
        x, y, r, avr, col = data
        line = "{}, {}, {}, {}, {}".format(x, y, r, avr, col)
        cv2.putText(outimg, line, (3, 40 + 20 * i),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))

    # Overlay frames/s info (output conversion not included):
    fps = self.timer.stop()
    height, width = outimg.shape[0], outimg.shape[1]
    cv2.putText(outimg, fps, (3, height - 6),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))

    # Convert the annotated image to the output format and send over USB:
    outframe.sendCv(outimg)
def sendAllSerial(self, w, h, hlist, rvecs, tvecs):
    """Send one serial line per detection: X/Z displacement converted from
    meters to feet (self.mToFt) and the x component of the rotation axis.

    Initialize by writing the following serial commands:
      setmapping2 YUYV 640 480 30.0 JeVois FirstPython
      streamon

    Fixes: removed the quaternion computation (theta/r/i/q, lifted from
    pyquaternion from_axis_angle) whose results were never used in the
    serial message; replaced the manual idx counter with zip. The axis
    normalization is kept, since the normalized axis[0] is what is sent.
    """
    # Walk detections and their pose vectors in lockstep:
    for c, tv, axis in zip(hlist, tvecs, rvecs):
        # Normalize the rotation (Rodrigues) vector when needed, as the
        # original did -- its first component is reported below:
        mag_sq = axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2]
        if abs(1.0 - mag_sq) > 1e-12:
            axis = axis / (mag_sq**0.5)
        # Send x, z displacements and y rotation:
        jevois.sendSerial("X: {} Y: {} Angle: {}".format(
            np.asscalar(tv[0]) * self.mToFt,
            np.asscalar(tv[2]) * self.mToFt,
            np.asscalar(axis[0])))
def process(self, i, val):
    """Update the on/off state of note i from its color triple and emit the
    matching serial event; return the note's velocity (0 when off)."""
    brightness = val[0] + val[1] + val[2]
    currently_on = i in self.notesOn
    should_be_on = brightness > thresh

    if currently_on and not should_be_on:
        # Note released: forget it and signal "off" over serial.
        del self.notesOn[i]
        jevois.sendSerial("n" + str(i) + "_0\n")
        return 0

    if currently_on:
        # Still held: keep reporting the velocity captured at press time.
        return self.notesOn[i]

    if should_be_on:
        # New press: velocity scales with the strongest channel, capped at 127.
        velocity = math.floor(
            min(127, (80 * max(val[0], val[1], val[2]) / thresh)))
        self.notesOn[i] = velocity
        jevois.sendSerial("n" + str(i) + "_" + str(velocity) + "\n")
        return velocity

    return 0
def process(self, inframe, outframe): jevois.LINFO("process with usb") # Get the next camera image (may block until it is captured): inimg = inframe.get() jevois.LINFO("Input image is {} {}x{}".format(jevois.fccstr(inimg.fmt), inimg.width, inimg.height)) # Get the next available USB output image: outimg = outframe.get() jevois.LINFO("Output image is {} {}x{}".format(jevois.fccstr(outimg.fmt), outimg.width, outimg.height)) # Example of getting pixel data from the input and copying to the output: jevois.paste(inimg, outimg, 0, 0) # We are done with the input image: inframe.done() # Example of in-place processing: jevois.hFlipYUYV(outimg) # Example of simple drawings: jevois.drawCircle(outimg, int(outimg.width/2), int(outimg.height/2), int(outimg.height/4), 2, jevois.YUYV.White) jevois.writeText(outimg, "Hi from Python - @MODULE@", 20, 20, jevois.YUYV.White, jevois.Font.Font10x20) # We are done with the output, ready to send it to host over USB: outframe.send() # Send a string over serial (e.g., to an Arduino). Remember to tell the JeVois Engine to display those messages, # as they are turned off by default. For example: 'setpar serout All' in the JeVois console: jevois.sendSerial("DONE frame {}".format(self.frame)); self.frame += 1
def isAngle(self, contour, hsv, minAngle, maxAngle, goodColor, draw):
    """Fit a line through the contour and test whether its angle (degrees)
    lies within [minAngle, maxAngle].

    Returns (inBounds, angle). When draw is true, the fitted line is drawn
    onto hsv -- in goodColor if in bounds, red otherwise.

    Fixes: use math.pi instead of the truncated 3.1415; drop the unused
    minAreaRect/boxPoints/abs locals; narrow the bare except around
    cv2.line to Exception so system exits still propagate.
    """
    rows, cols = hsv.shape[:2]
    # Least-squares line fit: (vx, vy) direction, (x, y) a point on the line:
    [vx, vy, x, y] = cv2.fitLine(contour, cv2.DIST_L2, 0, 0.01, 0.01)
    # Line height at the left and right image borders:
    leftY = int((-x * vy / vx) + y)
    rightY = int(((cols - x) * vy / vx) + y)

    # NOTE(review): vx == 0 (vertical line) would divide by zero above --
    # confirm fitLine never returns exactly 0 for these contours.
    angle = math.atan(vy / vx) * 180 / math.pi

    inBounds = minAngle <= angle <= maxAngle
    color = goodColor if inBounds else (0, 0, 255)

    if draw:
        try:
            cv2.line(hsv, (cols - 1, rightY), (0, leftY), color, 2)
        except Exception:
            # Drawing can fail on extreme/overflowing endpoints; report them:
            jevois.sendSerial("LeftY: " + str(leftY))
            jevois.sendSerial("RightY: " + str(rightY))
    return inBounds, angle
def sendAllSerial(self, w, h, hlist, rvecs, tvecs):
    """Send one "D3 ... FIRST" serial message per detection: 3D position,
    object size (self.owm x self.ohm x 1.0), and pose quaternion (r, i).

    Fixes: removed the q tuple which repackaged r and i but was never used
    (the components are formatted directly); replaced the manual idx
    counter with zip over the pose vectors.
    """
    for c, tv, axis in zip(hlist, tvecs, rvecs):
        # Compute quaternion from the Rodrigues axis-angle vector
        # (adapted from pyquaternion from_axis_angle). FIXME need to check!
        angle = (axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2])**0.5
        mag_sq = axis[0] * axis[0] + axis[1] * axis[1] + axis[2] * axis[2]
        if abs(1.0 - mag_sq) > 1e-12:
            axis = axis / (mag_sq**0.5)
        theta = angle / 2.0
        r = math.cos(theta)
        i = axis * math.sin(theta)
        jevois.sendSerial("D3 {} {} {} {} {} {} {} {} {} {} FIRST".format(
            np.asscalar(tv[0]), np.asscalar(tv[1]), np.asscalar(tv[2]),   # position
            self.owm, self.ohm, 1.0,                                      # size
            r, np.asscalar(i[0]), np.asscalar(i[1]), np.asscalar(i[2])))  # pose
def ball(frame):
    """Detect the ball by color, draw its enclosing circle and centroid on
    frame, and send its bearing over serial as "B<angle>,".

    Relies on module globals: prepImage, withinTolerance, YELLOW, DPP
    (presumably degrees per pixel) and VIEW_ANGLE -- confirm against their
    definitions. NOTE(review): the divisions by M["m00"] raise
    ZeroDivisionError for a degenerate contour; confirm upstream filtering
    prevents that.
    """
    # Binary mask of ball-colored pixels (bounds passed to prepImage):
    ballImage = prepImage(frame.copy(), [5, 8, 80], [85, 115, 255])
    bcopy = ballImage.copy()
    ballContours, b = cv2.findContours(bcopy, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(frame, ballContours, -1, YELLOW, 3)
    if len(ballContours) > 1:
        # Keep only contours whose area is close (30% tolerance) to that of
        # their min enclosing circle, i.e. roughly circular blobs:
        cleanedBall = []
        for i in range(len(ballContours)):
            ((x, y), radius) = cv2.minEnclosingCircle(ballContours[i])
            if (withinTolerance(radius * radius * 3.1415, cv2.contourArea(ballContours[i]), 0.3)):
                cleanedBall.append(ballContours[i])
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        # NOTE(review): cleanedBall is built but never used -- max() runs on
        # the unfiltered ballContours. Confirm whether the circularity
        # filter was meant to apply here.
        c = max(ballContours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        ballCenter = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        ballX = int(M["m10"] / M["m00"])
        # only proceed if the radius meets a minimum size
        if (radius > 10):
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, ballCenter, 5, (0, 0, 255), -1)
            targetAngleBall = (ballX * DPP) - VIEW_ANGLE / 2
            jevois.sendSerial("B" + str(targetAngleBall) + ",")
def processNoUSB(self, inframe):
    """Headless path: detect the cube in the BGR camera frame and, when one
    is found, send its JSON description over serial."""
    frame_bgr = inframe.getCvBGR()
    cube = self.detect(frame_bgr)
    # Load camera calibration if needed:
    # if not hasattr(self, 'camMatrix'): self.loadCameraCalibration(w, h)
    if cube is not None:
        jevois.sendSerial(cube.toJson())
def processCommon(self, inframe, outframe):
    """Shared pipeline for USB and headless modes: filter the frame, find
    and filter contours, detect the target, then push telemetry to serial
    and (optionally) the mask image to USB."""
    # Grab the frame and set up per-frame state:
    self.InitFunction(inframe)
    # Calibrate the camera:
    self.loadCameraCalibration()
    # Filter the image based on predefined filters:
    self.LightFilter()
    # Find contours in the filtered mask:
    contours = self.findcontours(self.mask)
    # Filter contours based on width and area thresholds:
    filteredContours = self.filtercontours(contours, 100.0, 30.0)
    # Detect the target; will eventually return translation/rotation vectors:
    self.TargetDetection(filteredContours)
    # Data tracking / bookkeeping:
    self.DataTracker()
    # One brace-wrapped comma-separated telemetry record per frame:
    telemetry = "{{{},{},{},{},{},{},{},{},{},{}}}\n".format(
        self.ret, self.yaw, self.xval, self.yval, self.resetcounter,
        self.framerate_fps, self.CPULoad_pct, self.CPUTemp_C,
        self.pipelineDelay_us, self.angle)
    jevois.sendSerial(telemetry)
    # Stream the mask output only when a viewer is attached:
    if self.streamCheck:
        outframe.sendCvRGB(self.maskoutput)
def process(self, inframe, outframe):
    """Detect faces in the camera frame, save each face crop as a training
    image, annotate the frame, and stream it over USB.

    Fix: the Haar cascade is now loaded once (lazily, on first use) instead
    of being re-read from disk on every single frame.
    """
    self.timer.start()
    msg = "detection"

    # Color frame for annotation/display, grayscale copy for detection:
    color_img = inframe.getCvBGR()
    gray_img = inframe.getCvGRAY()

    # One-time lazy load of the face detector (was reloaded per frame):
    if not hasattr(self, 'face_cascade'):
        self.face_cascade = cv2.CascadeClassifier(
            'share/facedetector/haarcascade_frontalface_alt.xml')

    faces = self.face_cascade.detectMultiScale(gray_img, scaleFactor=1.1,
                                               minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        jevois.sendSerial("face detected".format(self.frame))
        # Save the grayscale face crop as a numbered training image:
        cv2.imwrite(
            'modules/JeVois/PythonSandbox/training-data/' + str(self.i) + ".jpg",
            gray_img[y:y + h, x:x + w])
        msg = "Coucou clic Photo"
        jevois.sendSerial("photo taken")
        self.frame += 1
        self.i += 1

    outimg = color_img

    # Write a title:
    cv2.putText(outimg, msg, (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 1, cv2.LINE_AA)

    # Write frames/s info from our timer (NOTE: does not account for output
    # conversion time):
    fps = self.timer.stop()
    height, width, channels = outimg.shape  # if grayscale: height, width = outimg.shape
    cv2.putText(outimg, fps, (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 1, cv2.LINE_AA)

    # Convert our BGR output image to video output format and send to host
    # over USB (use sendCvGRAY/RGB/RGBA for other formats):
    outframe.sendCvBGR(outimg)
def process(self, inframe, outframe):
    """Find color targets: threshold in HSV, keep 4-8 vertex convex shapes
    with aspect ratio in (1, 5), and report the average center offset from
    (160, 120) over serial as "&<dx>,<dy>" (or "&None" when not 1-2 hits).

    Fixes: the original built two identical masks (lowerMask/upperMask over
    the same HSV range), OR-ed them into a 'mask' that was never used, and
    only fed upperMask to the color filter -- the duplicate/dead work is
    removed with unchanged output. Also guards against zero-height shapes
    before the aspect-ratio division.
    """
    img = inframe.getCvBGR()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Single in-range mask (the original lower/upper ranges were identical):
    mask = cv2.inRange(hsv, np.array([60, 120, 240]), np.array([100, 255, 255]))
    colorFiltered = cv2.bitwise_and(img, img, mask=mask)

    # OpenCV 3 returns (image, contours, hierarchy):
    image, contours, hierarchy = cv2.findContours(
        cv2.split(colorFiltered)[2], cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_TC89_KCOS)

    goodContours = []
    for contour in contours:
        hull = cv2.convexHull(contour, False)
        poly = cv2.approxPolyDP(hull, 3.5, True)
        # Target shapes simplify to 4..8 vertices:
        if len(poly) < 4 or len(poly) > 8:
            continue

        # Axis-aligned bounding box of the simplified polygon:
        xs = [point[0][0] for point in poly]
        ys = [point[0][1] for point in poly]
        lowX, highX = min(xs), max(xs)
        lowY, highY = min(ys), max(ys)

        width = highX - lowX
        height = highY - lowY
        if height == 0:
            # Degenerate (flat) shape; skip to avoid division by zero.
            continue
        centerX = (lowX + highX) / 2
        centerY = (lowY + highY) / 2

        # Wider-than-tall shapes only:
        ratio = width / height
        if 1 < ratio < 5:
            img = cv2.drawContours(img, [hull], 0, (0, 0, 255), 3)
            goodContours.append([contour, centerX, centerY])

    if len(goodContours) in (1, 2):
        # Average the accepted centers and send the offset from the nominal
        # image center (160, 120):
        avgX = sum(c[1] for c in goodContours) / len(goodContours)
        avgY = sum(c[2] for c in goodContours) / len(goodContours)
        jevois.sendSerial("&" + str(160 - avgX) + "," + str(120 - avgY))
    else:
        jevois.sendSerial("&None")

    outframe.sendCv(img)
def processNoUSB(self, inframe):
    """Headless puck detection: find pucks in the camera frame and send the
    serialized list over serial.

    Fix: removed a leftover profane debug LINFO call that spammed the log
    on every frame.
    """
    # Next camera image as OpenCV BGR (may block until captured):
    inimg = inframe.getCvBGR()

    # Start measuring image processing time (input conversion not included):
    self.timer.start()

    pucks_circles = self.find_pucks(inimg)
    jevois.sendSerial("{}".format(self.serialize_puck_list(pucks_circles)))
def process(self, inframe, outframe):
    """Run the vision pipeline on the frame, draw its summary text, stream
    the annotated image over USB, and emit the serial payload."""
    # The pipeline returns the annotated image, an overlay string, and the
    # serial payload:
    outimg, text, data = self.run(inframe)
    # Overlay the vision calculations near the top-left corner:
    cv2.putText(outimg, text, (3, 20), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255))
    # Ship the image to the host:
    outframe.sendCv(outimg)
    # Ship the data to the serial port:
    jevois.sendSerial(data)
def process(self, inframe, outframe):
    """Pass the camera image through to USB unchanged, log a heartbeat line
    to the data file, and count frames.

    Fix: the data-file message said "wrote within processNoUsb" even though
    this is the with-USB process() path; corrected to name the right method.
    """
    # Next camera image as OpenCV BGR (may block until captured):
    inimg = inframe.getCvBGR()
    outframe.sendCv(inimg)

    # (disabled overlay examples)
    # cv2.putText(outimg, fps, (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255))
    # cv2.putText(outimg, str(len(squares)), (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255))

    jevois.sendSerial("With usb\r\n")
    self.datafile.write("wrote within process\r\n")
    self.frame += 1
def process(self, inframe, outframe):
    """Echo the camera frame to USB, bump the frame counter, and emit a
    fixed log line plus a per-frame serial message."""
    #jevois.drawRect(inframe,50,50,50,50,5,0)
    frame_bgr = inframe.getCvBGR()
    self.frame += 1
    # Send processed data (fixed placeholder values in the log line):
    jevois.LINFO("{-35,20,105,234}")
    jevois.sendSerial("DONE frame {} \n".format(self.frame))
    # We are done with the output, ready to send it to host over USB:
    outframe.sendCvBGR(frame_bgr)
def process(self, inframe, outframe):
    """Detect the vision-tape target: threshold in HSV, clean the mask with
    open/close morphology, and report distance and bearing for rectangular
    contours with the tape's tall-and-narrow aspect ratio.

    Relies on module globals: interpolation, lowerBound, upperBound,
    kernelOpen, kernelClose, focalLength, centerHorizonPixels, pixelToDeg.

    Fix: cv2.resize was called as resize(img, dsize, 0, 0, interpolation),
    which binds the third positional argument to 'dst' and the
    interpolation flag to 'fy' -- not to 'interpolation'. It is now passed
    by keyword so the configured interpolation actually takes effect.
    """
    # Get the next camera image and convert it to OpenCV BGR:
    inimg = inframe.getCvBGR()

    # Start measuring image processing time:
    self.timer.start()

    # Normalize to 640x480 with the configured interpolation:
    inimg = cv2.resize(inimg, (640, 480), interpolation=interpolation)

    # Convert BGR to HSV and threshold to the configured color band:
    inimgHSV = cv2.cvtColor(inimg, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(inimgHSV, lowerBound, upperBound)

    # Morphology: open removes speckle, close fills holes:
    maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
    maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)

    # Find external contours (OpenCV 3 returns image, contours, hierarchy):
    im2, conts, h = cv2.findContours(maskClose, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_NONE)

    for i in range(len(conts)):
        perimeter = cv2.arcLength(conts[i], True)
        approx = cv2.approxPolyDP(conts[i], 0.02 * perimeter, True)
        # Ideally 4 vertices for a rectangle, but allow up to 6 at distance:
        if len(approx) <= 6:
            x, y, w, h = cv2.boundingRect(conts[i])
            # Tape is much taller than wide: accept 0.1 < w/h < 0.25:
            if ((w / h) < .25 and (w / h) > .1):
                cv2.rectangle(inimg, (x, y), (x + w, y + h), (0, 0, 255), 2)
                targetCenterX = x + (w / 2)
                # NOTE(review): y - (h / 2) lies above the bounding-box top;
                # y + (h / 2) would be the box center -- confirm intent.
                targetCenterY = y - (h / 2)
                distanceToTarget = (focalLength * 2) / w
                degreesToTarget = (centerHorizonPixels - x) / pixelToDeg
                jevois.sendSerial(
                    "Target # {} w {} distance {} angle {}".format(
                        i, w, distanceToTarget, degreesToTarget))

    # Write frames/s info from our timer:
    fps = self.timer.stop()
    outframe.sendCvBGR(inimg)
def processNoUSB(self, inframe):
    """Headless demo of parallel processing: run Canny edge detection on
    the grayscale frame in 4 worker processes and report result sizes on
    serout.

    Fixes: renamed the message accumulator that shadowed the builtin 'str';
    use None (not 0) as the no-error sentinel so a falsy exception object
    cannot be silently dropped.
    """
    # Create a parallel processing pool and a timer on first frame only:
    if not hasattr(self, 'pool'):
        # Not specifying the number of processes uses the number of cores:
        self.pool = mp.Pool()
        # JeVois timer to measure our processing framerate:
        self.timer = jevois.Timer("PythonParallel", 100, jevois.LOG_INFO)

    # Next camera image as OpenCV grayscale (may block until captured):
    inimggray = inframe.getCvGRAY()

    # Start measuring image processing time (input conversion not included):
    self.timer.start()

    # Launch 4 Canny instances in parallel with different thresholds:
    futures = [self.pool.apply_async(computefunc,
                                     args=(inimggray, 10 * x, 20 * x,))
               for x in range(1, 5)]

    # Collect every result first, then rethrow the last worker exception
    # (if any), so all workers are drained before we bail out:
    results = []
    error = None
    for future in futures:
        try:
            results.append(future.get(timeout=10))
        except Exception as e:
            error = e
    if error is not None:
        raise error

    # In a real module we would use the results; here just report sizes:
    msg = ""
    for ii, res in enumerate(results):
        h, w = res.shape
        msg += "Canny {}: {}x{} ".format(ii, w, h)

    # Send a message to serout:
    jevois.sendSerial(msg)

    # Report frames/s info to serlog:
    self.timer.stop()
def send_message(self, mark, pos):
    """Send a detected marker to the AP over the serial link.

    pos is ((u, v), (w, h), _): pixel center, size, and an ignored field.
    The center is converted to "mm" in the image frame, either through the
    pinhole center/focal parameters or via fisheye undistortion when a
    fisheye calibration is available.
    """
    (u, v), (w, h), _ = pos
    if self.calib_fisheye is None:
        # Pinhole model: offset from the principal point over focal length.
        x = 1000. * (u - self.center[0]) / self.focal[0]
        y = 1000. * (v - self.center[1]) / self.focal[1]
    else:
        # Fisheye model: undistort the pixel first.
        src_pt = np.array([[[u, v]]], dtype=np.float32)
        undist = cv2.fisheye.undistortPoints(src_pt, self.calib_fisheye[0],
                                             self.calib_fisheye[1])
        x = 1000. * undist[0][0][0]
        y = 1000. * undist[0][0][1]
    jevois.sendSerial("N2 {} {:.2f} {:.2f} {:.2f} {:.2f}".format(
        mark, x, y, w, h))
def process(self, inframe, outframe):
    """Scene-change detector: diff the current grayscale frame against the
    previous one and only refresh the USB output (and signal over serial)
    when enough pixels changed; otherwise keep showing the last frame."""
    global lastimg       # previous grayscale frame (None on first call)
    global lastcolorimg  # color frame shown while the scene is unchanged
    global x             # num of changed pixels before considering the scene different

    # Grayscale copy for differencing, color copy for display:
    inimg = inframe.getCvGRAY()
    colorimg = inframe.getCvBGR()
    if lastimg is None:
        # First frame: seed the reference images.
        lastimg = inframe.getCvGRAY()
        lastcolorimg = colorimg

    # Start measuring image processing time (NOTE: does not account for input conversion time):
    self.timer.start()

    diffimg= cv2.absdiff(inimg,lastimg)  # compare two images. Will be all black if they are the same.
    totaldiff = np.sum(diffimg)  # count how many pixels are not black
    if totaldiff > x:
        # The image has changed: update the displayed frame.
        outimg=colorimg
        lastcolorimg=colorimg
        jevois.sendSerial("Frame Changed");
    else:
        #outimg = diffimg #use for checking the tolerances
        outimg = lastcolorimg
        # NOTE(review): sleeping inside process() throttles the whole
        # pipeline -- presumably intentional to reduce load while static.
        time.sleep(.1)
    # New reference frame for the next comparison:
    lastimg = inframe.getCvGRAY()

    # Write a title:
    cv2.putText(outimg, "JeVois CV_Diff", (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255))
    # Write frames/s info from our timer into the edge map (NOTE: does not account for output conversion time):
    fps = self.timer.stop()  # not used
    height = outimg.shape[0]
    width = outimg.shape[1]
    cv2.putText(outimg, str(totaldiff), (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255))

    # Convert our output image to video output format and send to host over USB:
    outframe.sendCv(outimg)

    # Example of sending some serial output message:
    # jevois.sendSerial("DONE frame {}".format(self.frame)); self.frame += 1
    jevois.sendSerial("diff: {}".format(str(totaldiff)));
def send_serial(self, color):
    """Report the detected ball color over serial as RED, BLUE, or NONE."""
    if color == BallColor.RED:
        message = "RED"
    elif color == BallColor.BLUE:
        message = "BLUE"
    else:
        message = "NONE"
    jevois.sendSerial(message)
def processAndSend(self, source0):
    """Run reflective-tape detection on source0, solve PnP for each detected
    target pair, and send the results as one JSON message over serial.

    The JSON payload carries which camera this is, how long ago the frame
    was captured, and a list of {angle, rotation, distance} targets.

    Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    corner array now uses the builtin float as its dtype, which is what
    np.float aliased (same semantics, future-proof).
    """
    capture_timestamp = time.time()
    self.reflectiveVision.process(source0)
    height, width, _ = source0.shape

    # Lazy-load calibration the first time we know the frame size:
    if self.cameraMatrix is None or self.distortionMatrix is None:
        self.load_camera_calibration(width, height)

    json_pair_list = []
    for pair in self.reflectiveVision.pairs:
        lx, ly, lw, lh = pair.left.bounding_rect
        rx, ry, rw, rh = pair.right.bounding_rect
        # Outer corners of the pair, built from the two bounding rects:
        # left-bottom, left-top, right-top, right-bottom (image coords,
        # y grows downward). Must match the order of self.target_coords.
        image_corners = np.array(
            [[lx, ly + lh], [lx, ly], [rx + rw, ry], [rx + rw, ry + rh]],
            dtype=float)
        pair.solvePnPData = cv2.solvePnP(self.target_coords, image_corners,
                                         self.cameraMatrix,
                                         self.distortionMatrix)
        retval, rvec, tvec = pair.solvePnPData
        if retval:
            output = self.compute_output_values(rvec, tvec)
            json_pair_list.append({
                "angle": -output[1],
                "rotation": -output[2],
                "distance": output[0]
            })

    send_timestamp = time.time()
    capture_ago = send_timestamp - capture_timestamp
    jevois.sendSerial(
        json.dumps({
            "is_front": self.is_front_camera,
            "capture_ago": capture_ago,
            "targets": json_pair_list
        }))
def processNoUSB(self, inframe):
    """Headless emotion recognition: classify the grayscale frame with the
    FER+ ONNX network and send the eight class scores (percent) over serial.

    Fixes: removed unused locals (font/size/color constants, maxconf, frame
    dimensions, and the backend/target variables that were assigned but not
    passed); the serial message is now assembled with a join over the
    flattened scores -- the output text is byte-identical.
    """
    # Load the network if needed (first frame only):
    if not hasattr(self, 'net'):
        self.classes = ["neutral", "happiness", "surprise", "sadness",
                        "anger", "disgust", "fear", "contempt"]
        self.model = 'FER+ ONNX'
        self.net = cv.dnn.readNet('/jevois/share/opencv-dnn/classification/emotion_ferplus.onnx', '')
        self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_DEFAULT)
        self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

    # Get the next frame from the camera sensor:
    frame = inframe.getCvBGR()
    self.timer.start()

    # FER+ takes a grayscale input; build a 4D blob from the frame:
    gframe = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    blob = cv.dnn.blobFromImage(gframe, self.scale,
                                (self.inpWidth, self.inpHeight),
                                self.mean, self.rgb, crop=True)

    # Run the model:
    self.net.setInput(blob)
    out = self.net.forward().flatten()

    # The 8 class scores as percentages rounded to 2 decimals:
    scores = ' '.join(str(round(v * 100, 2)) for v in out[:8])
    jevois.sendSerial('> mood: ' + scores)
def process(self, inframe, outframe):
    """Sandbox template: run a Laplacian edge filter over the camera frame,
    overlay a title and fps, and stream the result over USB."""
    # Next camera image as OpenCV BGR (getCvGRAY/RGB/RGBA also available):
    source = inframe.getCvBGR()

    # Start measuring image processing time (input conversion not included):
    self.timer.start()

    # Edge map via the OpenCV Laplacian -- replace with your own processing
    # (see the OpenCV filtering / morphology / gradients / thresholding
    # tutorials). The simplest alternative is "outimg = source" for a plain
    # copy of the input to the output.
    outimg = cv2.Laplacian(source, -1, ksize=5, scale=0.25, delta=127)

    # Write a title:
    cv2.putText(outimg, "JeVois @MODULE@", (3, 20), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255,255,255))

    # Overlay frames/s info (output conversion not included):
    fps = self.timer.stop()
    height = outimg.shape[0]
    width = outimg.shape[1]
    cv2.putText(outimg, fps, (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255,255,255))

    # Convert to the video output format and send over USB:
    outframe.sendCv(outimg)

    # Per-frame serial message:
    jevois.sendSerial("DONE frame {}".format(self.frame))
    self.frame += 1
def tellRobot(out_center_x, out_center_y, serial_format="XY"):
    """Send a motion command toward the tracked bounding box over serial.

    NOTE(review): this def reads self.bbox but declares no self parameter --
    it only works as a nested function closing over self, otherwise
    out_center_x silently receives the instance. Confirm how it is
    defined and called.
    """
    if self.bbox is None:
        # Nothing tracked: tell the robot to stop.
        jevois.sendSerial("stop")
    else:
        # Center of the tracked box: (x + w/2, y + h/2).
        box_center_x, box_center_y = self.bbox[
            0] + self.bbox[2] / 2, self.bbox[1] + self.bbox[3] / 2
        if serial_format == "XY":
            # Absolute per-axis distance between box center and target center:
            if out_center_x < box_center_x:
                move_x = box_center_x - out_center_x
            elif box_center_x < out_center_x:
                move_x = out_center_x - box_center_x
            elif box_center_x == out_center_x:
                move_x = 0
            if out_center_y < box_center_y:
                move_y = box_center_y - out_center_y
            elif box_center_y < out_center_y:
                move_y = out_center_y - box_center_y
            elif box_center_y == out_center_y:
                move_y = 0
            # Clamp both commands to a minimum magnitude of 100:
            if move_x < 100:
                move_x = 100
            if move_y < 100:
                move_y = 100
            jevois.sendSerial("move {} {}".format(
                int(move_x), int(move_y)))
        else:
            jevois.sendSerial("Invalid Serial Format")
def process(self, inframe, outframe):
    """Recognize known faces: detect with a Haar cascade, identify with an
    LBPH recognizer, annotate the frame, and report the name over serial.

    Fix: the LBPH recognizer plus its trained model and the Haar cascade
    are now loaded once (lazily) instead of being re-read from disk on
    every single frame.
    """
    Id = 0
    confidence = 0.0

    # One-time lazy load of recognizer and detector (was per-frame):
    if not hasattr(self, 'Recognizer'):
        self.Recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.Recognizer.read('share/facedetector/trainingData.yml')
        self.haar = cv2.CascadeClassifier(
            'share/facedetector/haarcascade_frontalface_alt.xml')

    color_img = inframe.getCvBGR()
    gray = inframe.getCvGRAY()

    faces = self.haar.detectMultiScale(gray, 1.1, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Identify the face crop; Id 0 means "not recognized":
        Id, confidence = self.Recognizer.predict(gray[y:y + h, x:x + w])
        if Id == 0:
            self.facename = "Unknown"
        else:
            # facenames is 0-based while trained labels start at 1:
            self.facename = self.facenames[Id - 1]
        jevois.sendSerial("Face:" + str(self.facename))

    outimg = color_img

    # Title: the last recognized name (or the previous frame's name when no
    # face was found this frame):
    cv2.putText(outimg, str(self.facename), (3, 20), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255), 1, cv2.LINE_AA)

    # NOTE(review): self.timer.start() is never called in this method --
    # confirm stop() without start() yields a meaningful fps string.
    fps = self.timer.stop()
    height, width, channels = outimg.shape  # if grayscale: height, width = outimg.shape
    cv2.putText(outimg, str(confidence), (3, height - 6),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

    # Convert our BGR output image to video output format and send to host
    # over USB (use sendCvGRAY/RGB/RGBA for other formats):
    outframe.sendCvBGR(outimg)
def processNoUSB(self, inframe):
    """Headless placeholder: grab a frame, emit a fixed target message over
    serial, log to the data file, and advance the counters."""
    # Next camera image as OpenCV BGR (may block until captured):
    frame_bgr = inframe.getCvBGR()

    # Start measuring image processing time (input conversion not included):
    self.timer.start()

    # TODO: real processing goes here; for now emit placeholder results.
    self.countup += 0.1
    jevois.sendSerial("Target 10 10 10")
    self.datafile.write("Target data written\r\n")

    # Get frames/s info from our timer:
    fps = self.timer.stop()

    self.frame += 1