def __init__(self):
    """Initialize the lane-detection camera node (parameter-driven version).

    Reads all tunables from the ROS parameter server, opens and calibrates
    the camera, wires up the node's publishers/subscribers, then enters the
    capture/edge-detection loop until ROS shuts down.  Never returns under
    normal operation.
    """
    # Define camera settings.
    # NOTE: brightness/contrast/saturation/hue live in image_processing_param.yaml.
    self.vid = cv2.VideoCapture(rospy.get_param("/videoDevicePath"))  # e.g. /dev/video6
    self.vid.set(10, rospy.get_param("/brightness"))   # brightness
    self.vid.set(11, rospy.get_param("/contrast"))     # contrast
    self.vid.set(12, rospy.get_param("/saturation"))   # saturation
    self.vid.set(13, rospy.get_param("/hue"))          # hue

    # Declare calibration matrices to rectify the image.
    self.mtx = np.array(rospy.get_param("/mtx"))    # camera intrinsics
    self.dist = np.array(rospy.get_param("/dist"))  # distortion coefficients

    # Camera resolution.
    self.width = rospy.get_param("/width")
    self.height = rospy.get_param("/height")

    # Reference velocity.
    self.v_ref = rospy.get_param("/reference_velocity")

    # Number of points for the moving-average filter.
    self.numMovingAveragePoints = rospy.get_param("/numMovingAveragePoints")
    self.movingAverageValue = np.zeros([2, self.numMovingAveragePoints])

    # Number of sample points at the reference velocity to check along the path.
    self.numpoints = rospy.get_param("/numStepsToLookAhead")

    # Set node loop rate (e.g. 30 Hz).
    self.loop_rate = rospy.get_param("/loop_rate")
    self.dt = 1.0 / self.loop_rate
    self.rate = rospy.Rate(self.loop_rate)

    # Mapping matrices between pixel space and inertial space
    # (semantics defined by the yaml config; consumed elsewhere in the class).
    self.f1Matrix = rospy.get_param("/f1Matrix")
    self.f2Matrix = rospy.get_param("/f2Matrix")
    self.bMatrix = rospy.get_param("/bMatrix")
    self.yPixel_to_xInertial_Matrix = rospy.get_param("/yPixel_to_xInertial_Matrix")
    self.xInertial_to_yPixel_Matrix = rospy.get_param("/xInertial_to_yPixel_Matrix")
    self.furthest_distance = rospy.get_param("/furthest_distance")
    self.camera_offset_distance = rospy.get_param("/camera_offset_distance")
    self.flipped_camera = rospy.get_param("/flipped_camera")

    # Compute the undistortion and rectification transformation map.
    self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(
        self.mtx, self.dist, (self.width, self.height), 0,
        (self.width, self.height))
    self.mapx, self.mapy = cv2.initUndistortRectifyMap(
        self.mtx, self.dist, None, self.newcameramtx,
        (self.width, self.height), 5)

    # Messages to be filled.
    self.state_constraints = barc_state()
    self.reference_trajectory = barc_state()
    self.bridge = CvBridge()

    # Initialize publishers and subscribers.
    self.moving_pub = rospy.Publisher("moving", Moving, queue_size=1)
    self.hold_previous_turn_pub = rospy.Publisher("hold_previous_turn", Bool,
                                                  queue_size=1)
    self.moving_pub.publish(True)
    self.reference_trajectory_pub = rospy.Publisher("reference_trajectory",
                                                    barc_state, queue_size=1)
    self.reference_image_pub = rospy.Publisher("image_raw", Image, queue_size=1)
    self.uOpt_pub = rospy.Publisher("uOpt", Input, queue_size=1)
    self.optimal_state_sub = rospy.Subscriber("optimal_state_trajectory",
                                              barc_state,
                                              self.convertDistanceToPixels)
    self.dt_pub = rospy.Publisher("dt", Float64, queue_size=1)

    # The boolean messages passed to these topics are not used; we only want
    # them for the independently threaded callback functions.
    self.draw_lines_pub = rospy.Publisher("draw_lines", Bool, queue_size=1)
    self.draw_lines_sub = rospy.Subscriber("draw_lines", Bool,
                                           self.draw_lines, queue_size=1)
    self.publish_states_pub = rospy.Publisher("publish_states", Bool,
                                              queue_size=1)
    self.publish_states_sub = rospy.Subscriber("publish_states", Bool,
                                               self.publish_states,
                                               queue_size=1)
    self.show_Image_pub = rospy.Publisher("show_Image", Bool, queue_size=1)
    self.show_Image_sub = rospy.Subscriber("show_Image", Bool,
                                           self.show_Image, queue_size=1)

    # Loop bookkeeping.
    self.count = 0              # frames seen so far
    self.totalTimeCounter = 0   # samples in the average-time estimate
    self.totalTime = 0
    self.averageTime = 0
    self.publish_image = True
    self.previousTime = time.time()
    self.printme = False
    self.statepoints = ''
    self.camera_distance_calibrated = False

    print("Press Up Arrow to start moving. Press Down Arrow to stop moving.")

    while not rospy.is_shutdown():
        try:
            self.count = self.count + 1  # update the frame count
            self.rel, self.dst = self.vid.read()  # current frame from the camera

            # Update the sample time.
            self.dt = time.time() - self.previousTime
            self.previousTime = time.time()

            # Undistort the fisheye image to rectangular (flip 180 deg when
            # the camera is mounted upside down).
            if self.flipped_camera:
                self.cv_image = cv2.flip(
                    cv2.remap(self.dst, self.mapx, self.mapy,
                              cv2.INTER_LINEAR), -1)
            else:
                self.cv_image = cv2.remap(self.dst, self.mapx, self.mapy,
                                          cv2.INTER_LINEAR)
            # NOTE: this overwrites the configured width/height with the
            # valid-ROI size returned by getOptimalNewCameraMatrix.
            self.x, self.y, self.width, self.height = self.roi

            # colorFilter = True makes the edge detection search for a
            # red/white track using color thresholds; False uses grayscale
            # Canny and finds any edge regardless of color.
            # colorFilter = rospy.get_param("/colorFilter")
            colorFilter = False
            kernel_size = rospy.get_param("/kernel_size")
            if colorFilter:
                imageToFilter = self.cv_image
                # Black out the top portion of the image (not used).
                imageToFilter[0:280, 0:self.width] = 0
                # Color thresholds are stored as (R,G,B) params; flip to the
                # (B,G,R) order OpenCV images use.
                lower_red = np.flipud(np.array(rospy.get_param("/lower_red")))
                upper_red = np.flipud(np.array(rospy.get_param("/upper_red")))
                lower_white = np.flipud(np.array(rospy.get_param("/lower_white")))
                upper_white = np.flipud(np.array(rospy.get_param("/upper_white")))
                # Threshold the image to keep only the red/white track.
                self.reds = cv2.inRange(imageToFilter, lower_red, upper_red)
                self.whites = cv2.inRange(imageToFilter, lower_white, upper_white)
                # Combine the red-filter and white-filter images.
                self.edges = cv2.bitwise_or(self.reds, self.whites)
                self.edges = cv2.GaussianBlur(self.edges,
                                              (kernel_size, kernel_size), 0)
                # Convert the blurred grayscale back to binary to filter once
                # more (return value of threshold() is unused).
                _, self.edges = cv2.threshold(self.edges, 127, 255,
                                              cv2.THRESH_BINARY)
            else:
                # Convert color image to grayscale, mask the top, blur, then
                # run OpenCV Canny edge detection.
                gray_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
                gray_image[0:270, 0:self.width] = 0
                gray_image = cv2.GaussianBlur(gray_image,
                                              (kernel_size, kernel_size), 0)
                self.edges = cv2.Canny(gray_image, 40, 80)

            # Parameters to combine images: dst = alpha*img1 + beta*img2 + gamma.
            alpha = 0.6
            beta = 1
            gamma = 0

            # overlayPointsOnColoredImage = True draws the path on top of the
            # colored image instead of the black/white edge image.
            overlayPointsOnColoredImage = rospy.get_param(
                "/overlayPointsOnColoredImage")
            if overlayPointsOnColoredImage:
                self.line_img_color = np.zeros(self.cv_image.shape,
                                               dtype=np.uint8)
                self.pathOverlayedImage = cv2.addWeighted(
                    self.cv_image, alpha, self.line_img_color, beta, gamma)
            else:
                self.edges_color = cv2.cvtColor(self.edges, cv2.COLOR_GRAY2RGB)
                self.line_img_color = np.zeros(self.edges_color.shape,
                                               dtype=np.uint8)
                self.pathOverlayedImage = cv2.addWeighted(
                    self.edges_color, alpha, self.line_img_color, beta, gamma)

            # Collect 100 images before running image processing.
            if self.count > 100:
                self.draw_lines_pub.publish(True)

            # Publish the image with lanes.
            if self.publish_image:
                # FIX: was "if True: publish(...) else: print(...)", which
                # made the failure branch unreachable; restore the evident
                # try/except intent so a conversion/publish error is reported.
                try:
                    self.reference_image_pub.publish(
                        self.bridge.cv2_to_imgmsg(
                            cv2.cvtColor(self.edges, cv2.COLOR_GRAY2RGB),
                            "bgr8"))
                except Exception:
                    print('Could not publish reference image')

            # Check the true loop rate of incoming camera images (frames per
            # second should match the rate specified in the launch file).
            if (self.count > 100 and self.count % 3 == 0):
                self.totalTimeCounter += 1
                self.timenext = time.time()
                self.timeElapsed = self.timenext - self.previousTime
                self.totalTime = self.totalTime + self.timeElapsed
                self.averageTime = self.totalTime / (self.totalTimeCounter)
                self.dt_pub.publish(self.averageTime)
                # print('Average Time: ', self.averageTime)

            self.rate.sleep()
        except IOError as err:
            # FIX: Py2-only "except IOError, (ErrorNumber, ErrorMessage)"
            # replaced with cross-version syntax; err.strerror carries the
            # message the old tuple unpacking exposed.
            print('HERE')
            print('HERE')
            print(err.strerror)
def __init__(self):
    """Initialize the lane-detection camera node (hard-coded-config version).

    Uses fixed calibration matrices and thresholds for a red/white track,
    opens the camera, sets up ROS publishers, then runs the capture /
    edge-detection / display loop until ROS shuts down.
    """
    self.vid = cv2.VideoCapture("/dev/video6")
    self.vid.set(12, 5)    # contrast
    self.vid.set(13, 255)  # saturation

    # Calibration matrices (intrinsics + distortion coefficients).
    self.mtx = np.array([[592.156892, 0.000000, 326.689246],
                         [0.000000, 584.923917, 282.822026],
                         [0.000000, 0.000000, 1.000000]])
    self.dist = np.array(
        [-0.585868, 0.248490, -0.023236, -0.002907, 0.000000])
    self.rel, self.dst = self.vid.read()  # prime the camera with one frame

    # Camera resolution.
    self.w = 640
    self.h = 480
    # Reference velocity.
    self.v_ref = 1.2
    # Number of moving-average points.
    self.sx = 5
    self.movmean = np.zeros([2, self.sx])

    # Set node rate (30 Hz).
    self.loop_rate = 30
    self.ts = 1.0 / self.loop_rate
    self.rate = rospy.Rate(self.loop_rate)
    self.t0 = time.time()
    self.dt = self.ts

    # Loop bookkeeping / timing accumulators.
    self.count = 0
    self.incount = 0
    self.total = 0
    self.avg = 0
    self.total2 = 0
    self.avg2 = 0
    self.publish_image = False
    self.timeprev = time.time() - self.dt
    time.sleep(.5)  # let the camera settle before mapping/reading frames

    # Compute the undistortion and rectification transformation map.
    self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(
        self.mtx, self.dist, (self.w, self.h), 0, (self.w, self.h))
    self.mapx, self.mapy = cv2.initUndistortRectifyMap(
        self.mtx, self.dist, None, self.newcameramtx, (self.w, self.h), 5)

    self.statepoints = ''
    self.printme = True
    self.state_constraints = barc_state()
    self.reference_trajectory = barc_state()
    # FIX: self.bridge was never initialized but cv2_to_imgmsg below needs
    # it; without this the publish branch can only raise AttributeError.
    self.bridge = CvBridge()

    # Initialize publishers and subscribers.
    self.moving_pub = rospy.Publisher("moving", Moving, queue_size=1)
    self.moving_pub.publish(True)
    self.reference_trajectory_pub = rospy.Publisher("reference_trajectory",
                                                    barc_state, queue_size=1)
    self.reference_image_pub = rospy.Publisher(
        "image_reference_trajectory", Image, queue_size=10)
    self.uOpt_pub = rospy.Publisher("uOpt", Input, queue_size=1)

    while not rospy.is_shutdown():
        try:
            self.count = self.count + 1
            self.rel, self.dst = self.vid.read()  # current frame from the camera
            self.dt = time.time() - self.timeprev
            self.timeprev = time.time()
            # Undistort the fisheye image to rectangular.
            self.cv_image = cv2.remap(self.dst, self.mapx, self.mapy,
                                      cv2.INTER_LINEAR)
            # Crop the raw frame to the valid ROI (also overwrites w/h with
            # the ROI size).
            self.x, self.y, self.w, self.h = self.roi
            self.dst = self.dst[self.y:self.y + self.h,
                                self.x:self.x + self.w]

            # yellow = True runs the color-threshold track filter; False uses
            # grayscale Canny and finds any edge regardless of color.
            yellow = True
            kernel_size = 5
            if yellow:
                cropped = self.cv_image
                # Black out the top of the frame (aliases self.cv_image).
                cropped[0:280, 0:640] = 0
                # NOTE: the original yellow-line HSV detection was commented
                # out; this branch now thresholds the BGR image directly for
                # the red/white track.  (An unused BGR->HSV conversion was
                # removed here — its result was never read.)
                lower_red = np.array([0, 0, 180])
                upper_red = np.array([50, 120, 255])
                lower_white = np.array([150, 150, 150])
                upper_white = np.array([255, 255, 255])
                # Keep only pixels inside the red/white ranges.
                self.reds = cv2.inRange(self.cv_image, lower_red, upper_red)
                self.whites = cv2.inRange(self.cv_image, lower_white,
                                          upper_white)
                edges = cv2.bitwise_or(self.reds, self.whites)
                edges = cv2.GaussianBlur(edges, (kernel_size, kernel_size), 0)
                edgescropped = edges
            else:
                # Convert color image to grayscale, mask the top, blur, Canny.
                gray_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
                gray_image[0:270, 0:640] = 0
                gray_image = cv2.GaussianBlur(gray_image,
                                              (kernel_size, kernel_size), 0)
                # In MPC lab, thresholds 13/15/100 were also used.
                edges = cv2.Canny(gray_image, 40, 80)
                cv2.imshow("Advanced Lane Detection ed", edges[270:480, :])
                edgescropped = edges

            # Parameters to combine images: dst = alpha*img1 + beta*img2 + gamma.
            alpha = .6
            beta = 1.
            gamma = 0
            # colored = True draws the path on top of the colored image.
            colored = False
            if colored:
                line_img_color = np.zeros(self.cv_image.shape, dtype=np.uint8)
                midpointlist, leftlist, rightlist = self.draw_lines(
                    line_img_color, edgescropped)
                LinesDrawn2_color = cv2.addWeighted(
                    self.cv_image, alpha, line_img_color, beta, gamma)
            else:
                edges_color = cv2.cvtColor(edgescropped, cv2.COLOR_GRAY2RGB)
                line_img_color = np.zeros(edges_color.shape, dtype=np.uint8)
                midpointlist, leftlist, rightlist = self.draw_lines(
                    line_img_color, edges)
                LinesDrawn2_color = cv2.addWeighted(
                    edges_color, alpha, line_img_color, beta, gamma)
            fontFace = cv2.FONT_HERSHEY_TRIPLEX
            self.publish_states(midpointlist, leftlist, rightlist)
            cv2.imshow("Advanced Lane Detection",
                       LinesDrawn2_color[270:480, :])

            if self.publish_image:
                try:
                    self.reference_image_pub.publish(
                        self.bridge.cv2_to_imgmsg(LinesDrawn2_color, "bgr8"))
                except Exception:  # FIX: was a bare except; keep best-effort
                    pass

            # Report the measured average loop time every 50 frames.
            if (self.count > 100 and self.count % 50 == 0):
                self.incount += 1
                self.timenext = time.time()
                self.timeElapsed = self.timenext - self.timeprev
                self.total2 = self.total2 + self.timeElapsed
                self.avg2 = self.total2 / (self.incount)
                print('Average Time: ', self.avg2)

            # waitKey is necessary for imshow windows to update.
            cv2.waitKey(3)
            self.rate.sleep()
        except IOError as err:
            # FIX: Py2-only "except IOError, (num, msg)" replaced with
            # cross-version syntax.
            print('HERE')
            print('HERE')
            print(err.strerror)
def __init__(self):
    """Initialize the lane-detection camera node (yellow-track version).

    Uses fixed calibration and an HSV threshold tuned for a yellow track,
    opens the camera, sets up ROS publishers, then runs the capture /
    edge-detection / display loop until ROS shuts down.
    """
    self.vid = cv2.VideoCapture("/dev/video6")
    self.vid.set(12, 5)    # contrast
    self.vid.set(13, 255)  # saturation

    # Calibration matrices (intrinsics + distortion coefficients).
    self.mtx = np.array([[592.156892, 0.000000, 326.689246],
                         [0.000000, 584.923917, 282.822026],
                         [0.000000, 0.000000, 1.000000]])
    self.dist = np.array(
        [-0.585868, 0.248490, -0.023236, -0.002907, 0.000000])

    # Camera resolution.
    self.w = 640
    self.h = 480
    # Reference velocity.
    self.v_ref = 2

    # Set node rate (30 Hz).
    self.loop_rate = 30
    self.ts = 1.0 / self.loop_rate
    self.rate = rospy.Rate(self.loop_rate)
    self.t0 = time.time()
    self.dt = self.ts

    # Loop bookkeeping / timing accumulators.
    self.count = 0
    self.total = 0
    self.avg = 0
    self.total2 = 0
    self.avg2 = 0
    self.publish_image = True
    self.timeprev = time.time() - self.dt
    time.sleep(0.2)  # let the camera settle before mapping/reading frames

    # Compute the undistortion and rectification transformation map.
    self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(
        self.mtx, self.dist, (self.w, self.h), 0, (self.w, self.h))
    self.mapx, self.mapy = cv2.initUndistortRectifyMap(
        self.mtx, self.dist, None, self.newcameramtx, (self.w, self.h), 5)

    self.statepoints = ''
    self.printme = True
    self.state_constraints = barc_state()
    self.reference_trajectory = barc_state()
    # FIX: self.bridge was never initialized, so with publish_image=True the
    # publish below always raised AttributeError and the bare except silently
    # swallowed it — the image topic never actually published.
    self.bridge = CvBridge()

    # Initialize publishers and subscribers.
    self.moving_pub = rospy.Publisher("moving", Moving, queue_size=1)
    self.moving_pub.publish(True)
    self.state_constraints_pub = rospy.Publisher("state_constraints",
                                                 barc_state, queue_size=1)
    self.reference_trajectory_pub = rospy.Publisher("reference_trajectory",
                                                    barc_state, queue_size=1)
    self.reference_image_pub = rospy.Publisher(
        "image_reference_trajectory", Image, queue_size=10)
    # self.optimal_state_sub = rospy.Subscriber("optimal_state_trajectory",
    #     barc_state, self.convertDistanceToPixels, queue_size=1)
    self.uOpt_pub = rospy.Publisher("uOpt", Input, queue_size=1)

    while not rospy.is_shutdown():
        self.count = self.count + 1
        self.rel, self.dst = self.vid.read()  # current frame from the camera
        self.dt = time.time() - self.timeprev
        self.timeprev = time.time()
        # Undistort the fisheye image to rectangular.
        self.cv_image = cv2.remap(self.dst, self.mapx, self.mapy,
                                  cv2.INTER_LINEAR)
        # Crop the raw frame to the valid ROI (also overwrites w/h with the
        # ROI size).
        self.x, self.y, self.w, self.h = self.roi
        self.dst = self.dst[self.y:self.y + self.h, self.x:self.x + self.w]

        # yellow = True searches for a yellow track using HSV thresholds;
        # False uses grayscale Canny and finds any edge regardless of color.
        yellow = True
        kernel_size = 7
        if yellow:
            cropped = self.cv_image
            # Black out the top of the frame (aliases self.cv_image).
            cropped[0:280, 0:640] = 0
            hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
            # Range of the track color in HSV.
            lower_yellow = np.array([0, 200, 100])
            upper_yellow = np.array([50, 255, 255])
            # Threshold the HSV image to keep only the track color.
            edges = cv2.inRange(hsv, lower_yellow, upper_yellow)
            edges = cv2.GaussianBlur(edges, (kernel_size, kernel_size), 0)
            edgescropped = edges
        else:
            # Convert color image to grayscale, mask the top, blur, Canny.
            gray_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
            gray_image[0:270, 0:640] = 0
            gray_image = cv2.GaussianBlur(gray_image,
                                          (kernel_size, kernel_size), 0)
            edges = cv2.Canny(gray_image, 40, 80)
            edgescropped = edges

        # Parameters to combine images: dst = alpha*img1 + beta*img2 + gamma.
        alpha = .6
        beta = 1.
        gamma = 0
        # colored = True draws the path on top of the colored image.
        colored = True
        if colored:
            line_img_color = np.zeros(self.cv_image.shape, dtype=np.uint8)
            midpointlist, leftlist, rightlist = self.draw_lines(
                line_img_color, edgescropped)
            LinesDrawn2_color = cv2.addWeighted(
                self.cv_image, alpha, line_img_color, beta, gamma)
        else:
            edges_color = cv2.cvtColor(edgescropped, cv2.COLOR_GRAY2RGB)
            line_img_color = np.zeros(edges_color.shape, dtype=np.uint8)
            midpointlist, leftlist, rightlist = self.draw_lines(
                line_img_color, edges)
            LinesDrawn2_color = cv2.addWeighted(
                edges_color, alpha, line_img_color, beta, gamma)
        fontFace = cv2.FONT_HERSHEY_TRIPLEX
        self.publish_states(midpointlist, leftlist, rightlist)
        cv2.imshow("Advanced Lane Detection", LinesDrawn2_color[270:480, :])

        if self.publish_image:
            try:
                self.reference_image_pub.publish(
                    self.bridge.cv2_to_imgmsg(LinesDrawn2_color, "bgr8"))
            except Exception:  # FIX: was a bare except; keep best-effort
                pass

        # Running average of the loop time.
        self.timenext = time.time()
        self.timeElapsed = self.timenext - self.timeprev
        self.total2 = self.total2 + self.timeElapsed
        self.avg2 = self.total2 / self.count

        # waitKey is necessary for imshow windows to update.
        cv2.waitKey(3)
        self.rate.sleep()