def run(self):
    """Capture and return a single frame from the camera stream.

    Pulls the next buffer from the capture stream, resets the raw
    capture buffer so the following read starts clean, and converts the
    image to greyscale when single-channel output (image_d == 1) is
    configured.
    """
    captured = next(self.stream).array
    # Clear the stream buffer in preparation for the next capture.
    self.rawCapture.truncate(0)
    return rgb2gray(captured) if self.image_d == 1 else captured
def update(self):
    """Continuously read frames from the camera stream until stopped.

    Runs on a background thread. Each iteration publishes the newest
    image on ``self.frame`` (greyscaled when image_d == 1) and clears
    the raw capture buffer so the stream can deliver the next image.
    The loop exits as soon as ``self.on`` goes False.
    """
    for capture in self.stream:
        # Publish the newest image and clear the buffer for the next one.
        self.frame = capture.array
        self.rawCapture.truncate(0)
        if self.image_d == 1:
            self.frame = rgb2gray(self.frame)
        if not self.on:
            # The owning thread flagged shutdown; leave the loop.
            return
def update(self):
    """Webcam capture loop that isolates green regions in each frame.

    Runs until ``self.on`` is cleared. Each cycle grabs a pygame webcam
    snapshot, rescales/flips/rotates it into the car's orientation,
    builds a binary mask of green pixels (HSV threshold, then erode and
    dilate to drop speckle noise), writes that mask into channel 0 of
    the published frame, and sleeps off the remainder of the frame
    period so the loop runs at roughly ``self.framerate``. Stops the
    camera on exit.
    """
    from datetime import datetime
    import pygame.image

    # HSV boundaries for "green" — loop-invariant, so built once.
    # NOTE(review): a sibling update() in this file uses an upper bound
    # of (64, 255, 255); confirm which threshold is intended here.
    green_lower = (30, 45, 70)
    green_upper = (64, 240, 240)

    while self.on:
        start = datetime.now()
        if self.cam.query_image():
            snapshot = self.cam.get_image()
            snapshot1 = pygame.transform.scale(snapshot, self.resolution)
            # Flip horizontally, then rotate -90° to match the mounting.
            snapshot2 = pygame.transform.rotate(
                pygame.transform.flip(snapshot1, True, False), -90)
            frame1 = pygame.surfarray.pixels3d(snapshot2)

            img = frame1.copy()
            # Blur before thresholding to suppress per-pixel noise.
            blurred = cv2.GaussianBlur(img, (11, 11), 0)
            hsv = cv2.cvtColor(blurred, cv2.COLOR_RGB2HSV)
            mask = cv2.inRange(hsv, green_lower, green_upper)
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)

            # Embed the green mask in channel 0 of the published frame.
            img[:, :, 0] = mask
            self.frame = img
            if self.image_d == 1:
                self.frame = rgb2gray(self.frame)

        # Sleep off whatever is left of the frame period.
        stop = datetime.now()
        remaining = 1 / self.framerate - (stop - start).total_seconds()
        if remaining > 0:
            time.sleep(remaining)
    self.cam.stop()
def update(self):
    """PiCamera capture loop with optional lane-detection display.

    Stores each frame on ``self.frame`` (greyscaled when image_d == 1).
    Depending on the flags in ``self.dpcar_opt``, shows the original
    frame and — in 'deepicar' mode — the HSV conversion, the detected
    edges and the lane overlay in OpenCV windows. ``cv2.waitKey`` pumps
    the OpenCV GUI event loop so the windows refresh. Exits when
    ``self.on`` is cleared.
    """
    for f in self.stream:
        # Grab the frame and clear the stream for the next capture.
        self.frame = f.array
        self.rawCapture.truncate(0)
        if self.image_d == 1:
            self.frame = rgb2gray(self.frame)

        rgb_image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
        if self.dpcar_opt['org']:
            cv2.imshow("ORIGIN Frame", rgb_image)

        if self.mode == 'deepicar':
            # Lane pipeline: BGR frame -> HSV -> edges -> lane overlay.
            hsv_image = self.lane_follower.cvtBGR2HSV(self.frame)
            edge_detected_image = self.lane_follower.detect_lane_edges(
                hsv_image)
            combo_image = self.lane_follower.detect_follow_lanes(
                rgb_image, edge_detected_image)
            if self.dpcar_opt['hsv']:
                cv2.imshow("BGR2HSV Image", hsv_image)
            if self.dpcar_opt['edges']:
                cv2.imshow("Detected Edges Image", edge_detected_image)
            if self.dpcar_opt['lanes']:
                cv2.imshow("Road with Lane line", combo_image)

        # Let OpenCV process window events; 10 ms keeps the loop moving.
        cv2.waitKey(10)

        # If the thread indicator variable is cleared, stop the thread.
        if not self.on:
            break
def update(self):
    """Poll the pygame webcam until stopped, publishing scaled frames.

    Each cycle: when a new image is available, scale it to the
    configured resolution, flip it horizontally, rotate it 90 degrees
    and store the pixel array on ``self.frame`` (greyscale when
    image_d == 1). Sleeps off the remainder of the frame period to hold
    ``self.framerate``, then stops the camera once ``self.on`` goes
    False.
    """
    from datetime import datetime, timedelta
    import pygame.image

    while self.on:
        cycle_start = datetime.now()
        if self.cam.query_image():
            raw = self.cam.get_image()
            scaled = pygame.transform.scale(raw, self.resolution)
            # Mirror, then rotate into the expected orientation.
            oriented = pygame.transform.rotate(
                pygame.transform.flip(scaled, True, False), 90)
            self.frame = pygame.surfarray.pixels3d(oriented)
            if self.image_d == 1:
                self.frame = rgb2gray(self.frame)
        elapsed = (datetime.now() - cycle_start).total_seconds()
        wait = 1 / self.framerate - elapsed
        # Sleep whatever is left of the frame period, if anything.
        if wait > 0:
            time.sleep(wait)
    self.cam.stop()
def update(self):
    """Capture loop: publish frames and run detection until stopped.

    Runs on a background thread. Each iteration stores the newest image
    on ``self.frame``, clears the raw capture buffer for the next read,
    and calls ``self.detect()`` on the fresh frame (presumably the
    traffic-sign detector — the removed debug code saved frames as
    ``*traffic_sign.png``; confirm against the detect() definition).
    The frame is greyscaled when image_d == 1, and the loop exits as
    soon as ``self.on`` is cleared.
    """
    for f in self.stream:
        # Grab the frame and clear the stream for the next capture.
        self.frame = f.array
        self.rawCapture.truncate(0)
        # Run detection on the freshly captured (still colour) frame.
        self.detect()
        if self.image_d == 1:
            self.frame = rgb2gray(self.frame)
        # If the thread indicator variable is cleared, stop the thread.
        if not self.on:
            break
def update(self):
    """Fuse webcam green-ball mask, PiCamera image and ToF distances.

    Runs until ``self.on`` is cleared. Each cycle builds a 3-channel
    fused frame published on ``self.frame``:

    * channel 0 — greyscale PiCamera frame,
    * channel 1 — binary mask of the largest green blob seen by the
      webcam, filled to its minimum enclosing circle,
    * channel 2 — ToF distances (two sensor channels, left/right image
      halves) scaled so 1 m maps to 255 and clamped at 255.

    The loop sleeps off the remainder of the frame period to hold
    ``self.framerate`` and stops the webcam on exit.
    """
    from datetime import datetime
    import pygame.image

    # HSV boundaries for "green" — loop-invariant, so built once.
    green_lower = (30, 45, 70)
    green_upper = (64, 255, 255)

    while self.on:
        start = datetime.now()

        # WebCamera: frame1
        if self.cam.query_image():
            snapshot = self.cam.get_image()
            snapshot1 = pygame.transform.scale(snapshot, self.resolution)
            frame1 = pygame.surfarray.pixels3d(
                pygame.transform.rotate(
                    pygame.transform.flip(snapshot1, True, False), -90))

            img = frame1.copy()
            # Blur before thresholding to suppress per-pixel noise.
            blurred = cv2.GaussianBlur(img, (11, 11), 0)
            hsv = cv2.cvtColor(blurred, cv2.COLOR_RGB2HSV)
            mask = cv2.inRange(hsv, green_lower, green_upper)
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)

            # NOTE(review): cnts[1] is the contour list only under the
            # OpenCV 3.x three-tuple return; OpenCV 2.x/4.x return
            # (contours, hierarchy) — verify the installed version.
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            if cnts[1] == []:
                x, y, radius = [0, 0, 0]
            else:
                c = max(cnts[1], key=cv2.contourArea)
                # Centre and radius of the largest green blob.
                ((y, x), radius) = cv2.minEnclosingCircle(c)

            # Replace the mask with a filled disc around the blob:
            # 255 inside/on the circle, 0 outside. Vectorised — the
            # previous per-pixel Python double loop was O(h*w) per frame.
            rows, cols = np.ogrid[:mask.shape[0], :mask.shape[1]]
            inside = (rows - x) ** 2 + (cols - y) ** 2 <= radius ** 2
            mask = np.where(inside, 255, 0).astype(mask.dtype)

            frame1 = rgb2gray(frame1)

            # PiCamera: frame2
            f = next(self.stream)
            frame2 = rgb2gray(f.array)
            self.rawCapture.truncate(0)

            # Sensor info: two ToF channels fill the left/right halves.
            Dist_M = np.zeros((self.image_h, self.image_w))
            distance_in_mm1 = self.tof.get_distance(1)
            distance_in_mm2 = self.tof.get_distance(2)
            d1 = np.round(min(distance_in_mm1 / 1000 * 255, 255))
            d2 = np.round(min(distance_in_mm2 / 1000 * 255, 255))
            Dist_M[:, :int(self.image_w / 2)] = int(d1)
            Dist_M[:, int(self.image_w / 2):] = int(d2)

            # Fusion of both cameras plus distances, one per channel.
            self.fusion[:, :, 0] = frame2
            self.fusion[:, :, 1] = mask
            self.fusion[:, :, 2] = Dist_M
            self.frame = self.fusion
            # print("1: {}, 2: {}, reso: {}".format(frame1.dtype, frame2.dtype, self.resolution))

        # Sleep off whatever is left of the frame period.
        stop = datetime.now()
        remaining = 1 / self.framerate - (stop - start).total_seconds()
        if remaining > 0:
            time.sleep(remaining)
    self.cam.stop()
def poll(self):
    """Grab the latest frame from the capture device.

    Stores it on ``self.frame``, converting to single-channel greyscale
    when ``self.image_d == 1``.
    """
    captured = self.cap.read()
    self.frame = rgb2gray(captured) if self.image_d == 1 else captured