def main():
    """Wire a camera-reader thread to a CV-processing thread and supervise them.

    Frames flow through a bounded LIFO queue so the consumer always sees the
    freshest frame.  The loop below is a watchdog: when either thread dies it
    signals the other one to stop, and exits once both are gone.
    """
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
        handlers=[logging.StreamHandler()],
    )

    # Bounded LIFO: the newest frame is always consumed first.
    frames = queue.LifoQueue(5)

    # Video source can be anything compatible with cv2.VideoCapture.open()
    # in DEMO videofeed is provided by a RPI 3B and videoStreamer.py
    cam_thread = CameraThread("http://192.168.12.131:8000/stream.mjpg", frames)
    cam_thread.start()

    cv_thread = CvThread(frames, (0, 0, 0, 0), (180, 255, 35, 0), True)
    cv_thread.start()

    while True:
        # Who dies first kills the other.
        # NOTE(review): _evt is a private attribute of the thread classes; a
        # public stop() method would be cleaner — confirm against CameraThread.
        # is_alive() replaces the deprecated isAlive() alias (removed in 3.9).
        if not cam_thread.is_alive():
            cv_thread._evt.set()
        if not cv_thread.is_alive():
            cam_thread._evt.set()
        # If everyone is dead, quit mainThread.
        if not cv_thread.is_alive() and not cam_thread.is_alive():
            break
        time.sleep(0.1)  # Save a bit of CPU

    logging.debug("quit")
class testsprite(pygame.sprite.Sprite): def __init__(self, pos, url): pygame.sprite.Sprite.__init__(self) self.x, self.y = pos self.url = url self.camera = None #CameraThread(url[0], url[1]) self.stop = False #self.camera.start() self.image = pygame.image.load("lol.jpg") self.rect = self.image.get_rect() self.scale = 1 def stopCamera(self): print "cam stop.." self.stop = True self.camera.kill = True self.camera.join() self.image = pygame.image.load("lol.jpg") def startCamera(self): print "cam start.." self.stop = False if self.camera != None: self.camera.kill = False self.camera = CameraThread(self.url[0], self.url[1]) self.camera.start() def setScale(self, scale): self.scale = scale def update(self): if self.camera != None: pic = self.camera.pic if pic is not None: self.image = pygame.transform.scale(pygame.image.frombuffer(pic.tostring(), pic.size, 'RGB'), (160 * self.scale, 120 * self.scale)) self.rect = self.image.get_rect() self.rect.topleft = (self.x, self.y)
from CameraThread import CameraThread threads = [] urllist = open("knowngood", "r") for url in urllist: url = url[7:] ip = url[:url.find('/')] path = url[url.find('/'):] print ip, path c = CameraThread(ip.strip(), path.strip()) threads.append(c) c.start() while len(threads) > 0: try: threads = [t.join(1) for t in threads if t is not None and t.isAlive()] except KeyboardInterrupt: print "Ctrl-c received! Sending kill to %i threads... " % len(threads) for t in threads: t.kill = True print "..done" print "done!"
class MainThread(): def __init__(self, thread_id, thread_name): # display title self.print_text("[ barcode detection ]") # get parameters self.thread_id = thread_id self.thread_name = thread_name # calculate morphological kernel self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 5)) # create thread self.camera_thread = CameraThread(1, "camera_thread", self) # start thread def start(self): self.print_text("starting thread: id (%d), name (%s)" % (self.thread_id, self.thread_name)) self.camera_thread.start() # stop thread def stop(self): self.print_text("stopping thread: id (%d), name (%s)" % (self.thread_id, self.thread_name)) cv2.destroyAllWindows() sys.exit() # display text on terminal def print_text(self, text): print text # detect barcode in image def _detect_barcode(self, img): # convert image to grayscale gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # calculate Sobel X gradient grad_x = cv2.Sobel( gray_img, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1) grad_img = cv2.convertScaleAbs(grad_x) # remove noise by blurring and extract barcode by thresholding blur_img = cv2.blur(grad_img, (9,9)) (_, thresh_img) = cv2.threshold( blur_img, 225, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) # enhance barcode region by morphology morph_img = cv2.morphologyEx( thresh_img, cv2.MORPH_CLOSE, self.kernel, iterations=1) morph_img = cv2.erode(morph_img, None, iterations=8) # find largest contour and calculate bounding box (_, contours, _) = cv2.findContours(morph_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) if len(contours) > 0: cnt = sorted( contours, key=cv2.contourArea, reverse=True)[0] rect = cv2.minAreaRect(cnt) bbox = np.int0(cv2.boxPoints(rect)) return rect, bbox else: return None ''' WORK IN PROGRESS def _scan_barcode(self, img, rect): if rect is None: return None gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) (x, y) = np.int0(rect[0]) (w, h) = np.int0(rect[1]) angle = rect[2] if angle < -45.0: angle += 90.0 w, h = h, w M = 
cv2.getRotationMatrix2D((x,y), angle, 1.0) rot_img = cv2.warpAffine(img, M, gray_img.shape) barcode_img = cv2.getRectSubPix(rot_img, (w,h), (x,y)) return barcode_img ''' def process(self, frame): # mirror the camera frame frame = cv2.flip(frame, 1) # detect barcode in image rect, bbox = self._detect_barcode(frame) # draw barcode bounding box on frame bbox_img = frame.copy() if bbox is not None: cv2.drawContours(bbox_img, [bbox], -1, (0,255,0), 2) ''' WORK IN PROGRESS barcode_img = self._scan_barcode(frame, rect) bg_img = np.zeros(frame.shape, dtype=frame.dtype) if barcode_img is not None: (w, h, _) = barcode_img.shape bg_img[:w, :h, :] = barcode_img result_img = np.hstack([bbox_img, bg_img]) result_img = cv2.resize(result_img, dsize=(0,0), fx=0.5, fy=0.5) ''' # display result cv2.imshow("result", bbox_img)