def read_image(self):
    counter = fps = 0
    video = None
    fps_counter = FPSCounter().start()
    self.running = True

    # Open a new video file if recording is enabled for this camera.
    if self.cam.recording_enabled:
        filename = datetime.datetime.now().strftime(
            "%Y-%m-%d_%H-%M-%S") + ".avi"
        video = Video(filename, self.cam.directory, self.cam.max_file_size)

    while self.running:
        # Wait until the SDK has a frame available in its buffer.
        if ArducamSDK.Py_ArduCam_availableImage(self.cam.handle) <= 0:
            time.sleep(0.001)
            continue
        try:
            rtn_val, data, rtn_cfg = self.read_single_image()
            image = convert_image(data, rtn_cfg, self.cam.color_mode)
            image = cv2.medianBlur(image, 3)

            # Refresh the FPS estimate every 10 frames.
            if counter % 10 == 0:
                fps = fps_counter.get_fps(10)
            if self.cam.show_label:
                self.add_label(image, fps)
            if self.cam.rotation_angle != 0:
                image = imutils.rotate_bound(image, int(self.cam.rotation_angle))
            if self.cam.show_preview:
                self.show_image(image)

            if video is not None:
                video.add_frame(image)
                # Flush buffered frames to disk once the dump size is reached.
                if counter != 0 and video.size >= self.cam.dump_size:
                    video.dump_async(fps)

            counter += 1
        except ImageReadException as e:
            self.logger.warning("Bad image read: {}".format(e))
        finally:
            # Always release the frame back to the SDK buffer.
            ArducamSDK.Py_ArduCam_del(self.cam.handle)

    if video is not None:
        video.close()
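# read_image() above relies on an FPSCounter helper (as well as Video, convert_image
# and ImageReadException) defined elsewhere in the project. The class below is only a
# minimal sketch of what FPSCounter might look like, inferred from the calls
# FPSCounter().start() and get_fps(10) and from the timing code in readImage_thread();
# it is not the project's actual implementation.
import time


class FPSCounter:
    """Rolling frames-per-second estimate based on wall-clock time."""

    def __init__(self):
        self._last = None

    def start(self):
        # Record the reference timestamp; return self so construction can be chained.
        self._last = time.perf_counter()
        return self

    def get_fps(self, frames):
        # Average FPS over the last `frames` frames, then reset the reference time.
        now = time.perf_counter()
        fps = round(frames / (now - self._last), 2)
        self._last = now
        return fps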
def readImage_thread():
    global handle, running, Width, Height, save_flag, acfg, color_mode, save_raw
    global COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, COLOR_BayerGR2BGR, COLOR_BayerBG2BGR
    count = 0
    totalFrame = 0
    time0 = time.time()
    time1 = time.time()
    data = {}
    counter = 0
    frame_h = cfg['frame_height']
    frame_w = cfg['frame_width']
    out = None
    t = time.perf_counter()
    fps = 0

    while running:
        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
            datasize = rtn_cfg['u32Size']

            # Refresh the FPS estimate every 10 frames.
            if counter % 10 == 0:
                t2 = time.perf_counter()
                fps = round(10 / (t2 - t), 2)
                t = t2
                reprint(fps)

            if rtn_val != 0:
                print("read data fail!")
                continue
            if datasize == 0:
                continue

            image = convert_image(data, rtn_cfg, color_mode)
            image = imutils.rotate_bound(image, cfg["rotation_angle"])
            image = cv2.medianBlur(image, 3)
            # Optional sharpening kernel (currently unused):
            # kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
            # image = cv2.filter2D(image, -1, kernel)
            # Optional: mask out the on-image digits area, e.g.
            # cv2.fillPoly(image,
            #              np.array([[[512, 964], [0, 964], [0, 896], [512, 896]]],
            #                       dtype=np.int32), (0, 0, 0))

            # Open the output video file on the first frame.
            if counter == 0:
                filename = datetime.datetime.now().strftime(
                    "%Y-%m-%d_%H-%M-%S") + "_front_top.avi"
                # XVID at 22 fps; 'MJ2C' or 'HFYU' FourCC codes can be used for
                # lossless output instead.
                out = cv2.VideoWriter(
                    filename, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'),
                    22, (1280, 964))
                reprint("Creating file " + str(filename))

            # Overlay the FPS label in the bottom-left corner.
            cv2.putText(image, str(fps), (10, image.shape[0] - 10),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1,
                        cv2.LINE_AA)

            cv2.imshow("stream", image)
            cv2.waitKey(5)

            if out is not None:
                out.write(cv2.resize(image, (1280, 964)))

            # Sensor registers of interest, readable with
            # ArducamSDK.Py_ArduCam_readSensorReg(handle, addr)[1]:
            # 12644 integration time, 12586 gains, 12626 mean gain,
            # 12680 dark current, 12460 frame exposure, 12500 noise correction.

            counter += 1
            if counter == 500:
                # Close the output file after 500 frames.
                out.release()
                # reprint("Sending file " + str(filename))
                # threading.Thread(target=fh.post_files, args=[filename]).start()

            # Release the frame back to the SDK buffer.
            ArducamSDK.Py_ArduCam_del(handle)
        else:
            time.sleep(0.001)
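# readImage_thread() is written to run on a background worker thread and exits when
# the module-level `running` flag is cleared. The block below is only a usage sketch
# under that assumption; the camera handle, cfg and the other globals are expected to
# have been initialised by the rest of the script before it starts.
import threading
import time

if __name__ == "__main__":
    running = True
    capture_thread = threading.Thread(target=readImage_thread, daemon=True)
    capture_thread.start()
    try:
        # Keep the main thread alive while frames are being captured.
        while capture_thread.is_alive():
            time.sleep(0.5)
    except KeyboardInterrupt:
        # Ask the capture loop to stop and wait for it to finish cleanly.
        running = False
        capture_thread.join()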