def captureImage(): global handle,Width,Height,cfg,color_mode global COLOR_BayerGB2BGR,COLOR_BayerRG2BGR,COLOR_BayerGR2BGR,COLOR_BayerBG2BGR rtn_val = ArducamSDK.Py_ArduCam_captureImage(handle) if rtn_val > 255: print "Error capture image, rtn_val = ",rtn_val if rtn_val == ArducamSDK.USB_CAMERA_USB_TASK_ERROR: exit() if ArducamSDK.Py_ArduCam_availableImage(handle) > 0: rtn_val,data,rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle) datasize = rtn_cfg['u32Size'] if rtn_val != 0 or datasize == 0: print "read data fail!" ArducamSDK.Py_ArduCam_del(handle) return image = convert_image(data,rtn_cfg,color_mode) if h_flip: image = cv2.flip(image, 1) if v_flip: image = cv2.flip(image, 0) try: img_msg = bridge.cv2_to_imgmsg(image, "bgr8") img_msg.header.stamp = rospy.Time.now() img_msg.header.frame_id = id_frame pub.publish(img_msg) cv2.waitKey(10) except CvBridgeError as e: pass ArducamSDK.Py_ArduCam_del(handle) else: time.sleep(0.001)
def capture(request):
    """ROS service handler: grab one frame from the SDK FIFO, convert and
    optionally flip it, publish it on `pub_capture`, and report the outcome.

    :param request: service request object (unused).
    :returns: CaptureResponse("Captured") on success, otherwise
        CaptureResponse("Failed to Capture").
    """
    global handle, Width, Height, cfg, color_mode
    global COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, COLOR_BayerGR2BGR, COLOR_BayerBG2BGR
    # Guard clause: nothing buffered means nothing to capture.
    if ArducamSDK.Py_ArduCam_availableImage(handle) <= 0:
        return CaptureResponse("Failed to Capture")
    ret, raw, frame_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
    if ret != 0:
        print("read data fail!")
        return CaptureResponse("Failed to Capture")
    if frame_cfg['u32Size'] == 0:
        return CaptureResponse("Failed to Capture")
    frame = convert_image(raw, frame_cfg, color_mode)
    if h_flip:
        frame = cv2.flip(frame, 1)
    if v_flip:
        frame = cv2.flip(frame, 0)
    try:
        msg = bridge.cv2_to_imgmsg(frame, "bgr8")
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = id_frame
        pub_capture.publish(msg)
    except CvBridgeError:
        return CaptureResponse("Failed to Capture")
    return CaptureResponse("Captured")
def readImage_thread(self, child_conn):
    """Worker loop: drain frames from the ArduCam SDK FIFO and ship them
    through a pipe until `self.running` goes False.

    Each available frame is read, converted with `dBytesToMat`, and sent
    over `child_conn`; the SDK buffer slot is released afterwards. When no
    frame is ready the loop sleeps briefly to avoid busy-waiting.

    :param child_conn: multiprocessing Connection the converted frames are
        sent on; closed when the loop exits.
    """
    count = 0  # frames since the last (currently disabled) FPS report
    time0 = time.time()
    time1 = time.time()
    data = {}
    while self.running:
        if ArducamSDK.Py_ArduCam_availableImage(self.handle) > 0:
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(
                self.handle)
            datasize = rtn_cfg['u32Size']
            if rtn_val != 0 or datasize == 0:
                # BUGFIX: was `self.ArducamSDK.Py_ArduCam_del(...)`, which
                # raised AttributeError on a failed read — the SDK is used as
                # a module-level import everywhere else in this function.
                ArducamSDK.Py_ArduCam_del(self.handle)
                print("read data fail!")
                continue
            image_data = self.dBytesToMat(data, self.BitWidth, self.Width,
                                          self.Height)
            child_conn.send(image_data)
            time1 = time.time()
            if time1 - time0 >= 1:
                # print("%s %d %s\n"%("camera fps:",count,"/s"))
                count = 0
                time0 = time1
            count += 1
            # Release the consumed frame back to the SDK FIFO.
            ArducamSDK.Py_ArduCam_del(self.handle)
            # time.sleep(.01)
        else:
            time.sleep(0.005)
    child_conn.close()
def process_thread(self):
    """Consumer loop: pull frames from the ArduCam FIFO, convert them, and —
    when the module-level `save_flag` is set — write the frame to disk,
    reload it as greyscale, and crop it for downstream processing.

    Runs until the module-level `running` flag goes False; sleeps briefly
    when no frame is available.
    """
    global handle, running, Width, Height, save_flag, cfg, color_mode
    global COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, COLOR_BayerGR2BGR, COLOR_BayerBG2BGR
    while running:
        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            self.rawImageTimeStamp = datetime.datetime.now()
            self.imageTimeStamp = datetime.datetime.now().strftime(
                '%Y_%m_%d_%H_%M_%S')
            self.imageName = 'imagem_%s.jpg' % self.imageTimeStamp
            self.imagePath = "images/" + self.imageName
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
            datasize = rtn_cfg['u32Size']
            if rtn_val != 0 or datasize == 0:
                # BUGFIX: the old code only logged the failure and then
                # converted the bad frame anyway. Log, release the FIFO
                # slot, and skip the frame instead.
                if rtn_val != 0:
                    print("ardu cam read data fail!!")
                if datasize == 0:
                    print("ardu cam read data size = 0!!")
                ArducamSDK.Py_ArduCam_del(handle)
                continue
            image = convert_image(data, rtn_cfg, color_mode)
            if save_flag:
                cv2.imwrite(self.imagePath, image)
                # Reload the saved JPEG as greyscale. NOTE: ndimage.imread is
                # removed from modern SciPy — consider imageio going forward.
                self.image = np.uint8(
                    ndimage.imread(self.imagePath, flatten=True))
                self.image = self.crop_end(self.image, 0, 150)
                save_flag = False
            # Release the consumed frame back to the SDK FIFO.
            ArducamSDK.Py_ArduCam_del(handle)
        else:
            time.sleep(0.01)
def read_next_frame(self):
    """Trigger one capture and return the next converted frame.

    :returns: the converted frame, or None when the SDK has no frame
        buffered after the capture trigger.
    :raises ArducamException: on a capture error (> 255) or a bad read.
    """
    status = ArducamSDK.Py_ArduCam_captureImage(self.handle)
    if status > 255:
        raise ArducamException("Error capturing image", code=status)
    # Nothing buffered yet: report no frame rather than failing.
    if not ArducamSDK.Py_ArduCam_availableImage(self.handle):
        return None
    try:
        ret, raw, frame_cfg = ArducamSDK.Py_ArduCam_readImage(self.handle)
        size = frame_cfg['u32Size']
        if ret != 0 or size == 0:
            raise ArducamException(
                "Bad image read! Datasize was {}".format(size), code=ret)
        return convert_image(raw, frame_cfg, self.color_mode)
    finally:
        # The FIFO slot is released no matter how the read went.
        ArducamSDK.Py_ArduCam_del(self.handle)
def readImage_thread():
    """Display loop: show converted frames in an OpenCV window, report FPS
    once a second, and optionally save JPEG (and raw) copies of each frame
    under images/.

    Runs until the module-level `running` flag goes False.
    """
    global handle, running, Width, Height, save_flag, cfg, color_mode, save_raw
    global COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, COLOR_BayerGR2BGR, COLOR_BayerBG2BGR
    count = 0        # frames since the last FPS report
    totalFrame = 0   # running index for saved image filenames
    time0 = time.time()
    time1 = time.time()
    data = {}
    cv2.namedWindow("ArduCam Demo", 1)
    if not os.path.exists("images"):
        os.makedirs("images")
    while running:
        display_time = time.time()
        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
            datasize = rtn_cfg['u32Size']
            if rtn_val != 0 or datasize == 0:
                if rtn_val != 0:
                    print("read data fail!")
                # BUGFIX: release the FIFO slot before skipping the frame;
                # the old code leaked the buffer slot on every failed or
                # empty read.
                ArducamSDK.Py_ArduCam_del(handle)
                continue
            image = convert_image(data, rtn_cfg, color_mode)
            time1 = time.time()
            if time1 - time0 >= 1:
                print("%s %d %s\n" % ("fps:", count, "/s"))
                count = 0
                time0 = time1
            count += 1
            if save_flag:
                cv2.imwrite("images/image%d.jpg" % totalFrame, image)
                if save_raw:
                    with open("images/image%d.raw" % totalFrame, 'wb') as f:
                        f.write(data)
                totalFrame += 1
            image = cv2.resize(image, (640, 480),
                               interpolation=cv2.INTER_LINEAR)
            cv2.imshow("ArduCam Demo", image)
            cv2.waitKey(10)
            # Release the consumed frame back to the SDK FIFO.
            ArducamSDK.Py_ArduCam_del(handle)
            #print("------------------------display time:",(time.time() - display_time))
        else:
            time.sleep(0.001)
def capture_background():
    """Block until one frame is available, read it, and store a
    screen-sized version in the module-level `background_image`.

    :returns: the resized background image.

    Exits the process if the SDK read fails or returns an empty frame.
    """
    global handle, running, width, height, cfg, color_mode, select, background_image
    # Wait (with a tiny sleep) until the SDK has a frame for us.
    while ArducamSDK.Py_ArduCam_availableImage(handle) == 0:
        time.sleep(0.001)
    ret, raw, frame_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
    read_ok = ret == 0 and frame_cfg['u32Size'] != 0
    if not read_ok:
        ArducamSDK.Py_ArduCam_del(handle)
        print("Failed to read data!")
        exit()
    # The FIFO slot is released before the (potentially slow) conversion.
    ArducamSDK.Py_ArduCam_del(handle)
    background_image = ImageConvert.convert_image(raw, frame_cfg, color_mode)
    background_image = cv2.resize(background_image,
                                  (screen_width, screen_height),
                                  interpolation=cv2.INTER_LINEAR)
    return background_image
# --- Script entry: open the camera from a config file and stream frames. ---
if len(sys.argv) > 1:
    config_file_name = sys.argv[1]
    if not os.path.exists(config_file_name):
        print("Config file does not exist.")
        exit()
else:
    showHelp()
    exit()

if camera_initFromFile(config_file_name):
    ArducamSDK.Py_ArduCam_setMode(handle, ArducamSDK.CONTINUOUS_MODE)
    # NOTE(review): this loop never terminates, so the close/cleanup code
    # below is unreachable until a break condition is added.
    while True:
        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            print('hello')
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(
                handle)
            image = convert_image(data, rtn_cfg, color_mode)
            # BUGFIX: release the consumed frame back to the SDK FIFO —
            # without this every iteration leaked a buffer slot.
            ArducamSDK.Py_ArduCam_del(handle)
            #pause
    #ArducamSDK.Py_ArduCam_writeReg_8_8(handle,0x46,3,0x40)
    rtn_val = ArducamSDK.Py_ArduCam_close(handle)
    if rtn_val == 0:
        print("device close success!")
    else:
        print("device close fail!")
    #os.system("pause")
def readImage_thread(publisher_img):
    """Frame-grabbing loop (ROS variant with optional histogram logging).

    Waits for `count1` to signal a pending read, takes `rt_lock`
    (non-blocking), pulls one frame out of the ArduCam SDK FIFO, decodes it
    according to cfg['emImageFmtMode'], optionally appends the raw frame and
    a brightness histogram to the open log handles, shows the frame in an
    OpenCV window, and publishes it as a 'mono8' ROS image message.

    :param publisher_img: rospy publisher the decoded frames are sent to.
    """
    global handle, running, Width, Height, save_flag, cfg, color_mode, rt_lock, count1
    global COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, COLOR_BayerGR2BGR, COLOR_BayerBG2BGR
    global logpath, loghandle, log_stepsize, logging, curr_logging, loghandle1, max_num_frames_log, curr_num_frames_log
    bridge = CvBridge()
    count = 0        # frames seen since the last FPS report
    totalFrame = 0   # running index for saved image filenames
    time0 = time.time()
    time1 = time.time()
    data = {}
    # Demo preview window is always created in this variant.
    cv2.namedWindow("ArduCam Demo", 1)
    if not os.path.exists("images1"):
        os.makedirs("images1")
    print "CIAO"
    if not os.path.exists(logpath):
        # World-writable log directory; umask is cleared first so 0777 sticks.
        oldmask = os.umask(000)
        os.makedirs(logpath, 0777)
        print "CREATING DIRECTORY"
    while running:
        if count1 == 1:
            # Non-blocking acquire: skip this iteration if another thread
            # currently holds the lock.
            if rt_lock.acquire(False):
                print "read image", count1
                display_time = time.time()
                if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
                    rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(
                        handle)
                    datasize = rtn_cfg['u32Size']
                    # NOTE(review): these `continue`s skip rt_lock.release()
                    # and the count1 decrement below — the lock stays held
                    # after a failed read. Confirm whether that is intended.
                    if rtn_val != 0:
                        print "read data fail!"
                        continue
                    if datasize == 0:
                        continue
                    image = None
                    emImageFmtMode = cfg['emImageFmtMode']
                    if emImageFmtMode == ArducamSDK.FORMAT_MODE_JPG:
                        image = JPGToMat(data, datasize)
                    if emImageFmtMode == ArducamSDK.FORMAT_MODE_YUV:
                        image = YUVToMat(data)
                    if emImageFmtMode == ArducamSDK.FORMAT_MODE_RGB:
                        image = RGB565ToMat(data)
                    if emImageFmtMode == ArducamSDK.FORMAT_MODE_MON:
                        image = np.frombuffer(data, np.uint8).reshape(
                            Height, Width, 1)
                        if logging:
                            curr_logging = True
                            # Save the entire raw frame into the .npy log.
                            np.save(loghandle1, image)
                            # Write a brightness histogram as differences of
                            # cumulative counts: one bin per `log_stepsize`
                            # grey levels, comma-separated.
                            prev_bin = 0
                            for i in range(log_stepsize, 255, log_stepsize):
                                smaller = (image < i).sum()
                                loghandle.write(
                                    str(smaller - prev_bin) + ',' + ' ')
                                prev_bin = smaller
                            # Last bin: remainder up to the maximum grey value.
                            smaller = (image < 256).sum()
                            loghandle.write(
                                str(smaller - prev_bin) + ';' + '\n')
                            curr_num_frames_log += 1
                            if not (curr_num_frames_log <= max_num_frames_log):
                                logging = False
                                loghandle.close()
                                loghandle1.close()
                                # NOTE(review): `logname` is not declared in
                                # the globals above — verify it is defined at
                                # module level before this runs.
                                print "Logging to" + logname + "has ended"
                                curr_num_frames_log = 0
                            curr_logging = False
                    if emImageFmtMode == ArducamSDK.FORMAT_MODE_RAW:
                        image = np.frombuffer(data, np.uint8).reshape(
                            Height, Width, 1)
                        # Debayer according to the configured pattern.
                        if color_mode == 0:
                            image = cv2.cvtColor(image, COLOR_BayerRG2BGR)
                        if color_mode == 1:
                            image = cv2.cvtColor(image, COLOR_BayerGR2BGR)
                        if color_mode == 2:
                            image = cv2.cvtColor(image, COLOR_BayerGB2BGR)
                        if color_mode == 3:
                            image = cv2.cvtColor(image, COLOR_BayerBG2BGR)
                        # NOTE(review): this condition can never be true
                        # (`and` should presumably be `or`), so the
                        # out-of-range fallback below is dead code.
                        if color_mode < 0 and color_mode > 3:
                            image = cv2.cvtColor(image, COLOR_BayerGB2BGR)
                    # FPS bookkeeping: prints the elapsed interval roughly
                    # once per second.
                    time1 = time.time()
                    if time1 - time0 >= 1:
                        #print "%s %d %s\n"%("fps:",count,"/s")
                        print(time1 - time0)
                        count = 0
                        time0 = time1
                    count += 1
                    # Optionally persist the frame to disk.
                    if save_flag:
                        cv2.imwrite("images/image%d.jpg" % totalFrame, image)
                        totalFrame += 1
                    # Resizing/cropping is intentionally disabled here —
                    # enabling it changes the achievable frame rate.
                    #image = cv2.resize(image,(640,480),interpolation = cv2.INTER_LINEAR)
                    #image = image[0:640, 0:480]
                    # Live preview (little impact in the long-exposure case).
                    cv2.imshow("ArduCam Demo", image)
                    cv2.waitKey(10)
                    # Publish the frame to ROS.
                    try:
                        publisher_img.publish(
                            bridge.cv2_to_imgmsg(image, 'mono8'))
                    except CvBridgeError as e:
                        print(e)
                    #publisher_img.publish(image)
                    # Flush (not del): drops all pending frames in the FIFO.
                    #ArducamSDK.Py_ArduCam_del(handle)
                    ArducamSDK.Py_ArduCam_flush(handle)
                    #print "------------------------display time:",(time.time() - display_time)
                else:
                    time.sleep(0.001)
                count1 = count1 - 1
                rt_lock.release()
            else:
                time.sleep(0.001)
        else:
            time.sleep(0.001)
def readImage_thread():
    """Recording loop: grab frames, black out the burnt-in digit area,
    overlay timestamp and sensor-register info, append to an XVID .avi
    (rotated every 500 frames and uploaded in a background thread), and
    post a CLAHE-enhanced greyscale preview via `fh`.

    Runs until the module-level `running` flag goes False.
    """
    global handle,running,Width,Height,save_flag,acfg,color_mode,save_raw
    global COLOR_BayerGB2BGR,COLOR_BayerRG2BGR,COLOR_BayerGR2BGR,COLOR_BayerBG2BGR
    count = 0
    totalFrame = 0
    time0 = time.time()
    time1 = time.time()
    data = {}
    cv2.namedWindow("ArduCam Demo",1)
    counter = 0  # frames written to the current video file (0..499)
    # Contrast-limited adaptive histogram equalisation for the preview.
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(4,4))
    while running:
        display_time = time.time()
        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            rtn_val,data,rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
            datasize = rtn_cfg['u32Size']
            # NOTE(review): these `continue`s skip Py_ArduCam_del, so a
            # failed/empty read leaves the FIFO slot unreleased.
            if rtn_val != 0:
                print("read data fail!")
                continue
            if datasize == 0:
                continue
            image = convert_image(data,rtn_cfg,color_mode)
            # Polygon covering the burnt-in digit area at the bottom-left.
            # image.shape is [H, W]; polygon points are [x, y] = [W, H]
            # pairs expressed as fractions of the frame size.
            a1 = [0, int(image.shape[0] * 0.93)]                            #0,896
            a2 = [0, int((1 - 0) * image.shape[0])]                         #0,964
            a3 = [int(image.shape[1] * 0.4), int((1 - 0) * image.shape[0])] #512,964
            a4 = [int(image.shape[1] * 0.4), int(image.shape[0] * 0.93)]    #512,896
            digits_area = np.array([[a1, a2, a3, a4]], dtype=np.int32)
            # Black out the digit area before recording.
            cv2.fillPoly( image, digits_area, (0,0,0) )
            if counter == 0:
                # Start a new timestamped video file (rotated every 500
                # frames, see the upload block below). XVID is used; the
                # lossless MJ2C/HFYU variants were tried and commented out.
                filename = datetime.datetime.now().strftime(
                    "%Y-%m-%d_%H-%M-%S") + "_front_top.avi"
                out = cv2.VideoWriter(
                    filename, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 8,
                    (1280, 964))
                # NOTE(review): the writer is opened at 1280x964 — confirm
                # this matches the converted frame size.
                reprint("Creating file " + str(filename))
            # Wall-clock timestamp overlay (bottom-left corner).
            cv2.putText(image,
                        datetime.datetime.now().strftime(
                            "%Y-%m-%d: %H:%M:%S %f"),
                        (10, image.shape[0] - 10), cv2.FONT_HERSHEY_DUPLEX,
                        0.8, (255, 255, 255), 1, cv2.LINE_AA)
            # Sensor status line built from raw register reads; per the
            # original notes: 12644 = integration time, 12586 = gains,
            # 12626 = mean gain, 12546/12680 related luminance/dark-current
            # readings — TODO confirm against the sensor datasheet.
            ardu = ("Time: " + str((ArducamSDK.Py_ArduCam_readSensorReg(
                handle, int(12644))[1])) + " ISO: " + str(
                    (ArducamSDK.Py_ArduCam_readSensorReg(
                        handle, int(12586))[1])) + " lum: " + str(
                            (ArducamSDK.Py_ArduCam_readSensorReg(
                                handle, int(12626))[1])) + "/" + str(
                                    (ArducamSDK.Py_ArduCam_readSensorReg(
                                        handle, int(12546))[1])))
            cv2.putText(image, ardu, (10, image.shape[0] - 40),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1,
                        cv2.LINE_AA)
            # cv2.imshow("stream", image)
            # cv2.waitKey(0)
            out.write(image)
            # NOTE(review): the result of cv2.resize is discarded — this
            # line is a no-op as written.
            cv2.resize(image,(512,384))
            # Preview path: convert to grey; the frame may already be
            # single-channel, in which case cvtColor raises and the frame
            # is used as-is.
            try:
                colorconversion = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            except:
                colorconversion = image
                pass
            # Two CLAHE passes to boost local contrast in the preview.
            for i in range(2):
                colorconversion = clahe.apply(colorconversion)
            fh.post_image(colorconversion)
            counter += 1
            if counter == 500:
                # Rotate the video file: close it and upload it in the
                # background, then start a fresh file on the next frame.
                out.release()
                reprint("Sending file " + str(filename))
                threading.Thread(target=fh.post_files,args=[filename]).start()
                counter = 0
            # (Large blocks of commented-out register logging / frame-dump
            # experiments removed for readability; see version history.)
            # Release the consumed frame back to the SDK FIFO.
            ArducamSDK.Py_ArduCam_del(handle)
            #print("------------------------display time:",(time.time() - display_time))
        else:
            time.sleep(0.001);
def readImage_thread(publisher_img):
    """Frame-grabbing loop for ROS: read frames from the ArduCam SDK FIFO,
    decode them according to cfg['emImageFmtMode'], optionally save JPEGs
    under images/, and publish each frame as a 'mono8' ROS image message.

    Runs until the module-level `running` flag goes False.

    :param publisher_img: rospy publisher the decoded frames are sent to.
    """
    global handle, running, Width, Height, save_flag, cfg, color_mode
    global COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, COLOR_BayerGR2BGR, COLOR_BayerBG2BGR
    bridge = CvBridge()
    count = 0        # frames since the last FPS report
    totalFrame = 0   # running index for saved image filenames
    time0 = time.time()
    time1 = time.time()
    data = {}
    #cv2.namedWindow("ArduCam Demo",1)
    if not os.path.exists("images"):
        os.makedirs("images")
    while running:
        display_time = time.time()
        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
            datasize = rtn_cfg['u32Size']
            if rtn_val != 0 or datasize == 0:
                if rtn_val != 0:
                    print("read data fail!")
                # BUGFIX: release the FIFO slot before skipping; the old
                # code leaked the buffer slot on every failed/empty read.
                ArducamSDK.Py_ArduCam_del(handle)
                continue
            # Decode according to the configured capture format.
            image = None
            emImageFmtMode = cfg['emImageFmtMode']
            if emImageFmtMode == ArducamSDK.FORMAT_MODE_JPG:
                image = JPGToMat(data, datasize)
            if emImageFmtMode == ArducamSDK.FORMAT_MODE_YUV:
                image = YUVToMat(data)
            if emImageFmtMode == ArducamSDK.FORMAT_MODE_RGB:
                image = RGB565ToMat(data)
            if emImageFmtMode == ArducamSDK.FORMAT_MODE_MON:
                image = np.frombuffer(data, np.uint8).reshape(
                    Height, Width, 1)
            if emImageFmtMode == ArducamSDK.FORMAT_MODE_RAW:
                image = np.frombuffer(data, np.uint8).reshape(
                    Height, Width, 1)
                # Debayer according to the configured pattern.
                if color_mode == 0:
                    image = cv2.cvtColor(image, COLOR_BayerRG2BGR)
                if color_mode == 1:
                    image = cv2.cvtColor(image, COLOR_BayerGR2BGR)
                if color_mode == 2:
                    image = cv2.cvtColor(image, COLOR_BayerGB2BGR)
                if color_mode == 3:
                    image = cv2.cvtColor(image, COLOR_BayerBG2BGR)
                # BUGFIX: was `color_mode < 0 and color_mode > 3`, which is
                # always False — the out-of-range fallback never ran.
                if color_mode < 0 or color_mode > 3:
                    image = cv2.cvtColor(image, COLOR_BayerGB2BGR)
            # Report (roughly once per second) how long the interval took.
            time1 = time.time()
            if time1 - time0 >= 1:
                #print "%s %d %s\n"%("fps:",count,"/s")
                print(time1 - time0)
                count = 0
                time0 = time1
            count += 1
            # Optionally persist the frame to disk.
            if save_flag:
                cv2.imwrite("images/image%d.jpg" % totalFrame, image)
                totalFrame += 1
            # Cropping/resizing intentionally disabled — it affects the
            # achievable frame rate:
            #image = cv2.resize(image,(640,480),interpolation = cv2.INTER_LINEAR)
            #image = image[0:640, 0:480]
            #cv2.imshow("ArduCam Demo",image)
            # Publish the frame to ROS.
            try:
                publisher_img.publish(bridge.cv2_to_imgmsg(image, 'mono8'))
            except CvBridgeError as e:
                print(e)
            cv2.waitKey(10)
            # Release the consumed frame back to the SDK FIFO.
            ArducamSDK.Py_ArduCam_del(handle)
            #print "------------------------display time:",(time.time() - display_time)
        else:
            time.sleep(0.001)
def readImage_thread():
    """Main acquisition loop and state machine for the LED multiview rig.

    Pulls frames from the ArduCam FIFO and, depending on module-level flags,
    either:
      * calibrates the sensor integration time (register 0x0202) per
        LED/color until the frame's median grey reaches the reference grey
        captured on the first pass (`calibrate_flag` path),
      * captures one image per LED/color combination (`save_multiview` path),
      * or saves single-shot / continuous frames on request.

    `LED_DROP` counts frames to discard after switching LEDs or integration
    time, so the next processed frame reflects the new settings.
    Runs until the module-level `running` flag goes False.
    """
    global handle, running, width, height, save_multiview, LED_DROP, save_single_flag, save_flag, save_beginning, save_raw, cfg, color_mode, \
        calibrate_flag, calibrate_grey, calibrate_at, calibrate_target, calibrate_color, calibrate_results
    time0 = time.time()
    time1 = time.time()
    single_count = 0  # index for saved single-shot images
    count = 0         # frames since the last FPS report
    totalFrame = 0    # index for continuously saved frames
    data = {}
    cv2.namedWindow("ArduCam output", 1)
    if not os.path.exists("images"):
        os.makedirs("images")
    while running:
        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
            datasize = rtn_cfg['u32Size']
            if rtn_val != 0 or datasize == 0:
                # Failed/empty read: release the FIFO slot and skip.
                ArducamSDK.Py_ArduCam_del(handle)
                print("Failed to read data.")
                continue
            image = ImageConvert.convert_image(data, rtn_cfg, color_mode)
            time1 = time.time()
            interval = 3  # compute framerate every [interval] seconds
            if time1 - time0 >= interval:
                if save_flag:
                    elapsed_time = time.time() - save_beginning
                    print(
                        "Frames per second: %d/s (#%d to #%d), elapsed time: %ds."
                        % (count / interval, totalFrame - count, totalFrame,
                           elapsed_time))
                else:
                    print("Frames per second: %d/s." % (count / interval))
                count = 0
                time0 = time1
            count += 1
            if LED_DROP > 0:
                # Discard this frame: it was exposed before the latest
                # LED/integration change took effect.
                LED_DROP -= 1
            else:
                if calibrate_flag is not None:
                    # --- Integration-time calibration state machine ---
                    if calibrate_grey is None:  # first pass
                        # Reference grey level taken from the current frame.
                        calibrate_grey = np.median(image)
                        print("Target grey: %f." % (calibrate_grey))
                    elif calibrate_flag >= 0:
                        target, color = get_multiview_components(
                            calibrate_flag)
                        if target != calibrate_target or color != calibrate_color:
                            # New LED/color combination: switch the hardware
                            # and restart the integration-time search.
                            calibrate_target = target
                            calibrate_color = color
                            arduino_write_read(target, color)
                            ArducamSDK.Py_ArduCam_writeSensorReg(
                                handle, 0x0202, calibrate_at)
                            LED_DROP = 1
                        else:
                            current_grey = np.median(image)
                            if current_grey + calibrate_threshold >= calibrate_grey or calibrate_at >= calibrate_cap:
                                # Converged (or hit the cap): record the
                                # result and advance to the next combination.
                                print(
                                    "Calibration result for led %d and color %d (raw: %d): %d (current grey: %f)."
                                    % (target, color, calibrate_flag,
                                       calibrate_at, current_grey))
                                if target not in calibrate_results.keys():
                                    calibrate_results[target] = {
                                        color: calibrate_at
                                    }
                                else:
                                    calibrate_results[target][
                                        color] = calibrate_at
                                calibrate_at = calibrate_start
                                calibrate_flag -= 1
                            else:
                                calibrate_at += calibrate_increase  # TODO: adaptative increase
                                ArducamSDK.Py_ArduCam_writeSensorReg(
                                    handle, 0x0202, calibrate_at)
                            LED_DROP = 1
                    else:
                        # calibrate_flag went negative: restore the default
                        # lighting/integration and finish calibration.
                        print("Middle led: %d." % (LED_MIDDLE))
                        arduino_write_read(LED_MIDDLE, 0)
                        ArducamSDK.Py_ArduCam_writeSensorReg(
                            handle, 0x0202, coarse_integration)
                        LED_DROP = 1
                        print("Calibration finished.")
                        calibrate_flag = None
                elif save_multiview is not None:
                    # --- Multiview capture: the current frame was exposed
                    # under the PREVIOUS LED/color settings (hence
                    # `previous`); the next combination is set up first. ---
                    previous = save_multiview + 1
                    if save_multiview >= 0:
                        target, color = get_multiview_components(
                            save_multiview)
                        # Use the calibrated integration time when one was
                        # recorded for this LED, else the coarse default.
                        if target in calibrate_results.keys():
                            integration = calibrate_results[target][color]
                        else:
                            integration = coarse_integration
                        arduino_write_read(target, color)
                        ArducamSDK.Py_ArduCam_writeSensorReg(
                            handle, 0x0202, integration)
                        LED_DROP = 1
                        if save_multiview != LED_MAX_ITERATIONS:  # first pass
                            target, color = get_multiview_components(
                                previous)  # previous
                            if target in calibrate_results.keys():
                                integration = calibrate_results[target][color]
                            else:
                                integration = coarse_integration
                            grey = np.median(image)
                            cv2.imwrite(
                                "images/_multi_%d_%d.png" % (target, color),
                                image)
                            print(
                                "Multiview image with led %d and color %d saved (raw: %d), with integration time: %d (grey: %f)."
                                % (target, color, previous, integration, grey))
                        save_multiview -= 1
                    else:
                        # Last frame: save it, then restore defaults.
                        print("Middle led: %d." % (LED_MIDDLE))
                        arduino_write_read(LED_MIDDLE, 0)
                        ArducamSDK.Py_ArduCam_writeSensorReg(
                            handle, 0x0202, coarse_integration
                        )  # reset integration to base value
                        LED_DROP = 1
                        target, color = get_multiview_components(
                            previous)  # previous
                        cv2.imwrite(
                            "images/_multi_%d_%d.png" % (target, color),
                            image)
                        print(
                            "Multiview image with led %d and color %d saved (raw: %d)."
                            % (target, color, previous))
                        save_multiview = None
                else:
                    # --- Plain capture modes ---
                    if save_single_flag:
                        cv2.imwrite("images/_single%d.png" % single_count,
                                    image)
                        print("Single image #%d saved." % (single_count))
                        single_count += 1
                        save_single_flag = False
                    if save_flag:
                        cv2.imwrite("images/image%d.png" % totalFrame, image)
                        if save_raw:
                            with open("images/image%d.raw" % totalFrame,
                                      'wb') as f:
                                f.write(data)
                        totalFrame += 1
            # Preview resized to the display size; then release the
            # consumed frame back to the SDK FIFO.
            image = cv2.resize(image, (width, height),
                               interpolation=cv2.INTER_LINEAR)
            cv2.imshow("ArduCam output", image)
            cv2.waitKey(10)
            ArducamSDK.Py_ArduCam_del(handle)
        else:
            time.sleep(0.001)