def open(self):
    """Initialize the camera described by self.DevInfo and start streaming.

    Idempotent: returns True immediately if the camera is already open.
    On success stores the SDK handle, the ISP output buffer and the
    capability struct on self (self.hCamera / self.pFrameBuffer / self.cap).

    Returns:
        bool: True when the camera is streaming, False if CameraInit failed.
    """
    if self.hCamera > 0:
        # Already opened earlier; nothing to do.
        return True

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(self.DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return False

    # Query the camera capability description.
    cap = mvsdk.CameraGetCapability(hCamera)

    # Force the ISP to output 24-bit BGR regardless of sensor type
    # (a mono sensor is expanded to R=G=B by the ISP).
    mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)

    # Size the output buffer for the camera's maximum resolution so any
    # resolution the camera is later switched to still fits.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * 3

    # The camera delivers RAW data; the software ISP converts it into this
    # buffer (mono cameras skip the conversion but still need the buffer).
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    # Continuous (free-running) acquisition instead of triggered mode.
    mvsdk.CameraSetTriggerMode(hCamera, 0)

    # Manual exposure, 10 ms. (The original comment claimed 30 ms while the
    # code set 10 ms; the comment was corrected, not the behavior.)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 10 * 1000)

    # Analog gain = 30.
    mvsdk.CameraSetAnalogGain(hCamera, 30)

    # ISP per-channel color gain and saturation.
    mvsdk.CameraSetGain(hCamera, 106, 100, 110)
    mvsdk.CameraSetSaturation(hCamera, 99)

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    self.hCamera = hCamera
    self.pFrameBuffer = pFrameBuffer
    self.cap = cap
    return True
def main_loop():
    """Enumerate cameras, let the user pick one, then show live frames and
    print the instantaneous FPS until the 'q' key is pressed."""
    # Enumerate attached cameras.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
    # FIX: input() returns a string in Python 3; the original passed it
    # straight to DevList[i], which raises TypeError. Convert to int.
    i = int(input("Select camera: "))
    DevInfo = DevList[i]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query camera capabilities and detect mono vs. color sensor.
    cap = mvsdk.CameraGetCapability(hCamera)
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # A mono sensor gets MONO8 ISP output directly instead of being
    # expanded to 24-bit R=G=B.
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition, manual 5 ms exposure, neutral color gain.
    mvsdk.CameraSetTriggerMode(hCamera, 0)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 5 * 1000)
    mvsdk.CameraSetGain(hCamera, 100, 100, 100)

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # Size the ISP output buffer for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)
    # RAW from the camera is converted by the software ISP into this buffer.
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    prev_t = time.time()
    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Grab one frame (200 ms timeout).
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # pFrameBuffer now holds BGR (color) or 8-bit gray (mono) pixels;
            # wrap it as a numpy array for OpenCV.
            frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))

            # FIX: guard against a zero time delta (fast loop / coarse clock),
            # which made the original FPS print raise ZeroDivisionError.
            dt = time.time() - prev_t
            if dt > 0:
                print(1 / dt)
            prev_t = time.time()
            cv2.imshow("capture", frame)
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the camera and free the frame buffer.
    mvsdk.CameraUnInit(hCamera)
    mvsdk.CameraAlignFree(pFrameBuffer)
def main_loop():
    """Grab frames, run them through the external image_input() correction
    pipeline, report a raw-vs-corrected color distance statistic, and paste
    the corrected frame into the global projector background img_0.

    Uses module-level names defined elsewhere in this file: image_input and
    img_0. Exits when 'q' is pressed in an OpenCV window.
    """
    # Enumerate attached cameras; use the first one found.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return
    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Capability query; mono sensors get MONO8 ISP output directly.
    cap = mvsdk.CameraGetCapability(hCamera)
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition, manual 30 ms exposure, fixed per-channel gain.
    mvsdk.CameraSetTriggerMode(hCamera, 0)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)
    mvsdk.CameraSetGain(hCamera, 200, 200, 200)

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # ISP output buffer sized for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Grab one frame (200 ms timeout) and run the software ISP.
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # Wrap the ISP output (BGR or 8-bit gray) as a numpy image.
            frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))

            # Quarter-size copy of the raw frame for the distance statistic.
            X = cv2.pyrDown(cv2.pyrDown(frame))
            # Save the raw frame.
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\ori.bmp',
                frame)

            # Rotate 180 degrees around the image center.
            w, h = frame.shape[:2]
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))

            # Denoise, then apply the external correction pipeline.
            frame = cv2.GaussianBlur(frame, (0, 0), 1.8)
            frame = image_input(frame)

            # Quarter-size copy of the corrected frame; save it.
            Y = cv2.pyrDown(cv2.pyrDown(frame))
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\correct.bmp',
                frame)

            # Per-pixel color distance between raw and corrected frames.
            # FIX: X and Y are uint8, so the original (X - Y) wrapped modulo
            # 256 and produced a meaningless statistic; compute in float.
            # (((d)**2)**0.5 is elementwise |d|, hence np.abs.)
            # NOTE(review): assumes image_input() preserves the frame shape —
            # confirm against its definition, otherwise the subtraction raises.
            diff = X.astype(np.float32) - Y.astype(np.float32)
            t = np.sum(np.abs(diff), axis=2)
            print('距离均值', np.mean(t), '距离方差', np.var(t))

            # Camera-to-projector correction: scale, rotate back 180 degrees.
            fx = 0.67
            fy = 0.67
            frame = cv2.resize(frame, None, fx=fx, fy=fy)
            w, h, d = frame.shape
            print('shape', w, h, d)
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))

            # Paste into the projector background at a fixed top-left offset.
            # NOTE(review): img_0 must be a module-level BGR image at least
            # (145+w) x (205+h) — not visible here, confirm.
            l, t = 145, 205
            img_0[l:l + w, t:t + h, 0] = frame[:, :, 0]
            img_0[l:l + w, t:t + h, 1] = frame[:, :, 1]
            img_0[l:l + w, t:t + h, 2] = frame[:, :, 2]
            cv2.imshow('projector', img_0)
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the camera and free the frame buffer.
    mvsdk.CameraUnInit(hCamera)
    mvsdk.CameraAlignFree(pFrameBuffer)
def camera():
    """Open the first detected camera, show a live 180°-rotated preview, and
    return the frame on screen when the user hits the space bar.

    Returns:
        The captured frame (numpy array) on space; 0 if the user quits with
        'q'; None when no camera is found or CameraInit fails.
    """
    captured = 0

    # Find attached cameras; bail out when none is present.
    devices = mvsdk.CameraEnumerateDevice()
    if len(devices) < 1:
        print("No camera was found!")
        return
    dev_info = devices[0]
    print(dev_info)

    # Open the camera.
    cam = 0
    try:
        cam = mvsdk.CameraInit(dev_info, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Capability query tells us whether this is a mono or a color sensor.
    capability = mvsdk.CameraGetCapability(cam)
    is_mono = capability.sIspCapacity.bMonoSensor != 0
    if is_mono:
        # Mono sensor: have the ISP emit MONO8 instead of 24-bit R=G=B.
        mvsdk.CameraSetIspOutFormat(cam, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Free-running acquisition with manual 40 ms exposure and fixed gain.
    mvsdk.CameraSetTriggerMode(cam, 0)
    mvsdk.CameraSetAeState(cam, 0)
    mvsdk.CameraSetExposureTime(cam, 40 * 1000)
    mvsdk.CameraSetGain(cam, 200, 200, 200)
    print(mvsdk.CameraGetGain(cam))

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(cam)

    # ISP output buffer sized for the largest resolution the camera supports.
    # (The camera sends RAW data; the software ISP converts into this buffer.)
    buf_size = (capability.sResolutionRange.iWidthMax
                * capability.sResolutionRange.iHeightMax
                * (1 if is_mono else 3))
    frame_buf = mvsdk.CameraAlignMalloc(buf_size, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        try:
            # Fetch one RAW frame (200 ms timeout) and run it through the ISP.
            raw, head = mvsdk.CameraGetImageBuffer(cam, 200)
            mvsdk.CameraImageProcess(cam, raw, frame_buf, head)
            mvsdk.CameraReleaseImageBuffer(cam, raw)

            # Wrap the ISP output (BGR color or 8-bit gray) as a numpy image.
            raw_bytes = (mvsdk.c_ubyte * head.uBytes).from_address(frame_buf)
            img = np.frombuffer(raw_bytes, dtype=np.uint8)
            channels = 1 if head.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3
            img = img.reshape((head.iHeight, head.iWidth, channels))

            # Rotate the view by 180 degrees around the image center.
            rows, cols = img.shape[:2]
            rot = cv2.getRotationMatrix2D((cols / 2, rows / 2), 180, 1)
            rotated = cv2.warpAffine(img, rot, (cols, rows))

            cv2.namedWindow('correct_capture', cv2.WINDOW_NORMAL)
            cv2.imshow("correct_capture", rotated)

            # Space bar (key code 32) freezes the current frame and returns it.
            key = cv2.waitKey(1)
            if key == 32:
                captured = rotated
                break
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Shut the camera down, release the ISP buffer, close the preview window.
    mvsdk.CameraUnInit(cam)
    mvsdk.CameraAlignFree(frame_buf)
    cv2.destroyWindow('correct_capture')
    return captured
def main_loop():
    """Grab frames, run them through the external image_input() correction
    pipeline, report a raw-vs-corrected color distance statistic, and project
    the corrected frame blended over the global background image img_3.

    Uses module-level names defined elsewhere in this file: image_input and
    img_3. Exits when 'q' is pressed in an OpenCV window.

    FIX NOTES vs. the original:
      * `mode` was toggled on the 'm' key without ever being assigned first,
        raising UnboundLocalError the first time 'm' was pressed.
      * A 'projector1' display block read `img2`, whose every definition was
        commented out, so it raised NameError on the first frame; removed.
      * The unused l/t/fx/fy locals (only referenced by commented-out code)
        and large commented-out sections were removed.
    """
    mode = False  # toggled by the 'm' key; initialized to avoid UnboundLocalError

    # Enumerate attached cameras; use the first one found.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return
    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Capability query; mono sensors get MONO8 ISP output directly.
    cap = mvsdk.CameraGetCapability(hCamera)
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition, manual 30 ms exposure, fixed per-channel gain.
    mvsdk.CameraSetTriggerMode(hCamera, 0)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)
    mvsdk.CameraSetGain(hCamera, 200, 200, 200)

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # ISP output buffer sized for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while True:
        try:
            # Grab one frame (200 ms timeout) and run the software ISP.
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # Wrap the ISP output as a numpy image (BGR or 8-bit gray).
            frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))

            # Quarter-size copy of the raw frame for the distance statistic.
            X = cv2.pyrDown(cv2.pyrDown(frame))
            # Save the raw frame.
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\ori.bmp',
                frame)

            # Rotate 180 degrees around the image center.
            w, h = frame.shape[:2]
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))

            # Denoise, then apply the external correction pipeline.
            frame = cv2.GaussianBlur(frame, (0, 0), 1.8)
            frame = image_input(frame)

            # Quarter-size copy of the corrected frame; save it.
            Y = cv2.pyrDown(cv2.pyrDown(frame))
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\correct.bmp',
                frame)

            # Per-pixel color distance between raw and corrected frames.
            # FIX: X and Y are uint8, so the original (X - Y) wrapped modulo
            # 256 and produced a meaningless statistic; compute in float.
            # NOTE(review): assumes image_input() preserves the frame shape —
            # confirm against its definition, otherwise the subtraction raises.
            diff = X.astype(np.float32) - Y.astype(np.float32)
            t = np.sum(np.abs(diff), axis=2)
            print('距离均值', np.mean(t), '距离方差', np.var(t))

            # Rotate back 180 degrees before projecting.
            w, h, d = frame.shape
            print('shape', w, h, d)
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))

            # Scale to the projector resolution and blend with the background.
            # NOTE(review): img_3 is presumably a module-level 800x1280 BGR
            # image — not visible here, confirm.
            frame = cv2.resize(frame, (1280, 800))
            dst = cv2.add(frame, img_3)
            cv2.imshow('projector0', dst)

            k = cv2.waitKey(1) & 0xFF
            if k == ord('m'):
                mode = not mode
            elif k == ord('q'):
                break
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the camera and free the frame buffer.
    mvsdk.CameraUnInit(hCamera)
    mvsdk.CameraAlignFree(pFrameBuffer)
# Script-level camera bring-up fragment.
# NOTE(review): this block runs at module/script level; DevInfo, gain_r,
# gain_b and record must be defined earlier in the file (not visible here).
try:
    hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
except mvsdk.CameraException as e:
    # NOTE(review): unlike the main_loop functions, this handler does not
    # abort; if CameraInit fails, hCamera is never bound and the next
    # statement raises NameError. Consider exiting here.
    print("CameraInit Failed({}): {}".format(e.error_code, e.message))

# Capability query; a mono sensor gets MONO8 ISP output directly instead of
# being expanded to 24-bit R=G=B.
cap = mvsdk.CameraGetCapability(hCamera)
monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
if monoCamera:
    mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

# Continuous acquisition, manual 5 ms exposure.
mvsdk.CameraSetTriggerMode(hCamera, 0)
mvsdk.CameraSetAeState(hCamera, 0)
mvsdk.CameraSetExposureTime(hCamera, 5 * 1000)
# Per-channel color gain from globals; green gain is 0 —
# NOTE(review): presumably an intentional white-balance choice, verify.
mvsdk.CameraSetGain(hCamera, gain_r, 0, gain_b)

# Start the SDK's internal grab thread.
mvsdk.CameraPlay(hCamera)

# ISP output buffer sized for the camera's maximum resolution.
FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
    1 if monoCamera else 3)
pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

prev = time.time()

# Optional AVI recording: XVID, 20 fps, 640x360, file named after the
# current wall-clock time.
if record == True:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        time.ctime(time.time()) + '.avi', fourcc, 20.0, (640, 360))

# FPS bookkeeping: rolling window of q_size frame timings.
prev = time.time()
i = 0
q_size = 10
fps_l = np.zeros(q_size)
def main_loop():
    """Track a green target in the camera stream and stream yaw/pitch
    commands to a gimbal MCU over the /dev/ttyUSB0 serial port.

    Uses module-level names defined elsewhere in this file: nothing
    (trackbar callback), mouse_event, and ax/ay (pixel picked by the mouse
    callback). Exits on the 'q' key.
    """
    # Enumerate attached cameras and pick one.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
    i = 0 if nDev == 1 else int(input("Select camera: "))
    DevInfo = DevList[i]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Capability query; pick the matching ISP output format.
    cap = mvsdk.CameraGetCapability(hCamera)
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
    else:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_COLOR)

    # Continuous acquisition with manual exposure and fixed ISP tuning.
    mvsdk.CameraSetTriggerMode(hCamera, 0)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 5 * 1000)
    mvsdk.CameraSetSaturation(hCamera, 142)       # saturation
    mvsdk.CameraSetContrast(hCamera, 150)         # contrast
    mvsdk.CameraSetAnalogGain(hCamera, 7)         # analog gain
    mvsdk.CameraSetSharpness(hCamera, 150)        # sharpness
    mvsdk.CameraSetGain(hCamera, 144, 118, 100)   # per-channel color gain
    mvsdk.CameraSetGamma(hCamera, 50)             # gamma

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # ISP output buffer sized for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    count = 0
    time1 = time.time()
    a = 0
    cv2.namedWindow('Press q to end')
    cv2.createTrackbar('case1:', 'Press q to end', 0, 100, nothing)
    cv2.createTrackbar('case2:', 'Press q to end', 30, 100, nothing)
    cv2.createTrackbar('case3:', 'Press q to end', 109, 200, nothing)
    ax2 = ay2 = 0
    countc = 0

    # FIX: the original re-opened /dev/ttyUSB0 with serial.Serial() on every
    # loop iteration and never closed it, leaking file descriptors (and it
    # also queried a nonexistent 'Saturation:' trackbar into an unused local).
    # Open the port once here and close it after the loop.
    portx = "/dev/ttyUSB0"
    bps = 115200
    ser = serial.Serial(portx, bps, timeout=1)  # timeout=1: reads wait <= 1 s

    # Loop-invariant setup hoisted out of the frame loop.
    mvsdk.CameraSetExposureTime(hCamera, 12 * 1000)
    cv2.setMouseCallback("Press q to end", mouse_event)
    lower_green = np.array([0, 105, 25])    # dark 0,80,25 / light 0,105,25
    upper_green = np.array([56, 255, 109])  # light 56,255,109 / dark 20,100,40

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        count += 1
        # Trackbar tuning knobs (b2/b3 are read but currently unused).
        b = cv2.getTrackbarPos('case1:', 'Press q to end')
        b2 = cv2.getTrackbarPos('case2:', 'Press q to end')
        b3 = cv2.getTrackbarPos('case3:', 'Press q to end')

        # Once-per-second frame-rate report.
        if time.time() - time1 >= 1:
            time1 = time.time()
            print('frames:', count)
            print(a)
            count = 0

        # Grab one frame (200 ms timeout).
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # Wrap the ISP output as a numpy image (BGR or 8-bit gray).
            frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))

            # Resize (base 320x240, grown by the 'case1:' trackbar) and swap
            # channel order for OpenCV.
            frame = cv2.resize(frame, (320 + b * 4, 240 + b * 3),
                               interpolation=cv2.INTER_LINEAR)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # Gray-world auto white balance, gray level clamped to [120, 150].
            (B, G, R) = cv2.split(frame)
            r_avg = cv2.mean(R)[0]
            g_avg = cv2.mean(G)[0]
            b_avg = cv2.mean(B)[0]
            k = (r_avg + g_avg + b_avg) / 3
            if k > 150:
                k = 150
            elif k < 120:
                k = 120
            kr = k / r_avg
            kg = k / g_avg
            kb = k / b_avg
            r = cv2.addWeighted(src1=R, alpha=kr, src2=0, beta=0, gamma=0)
            g = cv2.addWeighted(src1=G, alpha=kg, src2=0, beta=0, gamma=0)
            b = cv2.addWeighted(src1=B, alpha=kb, src2=0, beta=0, gamma=0)
            frame2 = frame  # keep the pre-balance frame for drawing/display
            frame = cv2.merge([b, g, r])

            # 'c' key: save a snapshot of the balanced frame.
            if (cv2.waitKey(1) & 0xFF) == ord('c'):
                countc += 1
                name = "pictures/bizhi" + str(countc) + ".jpg"
                cv2.imwrite(name, frame)
                print('succeed!')

            # Report pixel info when the mouse-picked point (ax, ay, set by
            # the module-level mouse_event callback) changes.
            if ax2 != ax:
                print('PIX:', ax, ay)
                print('BGR:', frame[ay, ax])
                print('HSV:', hsv[ay, ax])
                ax2 = ax
                ay2 = ay

            # Segment the target color, clean the mask, find contours.
            mask = cv2.inRange(frame, lower_green, upper_green)
            mask = cv2.blur(mask, (9, 9))
            mask = cv2.Canny(mask, 120, 140)
            mask = cv2.blur(mask, (5, 5))
            # NOTE(review): 3-value findContours return is OpenCV 3.x only.
            image, cnt, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                                     cv2.CHAIN_APPROX_SIMPLE)

            # Largest enclosing circle above the minimum radius wins.
            rad1 = 15
            cen1 = (160, 120)
            for cnt1 in cnt:
                cen, rad = cv2.minEnclosingCircle(cnt1)
                # np.intp: np.int0 was an alias for it, removed in NumPy 2.0.
                cen = tuple(np.intp(cen))
                rad = np.int32(rad)
                if (rad >= rad1):
                    rad1 = int(rad / 1.18)
                    cen1 = cen
            frame = cv2.circle(frame2, cen1, rad1, (0, 255, 0), 2)
            frame = cv2.circle(frame2, cen1, 1, (255, 0, 0), 2)

            # Map to real-world axes. Units mm; real target radius 150,
            # delta pitch 91, delta yaw 68.
            P = 240 / 4.8  # pixel density, pixels per mm
            D2 = 6         # focal length
            Q2 = 91        # delta pitch
            Q1 = 68        # delta yaw
            Rreal = 150
            # Lens-to-object distance via similar triangles.
            D1 = (Rreal / (rad1 / P)) * D2
            # Offset-corrected center (computed but currently unused).
            cenn0 = int(cen1[0] - (Q1 / D1) * D2 * P)
            cenn1 = int(cen1[1] - (Q2 / D1) * D2 * P)

            # Convert the pixel offset into yaw/pitch angles, then pack each
            # angle into two 6-bit bytes (head byte flagged with bit 6).
            yawx = cen1[0] - 160 - 31
            pitchy = 120 - cen1[1]
            yawAngle = math.atan(yawx / 50 / D2) * 180 / 3.14159
            pitchAngle = math.atan(pitchy / 50 / D2) * 180 / 3.14159
            intYawAngle = int(yawAngle * 5 + 220 + 140)
            intPitchAngle = int(pitchAngle * 5 + 110)

            head = int(intYawAngle / 64)                       # high 6 bits
            tail = (intYawAngle - int(intYawAngle / 64) * 64)  # low 6 bits
            head = head + 64                                   # head flag bit
            head = bytes(chr(head), encoding="utf-8")
            ser.write(head)
            time.sleep(0.0003)
            tail = bytes(chr(tail), encoding="utf-8")
            ser.write(tail)
            time.sleep(0.0003)

            head = int(intPitchAngle / 64)                         # high 6 bits
            tail = (intPitchAngle - int(intPitchAngle / 64) * 64)  # low 6 bits
            head = head + 64                                       # head flag bit
            head = bytes(chr(head), encoding="utf-8")
            ser.write(head)
            time.sleep(0.0003)
            tail = bytes(chr(tail), encoding="utf-8")
            ser.write(tail)

            cv2.imshow("Press q to end", frame2)
            name2 = "sendPitcures/bizhi" + ".jpg"
            # cv2.imwrite(name2, frame2)  # sudo chmod 666 /dev/ttyUSB0
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the serial port and the camera; free the frame buffer.
    ser.close()
    mvsdk.CameraUnInit(hCamera)
    mvsdk.CameraAlignFree(pFrameBuffer)
def startCam(self):
    """Find the camera whose serial number matches self.cam_sn, configure it
    (manual exposure self.exposetime ms, per-channel gains self.once_wb) and
    start streaming.

    On success stores the mvsdk module, the camera handle and the ISP output
    buffer on self (self.mvsdk / self.hCamera / self.pFrameBuffer).
    Prints a diagnostic and returns early when no matching camera is found
    or CameraInit fails.
    """
    # Enumerate attached cameras.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    print('Camera numbers:', nDev)

    # FIX: DevInfo was never initialized, so when no serial number matched
    # (including when no camera was attached at all) the check below raised
    # NameError instead of printing the intended diagnostic.
    DevInfo = 0
    for i in range(nDev):
        if self.cam_sn == DevList[i].acSn:
            DevInfo = DevList[i]
            break
    if DevInfo == 0:
        print('color_cam_sn is wrong, can not get Devinfo, please check')
        return
    print("Devinfo:", DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Capability query; a mono sensor gets MONO8 ISP output directly.
    cap = mvsdk.CameraGetCapability(hCamera)
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition, manual exposure.
    mvsdk.CameraSetTriggerMode(hCamera, 0)
    mvsdk.CameraSetAeState(hCamera, 0)
    # Per-channel color gain from configuration (brightens the image).
    mvsdk.CameraSetGain(hCamera, int(self.once_wb[0]), int(self.once_wb[1]),
                        int(self.once_wb[2]))
    mvsdk.CameraSetAnalogGain(hCamera, 20)
    mvsdk.CameraSetExposureTime(hCamera, float(self.exposetime) * 1000)
    tmp_ok = mvsdk.CameraGetSharpness(hCamera)
    print(tmp_ok)

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # ISP output buffer sized for the camera's maximum resolution. The camera
    # delivers RAW data; the software ISP converts it into this buffer (mono
    # cameras skip the conversion but still need the buffer).
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    self.mvsdk = mvsdk
    self.hCamera = hCamera
    self.pFrameBuffer = pFrameBuffer