Example #1
def main_loop():
    # Enumerate connected cameras
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query the camera capability descriptor
    cap = mvsdk.CameraGetCapability(hCamera)

    # Check whether this is a monochrome or a color camera
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # For a mono camera, have the ISP output MONO data directly instead of expanding it into 24-bit grayscale with R=G=B
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Acquisition mode: 0 = continuous, 1 = software trigger, 2 = hardware trigger
    mvsdk.CameraSetTriggerMode(hCamera, 2)

    mvsdk.CameraGetTriggerMode(hCamera)

    # Set and query the hardware trigger signal type: 0 = rising edge, 1 = falling edge, 2 = high level, 3 = low level
    mvsdk.CameraSetExtTrigSignalType(hCamera, 1)

    mvsdk.CameraGetExtTrigSignalType(hCamera)

    # Set and query the trigger delay in external-trigger mode, in microseconds. After the hardware trigger signal arrives, the camera waits this long before starting to capture the image.
    mvsdk.CameraSetTriggerDelayTime(hCamera, 0)

    mvsdk.CameraGetTriggerDelayTime(hCamera)

    # Alternative functions for setting the external-trigger delay
    # mvsdk.CameraSetExtTrigDelayTime(hCamera, 0)

    # mvsdk.CameraGetExtTrigDelayTime(hCamera)

    # Number of frames captured per trigger in trigger mode (default is 1)
    # mvsdk.CameraSetTriggerCount(hCamera, 1)

    # mvsdk.CameraGetTriggerCount(hCamera)

    # Manual exposure with a 30 ms exposure time
    # mvsdk.CameraSetAeState(hCamera, 0)
    # mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

    # Start the SDK's internal image-grabbing thread
    mvsdk.CameraPlay(hCamera)

    # Compute the required RGB buffer size; allocate for the camera's maximum resolution
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)
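    # Worked example (hypothetical numbers): for a 1280x1024 color sensor this is
    # 1280 * 1024 * 3 = 3,932,160 bytes; the same sensor in mono needs only
    # 1280 * 1024 * 1 = 1,310,720 bytes.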

    # Allocate the RGB buffer that will hold the ISP output image
    # Note: the camera sends RAW data to the PC, where the software ISP converts it to RGB. A mono camera needs no format conversion, but the ISP still performs other processing, so the buffer is required either way.
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Grab one frame from the camera
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # The frame is now stored in pFrameBuffer: RGB data for a color camera, 8-bit grayscale for a mono camera
            # Wrap pFrameBuffer as an OpenCV image for further processing
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
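            # Note (assumption): for color cameras the ISP's default output is in BGR
            # byte order, which is what cv2.imshow() expects, so no cv2.cvtColor()
            # conversion is done here.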

            cv2.imshow("capture", frame)

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the camera
    mvsdk.CameraUnInit(hCamera)

    # Free the frame buffer
    mvsdk.CameraAlignFree(pFrameBuffer)
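These snippets assume the module-level imports below. A minimal sketch of how Example #1 might be wired into a script, assuming the vendor's Python binding is importable as mvsdk; the final cv2.destroyAllWindows() call is an added cleanup step, not part of the original:

import cv2
import numpy as np
import mvsdk
import time  # only used for the timing in Example #3

if __name__ == "__main__":
    main_loop()
    cv2.destroyAllWindows()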
Example #2
    def __init__(self):
        # Enumerate connected cameras
        DevList = mvsdk.CameraEnumerateDevice()
        self.nDev = len(DevList)
        if self.nDev < 1:
            print("No camera was found!")
            return

        self.DevInfo = DevList[0]
        print(self.DevInfo)

        # Open the camera
        self.hCamera = 0
        try:
            self.hCamera = mvsdk.CameraInit(self.DevInfo, -1, -1)
        except mvsdk.CameraException as e:
            print("CameraInit Failed({}): {}".format(e.error_code, e.message))
            return

        # Query the camera capability descriptor
        self.cap = mvsdk.CameraGetCapability(self.hCamera)

        # Check whether this is a monochrome or a color camera
        self.monoCamera = (self.cap.sIspCapacity.bMonoSensor != 0)

        # For a mono camera, have the ISP output MONO data directly instead of expanding it into 24-bit grayscale with R=G=B
        if self.monoCamera:
            mvsdk.CameraSetIspOutFormat(self.hCamera,
                                        mvsdk.CAMERA_MEDIA_TYPE_MONO8)

        # Acquisition mode: 0 = continuous, 1 = software trigger, 2 = hardware trigger
        mvsdk.CameraSetTriggerMode(self.hCamera, 0)

        mvsdk.CameraGetTriggerMode(self.hCamera)

        # Set and query the hardware trigger signal type: 0 = rising edge, 1 = falling edge, 2 = high level, 3 = low level
        # mvsdk.CameraSetExtTrigSignalType(self.hCamera, 1)

        # mvsdk.CameraGetExtTrigSignalType(self.hCamera)

        # Set and query the trigger delay in external-trigger mode, in microseconds. After the hardware trigger signal arrives, the camera waits this long before starting to capture the image.
        # mvsdk.CameraSetTriggerDelayTime(self.hCamera, 100)

        # mvsdk.CameraGetTriggerDelayTime(self.hCamera)

        # Alternative functions for setting the external-trigger delay
        # mvsdk.CameraSetExtTrigDelayTime(self.hCamera, 0)

        # mvsdk.CameraGetExtTrigDelayTime(self.hCamera)

        # Number of frames captured per trigger in trigger mode (default is 1)
        # mvsdk.CameraSetTriggerCount(self.hCamera, 1)

        # mvsdk.CameraGetTriggerCount(self.hCamera)

        # Manual exposure with a 30 ms exposure time
        mvsdk.CameraSetAeState(self.hCamera, 0)
        mvsdk.CameraSetExposureTime(self.hCamera, 30 * 1000)

        # Set and query the analog gain
        mvsdk.CameraSetAnalogGain(self.hCamera, 1)
        mvsdk.CameraGetAnalogGain(self.hCamera)

        # Start the SDK's internal image-grabbing thread
        mvsdk.CameraPlay(self.hCamera)

        # Compute the required RGB buffer size; allocate for the camera's maximum resolution
        self.FrameBufferSize = self.cap.sResolutionRange.iWidthMax * self.cap.sResolutionRange.iHeightMax *\
                               (1 if self.monoCamera else 3)

        # Set the camera resolution; for external triggering, crop to a centered 800x600 region
        mvsdk.CameraSetImageResolution(
            self.hCamera, mvsdk.CameraCustomizeResolution(self.hCamera))

        # Allocate the RGB buffer that will hold the ISP output image
        # Note: the camera sends RAW data to the PC, where the software ISP converts it to RGB. A mono camera needs no format conversion, but the ISP still performs other processing, so the buffer is required either way.
        self.pFrameBuffer = mvsdk.CameraAlignMalloc(self.FrameBufferSize, 16)
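Example #2 only shows the constructor. A sketch of a grab method for the same class, reusing the calls from Examples #1 and #3; the method name grab and the 200 ms timeout are assumptions, not part of the original code:

    def grab(self):
        # Fetch one RAW frame, run it through the software ISP, and wrap the result as a NumPy array
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(self.hCamera, 200)
            mvsdk.CameraImageProcess(self.hCamera, pRawData, self.pFrameBuffer, FrameHead)
            mvsdk.CameraReleaseImageBuffer(self.hCamera, pRawData)

            frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(self.pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            return frame.reshape(
                (FrameHead.iHeight, FrameHead.iWidth,
                 1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message))
            return None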
Example #3
def main_loop():
    # Enumerate connected cameras
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query the camera capability descriptor
    cap = mvsdk.CameraGetCapability(hCamera)

    # Check whether this is a monochrome or a color camera
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # For a mono camera, have the ISP output MONO data directly instead of expanding it into 24-bit grayscale with R=G=B
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Switch the acquisition mode to software trigger (mode 1)
    mvsdk.CameraSetTriggerMode(hCamera, 1)

    # Query the current trigger mode
    mvsdk.CameraGetTriggerMode(hCamera)

    # Set the STROBE (flash) signal mode: 0 = automatic (delay time and pulse width cannot be set), 1 = manual
    mvsdk.CameraSetStrobeMode(hCamera, 1)

    # Set and query the strobe polarity (active-high or active-low)
    mvsdk.CameraSetStrobePolarity(hCamera, 1)
    mvsdk.CameraGetStrobePolarity(hCamera)

    # Set the strobe delay time
    mvsdk.CameraSetStrobeDelayTime(hCamera, 0)

    # Set the strobe pulse width in microseconds, equal to the camera's exposure time
    mvsdk.CameraSetStrobePulseWidth(hCamera, 1 * 1000)

    # Manual exposure; set the exposure time (exposureTime is in microseconds)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 1 * 1000)
    mvsdk.CameraGetExposureTime(hCamera)
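    # Sketch (assumption): the strobe pulse width set above is meant to equal the
    # exposure time, so deriving both from a single variable keeps them in sync if
    # the exposure is changed later; "exposure_us" is a hypothetical name.
    # exposure_us = 1 * 1000
    # mvsdk.CameraSetExposureTime(hCamera, exposure_us)
    # mvsdk.CameraSetStrobePulseWidth(hCamera, exposure_us)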

    # Set and query the analog gain
    mvsdk.CameraSetAnalogGain(hCamera, 8)
    mvsdk.CameraGetAnalogGain(hCamera)

    # Set the camera frame-speed level and read it back
    # mvsdk.CameraSetFrameSpeed(hCamera,1 )
    # mvsdk.CameraGetFrameSpeed(hCamera)

    # Start the SDK's internal image-grabbing thread
    mvsdk.CameraPlay(hCamera)

    # Compute the required RGB buffer size; allocate for the camera's maximum resolution
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Allocate the RGB buffer that will hold the ISP output image
    # Note: the camera sends RAW data to the PC, where the software ISP converts it to RGB. A mono camera needs no format conversion, but the ISP still performs other processing, so the buffer is required either way.
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Grab one frame from the camera
        start = time.perf_counter()  # requires "import time"; times one trigger-to-display cycle
        try:
            mvsdk.CameraSoftTrigger(hCamera)
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # The frame is now stored in pFrameBuffer: RGB data for a color camera, 8-bit grayscale for a mono camera
            # Wrap pFrameBuffer as an OpenCV image for further processing
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
            cv2.namedWindow('capture', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('capture', 1600, 1200)
            frame = cv2.flip(frame, 0)
            # frame = cv2.GaussianBlur(frame,(5,5),5)
            # frame = cv2.medianBlur(frame,5)
            # frame = cv2.bilateralFilter(frame,9,75,75)
            cv2.imshow("capture", frame)
            end = time.perf_counter()
            print('one frame took', end - start, 's')

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the camera
    mvsdk.CameraUnInit(hCamera)

    # Free the frame buffer
    mvsdk.CameraAlignFree(pFrameBuffer)
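In all three examples the cleanup calls are skipped if the loop body raises. A minimal sketch of a safer layout, assuming the same SDK calls as above (main_with_cleanup is a hypothetical name), moves CameraUnInit and CameraAlignFree into a finally block:

def main_with_cleanup():
    # Hypothetical variant of main_loop(): identical setup, but cleanup is guaranteed
    DevList = mvsdk.CameraEnumerateDevice()
    if not DevList:
        print("No camera was found!")
        return
    hCamera = mvsdk.CameraInit(DevList[0], -1, -1)
    cap = mvsdk.CameraGetCapability(hCamera)
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * 3
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
    try:
        mvsdk.CameraPlay(hCamera)
        # ... grab, process, and display frames as in the examples above ...
    finally:
        mvsdk.CameraUnInit(hCamera)
        mvsdk.CameraAlignFree(pFrameBuffer)
        cv2.destroyAllWindows()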