예제 #1
0
    def run(self):
        """Grab frames from a user-selected camera and display them until 'q'.

        Enumerates cameras via mvsdk, opens the chosen one through
        Cam.Camera, then shows frames in an OpenCV window, optionally running
        the ZBar QR detector on each frame when self.usemethod1flag is set.
        All OpenCV windows are destroyed on exit.
        """
        try:
            # Enumerate cameras
            DevList = mvsdk.CameraEnumerateDevice()
            nDev = len(DevList)
            if nDev < 1:
                print("No camera was found!")
                return

            for i, DevInfo in enumerate(DevList):
                print("{}: {} {}".format(i, DevInfo.GetFriendlyName(),
                                         DevInfo.GetPortType()))

            i = 0 if nDev == 1 else int(input("Select camera: "))
            cam = Cam.Camera(DevList[i])
            if not cam.open():
                print('cam open error...')
                return

            try:
                while (cv2.waitKey(1) & 0xFF) != ord('q'):
                    frame = cam.grab()
                    if frame is not None:
                        # -------------insert your own algorithms below--------------------
                        if self.usemethod1flag:
                            qrd.qrDectbyZbar(frame)
                        # -----------------------------end---------------------------------
                        cv2.imshow(
                            "{} Press q to end".format(
                                cam.DevInfo.GetFriendlyName()), frame)
            finally:
                # Bug fix: cam.close() previously ran only on a clean loop
                # exit, leaking the camera when grab/detect/imshow raised.
                cam.close()
        finally:
            cv2.destroyAllWindows()
예제 #2
0
def main_loop():
    """Open one or more user-selected cameras and display their frames.

    Enumerates cameras, lets the user pick several by index (space-separated),
    then shows each camera's frames (resized to 640x480) in its own window
    until 'q' is pressed. Every successfully opened camera is closed on exit.
    """
    # Enumerate cameras
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(),
                                 DevInfo.GetPortType()))

    cams = []
    # Bug fix: raw_input() is Python 2 only (NameError on Python 3); the rest
    # of this code is already Python 3 style, matching input() used elsewhere.
    for i in map(int, input("Select cameras: ").split()):
        cam = Camera(DevList[i])
        if cam.open():
            cams.append(cam)

    try:
        while (cv2.waitKey(1) & 0xFF) != ord('q'):
            for cam in cams:
                frame = cam.grab()
                if frame is not None:
                    frame = cv2.resize(frame, (640, 480),
                                       interpolation=cv2.INTER_LINEAR)
                    cv2.imshow(
                        "{} Press q to end".format(cam.DevInfo.GetFriendlyName()),
                        frame)
    finally:
        # Robustness: close every opened camera even if grab/display raises.
        for cam in cams:
            cam.close()
예제 #3
0
    def startCam(self):
        """Find the camera whose serial matches self.color_cam_sn and start it.

        Configures continuous acquisition with manual exposure
        (self.exposetime, in milliseconds), allocates an aligned frame
        buffer, and stores the SDK module, camera handle and buffer on self
        (self.mvsdk / self.hCamera / self.pFrameBuffer). Prints an error and
        returns early if no camera or no matching serial number is found.
        """
        DevList = mvsdk.CameraEnumerateDevice()
        nDev = len(DevList)
        print('Camera numbers:', nDev)
        if nDev < 1:
            print("No camera was found!")
            return
        # Pick the device matching the configured serial number. Fix: a None
        # sentinel replaces the old `DevInfo = 0` integer sentinel.
        DevInfo = next(
            (dev for dev in DevList if dev.acSn == self.color_cam_sn), None)
        if DevInfo is None:
            print('color_cam_sn is wrong, can not get Devinfo, please check')
            return
        print("Devinfo:", DevInfo)

        # Open the camera
        try:
            hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
        except mvsdk.CameraException as e:
            print("CameraInit Failed({}): {}".format(e.error_code, e.message))
            return

        # Get the camera capability description
        cap = mvsdk.CameraGetCapability(hCamera)

        # Mono or color sensor?
        monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

        # For mono cameras, have the ISP output MONO8 directly instead of
        # expanding to 24-bit R=G=B grayscale.
        if monoCamera:
            mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

        # Continuous acquisition mode
        mvsdk.CameraSetTriggerMode(hCamera, 0)

        # Manual exposure; self.exposetime is in milliseconds, the SDK call
        # takes microseconds.
        mvsdk.CameraSetAeState(hCamera, 0)
        #        mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)
        mvsdk.CameraSetExposureTime(hCamera, int(self.exposetime) * 1000)

        # Start the SDK's internal frame-grabbing thread
        mvsdk.CameraPlay(hCamera)

        # RGB buffer sized for the camera's maximum resolution
        FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
            1 if monoCamera else 3)

        # Allocate the RGB buffer that receives ISP output. The camera sends
        # RAW data; the software ISP converts it to RGB on the PC (mono
        # cameras skip the conversion but still need this buffer).
        pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

        self.mvsdk = mvsdk
        self.hCamera = hCamera
        self.pFrameBuffer = pFrameBuffer
예제 #4
0
def get_first_available_camera():
    """Return the DevInfo of the first enumerated camera, or None if absent.

    Prints an error message when no camera is attached, otherwise prints the
    chosen device's info structure before returning it.
    """
    devices = mvsdk.CameraEnumerateDevice()
    if not devices:
        print("No camera was found!")
        return None

    first = devices[0]
    print(first)
    return first
예제 #5
0
파일: show_cam.py 프로젝트: ty9071/suzly2
 def getUsbCamSns(self):
     """Enumerate attached cameras and print each device's info structure.

     Diagnostic helper: prints the camera count, then one banner and DevInfo
     dump per enumerated device. Returns nothing.
     """
     DevList = mvsdk.CameraEnumerateDevice()
     nDev = len(DevList)
     print('Camera numbers:', nDev)
     if nDev < 1:
         print("No camera was found!")
         return
     # Bug fix: the old `DevInfo = 0` sentinel plus the trailing
     # `if DevInfo == 0` error branch were unreachable dead code copied from
     # another example (nDev >= 1 guarantees at least one assignment here).
     for i in range(nDev):
         print("Cam %s=====================================" % (i))
         print("Devinfo:", DevList[i])
예제 #6
0
 def run(self):
     """Release-camera worker: retry until every enumerated device is uninit'd.

     Loops while self.releaseCam is False; on a full pass over the device
     list without a CameraException, sets self.releaseCam = True and stops.
     """
     while self.releaseCam == False:
         try:
             DevList = mvsdk.CameraEnumerateDevice()
             for Dev in DevList:
                 time.sleep(1)
                 #hCamera = mvsdk.CameraInit(Dev, -1, -1)
                 #  print(mvsdk.CameraIsOpened(Dev))
                 print(Dev)
                 # NOTE(review): CameraUnInit normally takes a camera handle
                 # from CameraInit, not a DevInfo structure — confirm this
                 # call actually releases the device as intended.
                 mvsdk.CameraUnInit(Dev)
         #     print(tmp)
         #     time.sleep(0.5)
             self.releaseCam = True
             print('ReleaseCamera Done!', self.releaseCam)
         except mvsdk.CameraException as e:
             print("CameraInit Failed({}): {}".format(
                 e.error_code, e.message))
예제 #7
0
    def __init__(self, debug=True):
        """Initialize the armor-detection ROS node.

        Creates the ROS publishers and action server, loads detection
        parameters from detector_params.json, builds the detection pipeline
        stages from utils, and opens the first industrial camera found.

        Args:
            debug: passed through as enable_debug to every pipeline stage.

        Raises:
            IOError: if no camera is enumerated.
        """

        #init the detection_node
        self.enemy_info_pub_ = rospy.Publisher("cmd_gimbal_angle",
                                               GimbalAngle,
                                               queue_size=10)
        self.enemy_flag_pub_ = rospy.Publisher("Detection/flag",
                                               String,
                                               queue_size=10)
        self.shoot_pub_ = rospy.Publisher("/cmd_shoot",
                                          ShootState,
                                          queue_size=10)
        # NOTE(review): rospy convention is to call init_node() before
        # creating publishers — confirm the publishers above register
        # correctly when constructed first.
        rospy.init_node("armor_detection_node")

        self.fric_req = FricWhlRequest()
        self.fric_res = FricWhlResponse()

        # Multi-processing related definition
        self.parent_dt_pipe, self.frame_dt_pipe = mp.Pipe()
        #self.classifier = classifier
        self.status = 'DETECT'
        # NOTE(review): actionlib docs recommend auto_start=False followed by
        # an explicit start() to avoid a startup race — confirm
        # auto_start=True is intended here.
        self.as_ = actionlib.SimpleActionServer("armor_detection_node_action",
                                                ArmorDetectionAction,
                                                execute_cb=self.ActionCB,
                                                auto_start=True)

        self.initialized = False
        #rospy.init_node('armor_detection_node')
        # Publishers
        #self.enemy_info_pub_ = rospy.Publisher("cmd_gimbal_angle",GimbalAngle,100)
        ##self.enemy_flag_pub_ = rospy.Publisher("Detection/flag",String,100)
        # Initialize Algorithm related module:
        # Tracking
        with open('detector_params.json', 'r') as file:
            self.params = json.load(file)

        #self.fps_count = 0
        # Detection pipeline stages, all configured from detector_params.json.
        self.det_thresholdcolor = utils.BGRImageProc(
            color=self.params['color'],
            threshs=self.params['bgr_threshs'],
            enable_debug=debug)
        self.det_grey = utils.GrayImageProc()

        self.det_getlightbars = utils.ScreenLightBars(
            thresh=self.params['bright_thresh'], enable_debug=debug)

        self.det_filterlightbars = utils.FilterLightBars(
            light_max_aspect_ratio=self.params['light_max_aspect_ratio'],
            light_min_area=self.params['light_min_area'],
            light_max_area=self.params['light_max_area'],
            light_min_aspect_ratio=self.params['light_min_aspect_ratio'],
            light_max_angle_abs=self.params['light_max_angle_abs'],
            enable_debug=debug)

        self.det_possiblearmors = utils.PossibleArmors(
            light_max_angle_diff=self.params['light_max_angle_diff'],
            armor_max_aspect_ratio=self.params['armor_max_aspect_ratio'],
            armor_min_area=self.params['armor_min_area'],
            armor_max_pixel_val=self.params['armor_max_pixel_val'],
            armor_center_min_color_average=self.
            params['armor_center_min_color_average'],
            enable_debug=debug)
        '''self.det_filterarmors = utils.FilterArmors(armor_max_stddev=self.params['armor_max_stddev'],
                                                    armor_max_mean=self.params['armor_max_mean'],
                                                    enable_debug=debug) '''

        self.det_selectarmor = utils.SelectFinalArmor(enable_debug=debug)

        # Stored as the callable itself (not an instance).
        self.det_armor2box = utils.Armor2Bbox

        #self.undetected_msg_published = False ---

        #self.detect_task = mp.Process(target=self.DetectionLoop)

        # Enemy pose information (shared doubles, usable across processes)
        self.x = mp.Value('d', 0.0)
        self.y = mp.Value('d', 0.0)
        self.z = mp.Value('d', 0.0)
        #self.gimbal_angle = GimbalAngle() ---
        self.gimbal_angle = GimbalAngle()
        #self.detected_flag= String()
        self.detected_flag = String()

        self.shoot = ShootState()
        # Open Camera: first enumerated device.
        # NOTE(review): 30 is presumably a frame rate — confirm against
        # utils.Camera's signature.
        DevList = mvsdk.CameraEnumerateDevice()
        if len(DevList) < 1:
            raise IOError('No device found')
        self.camera = utils.Camera(30, DevList[0])
예제 #8
0
def main():
    """Grab a single frame from the first camera and save it as a BMP.

    Enumerates cameras, opens the first one, configures continuous
    acquisition with 30 ms manual exposure, grabs one frame, and saves it to
    z:\\grab.bmp (8-bit BMP for mono cameras, 24-bit otherwise).

    Fix: the camera handle and the aligned frame buffer are now released in a
    finally block, so they are no longer leaked when an SDK call raises
    unexpectedly after CameraInit succeeds.
    """
    # Enumerate cameras
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    pFrameBuffer = None
    try:
        # Get the camera capability description
        cap = mvsdk.CameraGetCapability(hCamera)
        PrintCapbility(cap)

        # Mono or color sensor?
        monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

        # For mono cameras, let the ISP output MONO8 directly instead of
        # expanding to 24-bit R=G=B grayscale.
        if monoCamera:
            mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

        # Continuous acquisition mode
        mvsdk.CameraSetTriggerMode(hCamera, 0)

        # Manual exposure, 30 ms
        mvsdk.CameraSetAeState(hCamera, 0)
        mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

        # Start the SDK's internal frame-grabbing thread
        mvsdk.CameraPlay(hCamera)

        # RGB buffer sized for the camera's maximum resolution
        FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)

        # Allocate the RGB buffer that receives ISP output (the camera sends
        # RAW data which the software ISP converts on the PC; mono cameras
        # skip the conversion but still need this buffer).
        pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

        # Grab one frame from the camera
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 2000)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # The frame now lives in pFrameBuffer (RGB for color cameras,
            # 8-bit grayscale for mono); save it to disk.
            status = mvsdk.CameraSaveImage(
                hCamera, "z:\\grab.bmp", pFrameBuffer, FrameHead,
                mvsdk.FILE_BMP_8BIT if monoCamera else mvsdk.FILE_BMP, 100)
            if status == mvsdk.CAMERA_STATUS_SUCCESS:
                print("Save image successfully. image_size = {}X{}".format(FrameHead.iWidth, FrameHead.iHeight))
            else:
                print("Save image failed. err={}".format(status))
        except mvsdk.CameraException as e:
            print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message))
    finally:
        # Close the camera
        mvsdk.CameraUnInit(hCamera)

        # Free the frame buffer (only if it was allocated)
        if pFrameBuffer is not None:
            mvsdk.CameraAlignFree(pFrameBuffer)
예제 #9
0
파일: camera2.py 프로젝트: ustc-mbit/time
    def run(self):
        """Camera worker thread: open the first camera, stream frames, emit
        each one as a QImage via self.changePixmap, and honor runtime
        setting-change and single-shot capture flags.
        """

        # Acquire and run the camera
        DevList = mvsdk.CameraEnumerateDevice()
        nDev = len(DevList)
        if nDev < 1:
            print("No camera was found!")
            return

        DevInfo = DevList[0]
        print(DevInfo)

        # Open the camera
        hCamera = 0
        try:
            hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
        except mvsdk.CameraException as e:
            print("CameraInit Failed({}): {}".format(e.error_code, e.message))
            return

        # Get the camera capability description
        cap = mvsdk.CameraGetCapability(hCamera)

        # Mono or color camera?
        monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

        # For mono cameras let the ISP output MONO8 directly instead of
        # expanding to 24-bit R=G=B grayscale
        if monoCamera:
            mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

        # Continuous acquisition mode
        mvsdk.CameraSetTriggerMode(hCamera, 0)

        # Manual exposure, 30 ms
        mvsdk.CameraSetAeState(hCamera, 0)
        mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

        # Start the SDK's internal frame-grabbing thread
        mvsdk.CameraPlay(hCamera)

        # RGB buffer size, allocated for the camera's maximum resolution
        FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)

        # Allocate the RGB buffer that receives ISP output (RAW data from the
        # camera is converted to RGB by the software ISP on the PC; mono
        # cameras skip the conversion but still need this buffer)
        pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
        # NOTE(review): no try/except around CameraGetImageBuffer — a timeout
        # raises CameraException out of this thread. The loop never exits, so
        # the handle and buffer are never released. Confirm intended.
        while True:

                pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
                mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
                mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

                # The frame is now in pFrameBuffer (RGB for color cameras,
                # 8-bit grayscale for mono); convert it to an OpenCV image
                frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)

                frame = np.frombuffer(frame_data, dtype=np.uint8)
                frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                       1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
                # Convert the frame to Qt format
                # NOTE(review): Format_Indexed8 assumes an 8-bit mono frame —
                # confirm color frames are not routed through here.
                rawImage = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_Indexed8)
                self.changePixmap.emit(rawImage) # display the image
                # Were the camera settings changed at runtime?
                if self.isCameraSettingChanged:
                    print("CameraSettingChanged")
                    # Update the exposure time (self.Exposuretime is in ms)
                    print("修改后的曝光时间",self.Exposuretime)
                    mvsdk.CameraSetExposureTime(hCamera, self.Exposuretime * 1000)
                    # Update FPS

                    # Update the analog gain
                    print("修改后的增益", self.Gain)
                    mvsdk.CameraSetAnalogGain(hCamera, self.Gain)

                    # Done updating
                    self.isCameraSettingChanged=0
                # Single-shot capture requested?
                if self.isCapture:
                    print("capturePicture")
                    # Use the current timestamp as the image name
                    PictureName = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                    print(PictureName)
                    status = mvsdk.CameraSaveImage(hCamera, "D:\\"+PictureName+".bmp", pFrameBuffer, FrameHead,
                                                   mvsdk.FILE_BMP_8BIT if monoCamera else mvsdk.FILE_BMP, 100)
                    print("capturePicture")

                    if status == mvsdk.CAMERA_STATUS_SUCCESS:
                        print("Save image successfully. image_size = {}X{}".format(FrameHead.iWidth,
                                                                                   FrameHead.iHeight))
                    else:
                        print("Save image failed. err={}".format(status))
                # Reset the capture flag every iteration (even when no capture
                # was requested)
                self.isCapture = 0
예제 #10
0
def main_loop():
    """Soft-trigger capture loop.

    Configures the first camera for soft-trigger acquisition with manual
    strobe/exposure/gain, then per iteration: fires a soft trigger, grabs a
    frame, flips it, shows it in a resizable window, and prints the frame
    time. Exits when 'q' is pressed, then releases the camera and buffer.
    """
    # Enumerate cameras
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Get the camera capability description
    cap = mvsdk.CameraGetCapability(hCamera)

    # Mono or color camera?
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # For mono cameras let the ISP output MONO8 directly instead of expanding
    # to 24-bit R=G=B grayscale
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Switch the camera to soft-trigger acquisition (1 = soft trigger)
    mvsdk.CameraSetTriggerMode(hCamera, 1)

    # Read back the trigger mode
    mvsdk.CameraGetTriggerMode(hCamera)

    # Strobe signal mode: 0 = automatic (no delay/pulse-width control),
    # 1 = manual
    mvsdk.CameraSetStrobeMode(hCamera, 1)

    # Set the strobe polarity (high/low level) and read it back
    mvsdk.CameraSetStrobePolarity(hCamera, 1)
    mvsdk.CameraGetStrobePolarity(hCamera)

    # # Strobe delay time
    mvsdk.CameraSetStrobeDelayTime(hCamera, 0)
    #
    # # Strobe pulse width in microseconds, equal to the exposure time
    mvsdk.CameraSetStrobePulseWidth(hCamera, 1 * 1000)

    # Manual exposure (exposure time is in microseconds)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 1 * 1000)
    mvsdk.CameraGetExposureTime(hCamera)

    # Set the analog gain and read it back
    mvsdk.CameraSetAnalogGain(hCamera, 8)
    mvsdk.CameraGetAnalogGain(hCamera)

    # Set the camera frame speed and read it back
    # mvsdk.CameraSetFrameSpeed(hCamera,1 )
    # mvsdk.CameraGetFrameSpeed(hCamera)

    # Start the SDK's internal frame-grabbing thread
    mvsdk.CameraPlay(hCamera)

    # RGB buffer size, allocated for the camera's maximum resolution
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Allocate the RGB buffer that receives ISP output (RAW from the camera
    # is converted to RGB by the software ISP on the PC; mono cameras skip
    # the conversion but still need this buffer)
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Grab one frame from the camera
        # NOTE(review): `test.clock()` — `test` must be defined elsewhere in
        # this file; presumably an alias of a time-like module. Confirm.
        start = test.clock()
        try:
            mvsdk.CameraSoftTrigger(hCamera)
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # The frame is now in pFrameBuffer (RGB for color cameras, 8-bit
            # grayscale for mono); convert it to an OpenCV image
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
            cv2.namedWindow('capture', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('capture', 1600, 1200)
            frame = cv2.flip(frame, 0)
            # frame = cv2.GaussianBlur(frame,(5,5),5)
            # frame = cv2.medianBlur(frame,5)
            # frame = cv2.bilateralFilter(frame,9,75,75)
            cv2.imshow("capture", frame)
            end = test.clock()
            print('一帧', end - start)

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the camera
    mvsdk.CameraUnInit(hCamera)

    # Free the frame buffer
    mvsdk.CameraAlignFree(pFrameBuffer)
예제 #11
0
def main_loop():
    """Experimental projector-correction capture loop.

    Grabs frames continuously, saves the raw and corrected images to disk,
    computes a per-pixel distance metric between them, and composites the
    corrected frame for projector display. Exits when 'q' is pressed.

    NOTE(review): this function references several names with no local
    definition (image_input, img_3, img2, mode) — presumably module-level
    globals; confirm they exist before running.
    """
    l = 50
    t = 50
    fx = 0.4
    fy = 0.4
    # Enumerate cameras
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Get the camera capability description
    cap = mvsdk.CameraGetCapability(hCamera)

    # Mono or color camera?
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # For mono cameras let the ISP output MONO8 directly instead of expanding
    # to 24-bit R=G=B grayscale
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition mode
    mvsdk.CameraSetTriggerMode(hCamera, 0)

    # Manual exposure (original comment said 40 ms; the code sets 30 ms)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

    # # Manual gain
    # print('相机增益', mvsdk.CameraGetGain(hCamera))
    # Per-channel RGB gain
    mvsdk.CameraSetGain(hCamera, 200, 200, 200)

    # Start the SDK's internal frame-grabbing thread
    mvsdk.CameraPlay(hCamera)

    # RGB buffer size, allocated for the camera's maximum resolution
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Allocate the RGB buffer that receives ISP output (RAW from the camera
    # is converted to RGB by the software ISP on the PC; mono cameras skip
    # the conversion but still need this buffer)
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (1):
        # Grab one frame from the camera
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # The frame is now in pFrameBuffer (RGB for color cameras, 8-bit
            # grayscale for mono); convert it to an OpenCV image
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
            # # Live display
            # cv2.imshow('frame', frame)
            # Temporary: distance-mean output
            # BGR
            X = cv2.pyrDown(cv2.pyrDown(frame))
            # Save the raw frame
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\ori.bmp',
                frame)
            # # Flip the image
            # frame=cv2.flip(frame,0)
            # # Rotate
            w, h = frame.shape[:2]
            # print(w,h)
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))
            # cv2.imshow('capture1',frame)
            # cv2.waitKey(0)
            # print(frame,type(frame),frame.shape)
            # Deep-learning framework call
            # frame=deep_convert(frame)
            # Matrix framework
            # Filtering
            frame = cv2.GaussianBlur(frame, (0, 0), 1.8)
            # frame=cv2.blur(frame,(5,5))
            # frame=cv2.medianBlur(frame,9)
            # frame=cv2.bilateralFilter(frame,d=0,sigmaColor=50,sigmaSpace=15)
            # NOTE(review): image_input is not defined in this function —
            # presumably a module-level correction routine; confirm.
            frame = image_input(frame)

            # Temporary: compute the average distance
            Y = cv2.pyrDown(cv2.pyrDown(frame))
            # Save the corrected frame
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\correct.bmp',
                frame)
            # Show the distance mean
            # NOTE(review): rebinds `t` (initialized to 50 above) to a 2-D
            # array; it is reset to 50 further down.
            t = np.sum(((X - Y)**2)**0.5, axis=2)
            print('距离均值', np.mean(t), '距离方差', np.var(t))
            # Flip the image
            # frame = cv2.flip(frame, 1)
            # Live display
            # Filtering
            # frame=cv2.GaussianBlur(frame,(0,0),1.8)
            # frame=cv2.blur(frame,(5,5))

            # Used for display when no projector is attached
            # frame=cv2.medianBlur(frame,3)
            # frame=cv2.bilateralFilter(frame,d=0,sigmaSpace=15,sigmaColor=100)
            # cv2.imshow('frame',frame)
            # cv2.waitKey( )

            # # Correction relationship between camera and projector
            # fx=0.4
            # fy=0.4
            # switch(waitKey(1))
            # {
            #   fx=
            #   fy=
            #    l=
            #    t=
            # default: break;
            # }
            # frame=cv2.resize(frame,None,fx=fx,fy=fy)

            # Paste onto the background image
            w, h, d = frame.shape
            print('shape', w, h, d)
            # frame=cv2.flip(frame,0)
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))
            # print(w,h)
            # Origin l, t
            # cv2.waitKey()
            # 			# t =raw_input("请输入")
            t = 50
            l = 50

            # ti = cv2.waitKey(33)

            # if ti == 49:
            # 	fx=fx+0.01
            # elif ti == 50:
            # 	fy = fy + 0.01
            # else:
            # 	fx=fx2
            #########
            # img = cv2.imread('360.png')
            # print(img.shape)
            # img_copy = deepcopy(frame)
            # img2 = np.zeros([800, 1280, 3], np.uint8)
            # # img2 = np.zeros((1024, 1920, 3), np.uint8)
            # img2_copy = deepcopy(img2)
            # cv2.namedWindow('image_cp',cv2.WINDOW_NORMAL)
            # cv2.resizeWindow('image_cp',640,480)
            # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
            # cv2.resizeWindow('image', 640, 480)
            # cv2.namedWindow('image2', cv2.WINDOW_NORMAL)
            # cv2.resizeWindow('image2', 640, 480)
            # cv2.namedWindow('image2_cp',cv2.WINDOW_NORMAL)
            # cv2.resizeWindow('image2_cp',640,480)
            # cv2.setMouseCallback('image_0 ', draw_circle)
            # cv2.setMouseCallback('image2', copy_circle)
            ##

            # img_0[l:l + w, t:t + h, 0] = frame[:, :, 0]
            # img_0[l:l + w, t:t + h, 1] = frame[:, :, 1]
            # img_0[l:l + w, t:t + h, 2] = frame[:, :, 2]

            # img_copy = deepcopy(img_0)
            # img2 = np.zeros([800, 1280, 3], np.uint8)
            # img2 = np.zeros((1024, 1920, 3), np.uint8)
            # img2_copy = deepcopy(img2)
            frame = cv2.resize(frame, (1280, 800))
            # NOTE(review): img_3 is not defined in this function — presumably
            # a module-level background image; confirm.
            dst = cv2.add(frame, img_3)
            cv2.imshow('projector0', dst)

            # Image zoom and translation
            # NOTE(review): img2 has no prior local assignment (only the
            # commented-out code above created it), so this raises a
            # NameError/UnboundLocalError; also index 3 is out of range for a
            # 3-channel image — confirm intent.
            img2 = img2[0:600, 0:1000, 3]

            # Image shrink (adds a black border)
            cv2.copyMakeBorder(img2, 0, 0, 0, 0, cv2.BORDER_ISOLATED, img2,
                               (0, 0, 0))

            cv2.imshow('projector1', img2)
            k = cv2.waitKey(1) & 0xFF
            if k == ord('m'):
                # NOTE(review): assigning here makes `mode` function-local, so
                # `not mode` raises UnboundLocalError on first use — confirm.
                mode = not mode
            elif k == ord('q'):
                break
            # img2[l:l + w, t:t + h, 0] = frame[:, :, 0]
            # img2[l:l + w, t:t + h, 1] = frame[:, :, 1]
            # img2[l:l + w, t:t + h, 2] = frame[:, :, 2]
            # cv2.imshow('projector', img2)
            # frame1[l:l + w, t:t + h, 0] = frame[:, :, 0]
            # frame1[l:l + w, t:t + h, 1] = frame[:, :, 1]
            # frame1[l:l + w, t:t + h, 2] = frame[:, :, 2]
            # cv2.imshow('projector', frame1)
            #########
            # # img = cv2.imread('360.png')
            # # print(img.shape)
            # img_copy = deepcopy(img_0)
            # img2 = np.zeros([800, 1280, 3], np.uint8)
            # # img2 = np.zeros((1024, 1920, 3), np.uint8)
            # img2_copy = deepcopy(img2)
            # # cv2.namedWindow('image_cp',cv2.WINDOW_NORMAL)
            # # cv2.resizeWindow('image_cp',640,480)
            # # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
            # # cv2.resizeWindow('image', 640, 480)
            # # cv2.namedWindow('image2', cv2.WINDOW_NORMAL)
            # # cv2.resizeWindow('image2', 640, 480)
            # # cv2.namedWindow('image2_cp',cv2.WINDOW_NORMAL)
            # # cv2.resizeWindow('image2_cp',640,480)
            # cv2.waitKey(200)
            # ##
            # cv2.setMouseCallback('image_0 ', draw_circle)
            # cv2.setMouseCallback('image2', copy_circle)

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Close the camera
    mvsdk.CameraUnInit(hCamera)

    # Free the frame buffer
    mvsdk.CameraAlignFree(pFrameBuffer)
예제 #12
0
def main():
    """Face-capture loop.

    Opens a user-selected camera, saves roughly one snapshot per second,
    invokes an external `unfold` tool to unwarp each snapshot, then runs
    insightface face detection on the result and saves any detected face.
    Runs until interrupted with Ctrl+C.
    """
    # Enumerate cameras
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(),
                                 DevInfo.GetPortType()))
    i = 0 if nDev == 1 else int(input("Select camera: "))
    DevInfo = DevList[i]
    print(DevInfo)

    # Open the camera
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Get the camera capability description
    cap = mvsdk.CameraGetCapability(hCamera)
    # PrintCapbility(cap)

    # Mono or color camera?
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # Mono cameras: ISP outputs MONO8 directly instead of expanding to
    # 24-bit R=G=B grayscale; color cameras: BGR8
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
    else:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)

    # Continuous acquisition mode
    mvsdk.CameraSetTriggerMode(hCamera, 0)

    # Manual exposure (original comment said 30 ms; the code sets 100 ms)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 100 * 1000)

    # Start the SDK's internal frame-grabbing thread
    mvsdk.CameraPlay(hCamera)

    # RGB buffer size, allocated for the camera's maximum resolution
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Allocate the RGB buffer that receives ISP output
    # NOTE(review): this buffer is never passed to CameraAlignFree anywhere
    # below — it leaks on exit.
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    savetime = 0

    # Resume image numbering from whatever is already on disk
    img_list = glob.glob("./img/*.jpg")
    if len(img_list) != 0:
        num_img = len(img_list)
    else:
        num_img = 0

    ### Model loading
    image_size = "112,112"
    model_path = "../../insightface/models/model-y1-test2/model,0"
    ga_model_path = "../../insightface/models/gamodel-r50/model,0"
    args = ReadArgs(image_size, model_path, ga_model_path)
    model = face_model.FaceModel(args)

    ## Start capturing
    try:

        # this script can't be stopped by ctrl+c or quit automatically if met
        # with any error, probably because of some multi threads process.
        while (True):
            # print("ent loop")
            # Grab one frame from the camera
            try:
                # print("get buffer")
                # pass
                pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 2000)
                mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                         FrameHead)
                mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
                # NOTE(review): this rebinds pFrameBuffer, discarding the
                # aligned buffer allocated above — confirm
                # CameraGetImageBufferEx is really meant to replace it here.
                pFrameBuffer, piWidth, piHeight = mvsdk.CameraGetImageBufferEx(
                    hCamera, 2000)

                # The frame is now in pFrameBuffer (RGB for color cameras,
                # 8-bit grayscale for mono). Save one snapshot per second.
                if time.time() - savetime >= 1:
                    # Save one image
                    img_name = "./img/grab_{}.jpg".format(num_img)
                    status = mvsdk.CameraSaveImage(hCamera, img_name,
                                                   pFrameBuffer, FrameHead,
                                                   mvsdk.FILE_JPG, 100)
                    if status == mvsdk.CAMERA_STATUS_SUCCESS:
                        print("Save image successfully. image_size = {}X{}".
                              format(FrameHead.iWidth, FrameHead.iHeight))
                    else:
                        print("Save image failed. err={}".format(status))

                    # Call the external C++ tool to unwarp/undistort the saved
                    # image; ideally the image would be passed in memory and
                    # the unfolded result returned directly.
                    out_path = "./result/{}.jpg".format(num_img)
                    os.system(unfold + " " + out_path + " " + img_name)
                    print("Unfold image successfully.")

                    # Run insightface face detection and save the cropped face
                    raw_img = cv2.imread(out_path)
                    try:
                        face_img = model.get_input(raw_img)
                        face_path = "./face/{}.jpg".format(num_img)
                        # the channels of face_img is adapted to fit the network input
                        # so we should use a np.transpose here
                        face_img = np.transpose(face_img, [1, 2, 0])
                        face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
                        cv2.imwrite(face_path, face_img)
                        print("Face detected and saved successfully.")
                    # NOTE(review): bare except treats face detection as
                    # best-effort; consider narrowing to `except Exception`.
                    except:
                        pass
                    # Get the face feature vector in real time and compare it
                    # to confirm identity
                    # feature = model.get_feature(face_img)

                    # Save the image into the matching person's folder

                    # status = mvsdk.CameraSaveImage(hCamera, img_name, pFrameBuffer, FrameHead, mvsdk.FILE_JPG, 100)
                    # if status == mvsdk.CAMERA_STATUS_SUCCESS:
                    # 	print("Save image successfully. image_size = {}X{}".format(FrameHead.iWidth, FrameHead.iHeight) )
                    # else:
                    # 	print("Save image failed. err={}".format(status) )
                    savetime = time.time()
                    num_img += 1
                else:
                    continue
            except mvsdk.CameraException as e:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))
    except KeyboardInterrupt:
        print("KeyboardInterrupt to exit.")
        # pass
        # Close the camera
        mvsdk.CameraUnInit(hCamera)
예제 #13
0
    def main(self):
        """Open a user-selected camera and stream frames via GrabCallback.

        Enumerates cameras, lets the user pick one (auto-selects if only one
        exists), configures continuous acquisition with 30 ms manual
        exposure, registers self.GrabCallback as the SDK frame callback, and
        sleeps until self.quit becomes True. Then releases the camera and
        the aligned frame buffer (self.pFrameBuffer).
        """
        # Enumerate cameras
        dev_list = mvsdk.CameraEnumerateDevice()
        if not dev_list:
            print("No camera was found!")
            return

        for idx, info in enumerate(dev_list):
            print("{}: {} {}".format(idx, info.GetFriendlyName(),
                                     info.GetPortType()))
        idx = 0 if len(dev_list) == 1 else int(input("Select camera: "))
        dev_info = dev_list[idx]
        print(dev_info)

        # Open the camera
        try:
            camera_handle = mvsdk.CameraInit(dev_info, -1, -1)
        except mvsdk.CameraException as err:
            print("CameraInit Failed({}): {}".format(err.error_code,
                                                     err.message))
            return

        # Get the camera capability description
        capability = mvsdk.CameraGetCapability(camera_handle)

        # Mono cameras get MONO8 ISP output directly (instead of the
        # expanded 24-bit R=G=B grayscale); color cameras get BGR8.
        is_mono = capability.sIspCapacity.bMonoSensor != 0
        out_format = (mvsdk.CAMERA_MEDIA_TYPE_MONO8
                      if is_mono else mvsdk.CAMERA_MEDIA_TYPE_BGR8)
        mvsdk.CameraSetIspOutFormat(camera_handle, out_format)

        # Continuous acquisition mode
        mvsdk.CameraSetTriggerMode(camera_handle, 0)

        # Manual exposure, 30 ms
        mvsdk.CameraSetAeState(camera_handle, 0)
        mvsdk.CameraSetExposureTime(camera_handle, 30 * 1000)

        # Start the SDK's internal frame-grabbing thread
        mvsdk.CameraPlay(camera_handle)

        # Allocate the RGB buffer that receives ISP output, sized for the
        # camera's maximum resolution (RAW from the camera is converted by
        # the software ISP on the PC; mono cameras skip the conversion but
        # still need this buffer).
        res_range = capability.sResolutionRange
        buffer_size = (res_range.iWidthMax * res_range.iHeightMax *
                       (1 if is_mono else 3))
        self.pFrameBuffer = mvsdk.CameraAlignMalloc(buffer_size, 16)

        # Register the grab callback and wait until it asks us to quit
        self.quit = False
        mvsdk.CameraSetCallbackFunction(camera_handle, self.GrabCallback, 0)
        while not self.quit:
            time.sleep(0.1)

        # Close the camera
        mvsdk.CameraUnInit(camera_handle)

        # Free the frame buffer
        mvsdk.CameraAlignFree(self.pFrameBuffer)
예제 #14
0
def main_loop():
    """Grab frames from the first MindVision camera in hardware-trigger mode
    and display them until 'q' is pressed in the preview window.

    Side effects: opens/configures the camera, shows a "capture" window,
    and releases the camera handle and aligned frame buffer on exit.
    """
    # Enumerate attached cameras.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query the camera capability descriptor.
    cap = mvsdk.CameraGetCapability(hCamera)

    # Mono or color sensor?
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # For mono cameras have the ISP output MONO8 directly instead of
    # expanding to 24-bit R=G=B.
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Trigger mode: 0 = continuous, 1 = software trigger, 2 = hardware trigger.
    mvsdk.CameraSetTriggerMode(hCamera, 2)

    mvsdk.CameraGetTriggerMode(hCamera)

    # External trigger edge: 0 rising, 1 falling, 2 high level, 3 low level.
    mvsdk.CameraSetExtTrigSignalType(hCamera, 1)

    mvsdk.CameraGetExtTrigSignalType(hCamera)

    # Trigger delay in microseconds: capture starts this long after the
    # hardware trigger signal arrives.
    mvsdk.CameraSetTriggerDelayTime(hCamera, 0)

    mvsdk.CameraGetTriggerDelayTime(hCamera)

    # Alternative API for setting the external-trigger delay.
    # mvsdk.CameraSetExtTrigDelayTime(hCamera, 0)

    # mvsdk.CameraGetExtTrigDelayTime(hCamera)

    # Frames captured per trigger (SDK default is 1).
    # mvsdk.CameraSetTriggerCount(hCamera, 1)

    # mvsdk.CameraGetTriggerCount(hCamera)

    # Manual exposure, 30 ms (left disabled here).
    # mvsdk.CameraSetAeState(hCamera, 0)
    # mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # Size of the RGB buffer, allocated for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Allocate the buffer that receives software-ISP output. The camera
    # delivers RAW data; the PC-side ISP converts it to RGB (mono cameras
    # skip the conversion but still need this buffer for other ISP steps).
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Fetch one frame (200 ms timeout; timeouts are expected between triggers).
        try:

            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # pFrameBuffer now holds the processed image (RGB for color,
            # 8-bit gray for mono). Wrap it as a numpy array for OpenCV.
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))

            cv2.imshow("capture", frame)

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Shut the camera down.
    mvsdk.CameraUnInit(hCamera)

    # Free the aligned frame buffer.
    mvsdk.CameraAlignFree(pFrameBuffer)
예제 #15
0
파일: cv_grab_2.py 프로젝트: ustc-mbit/time
def camera():
    """Stream frames from the first camera until space captures one or 'q' quits.

    Returns the captured, 180-degree-rotated frame (numpy array) when the
    user presses space; returns 0 if the user quits with 'q' without
    capturing; returns None if no camera is found or init fails.
    """
    # Enumerate cameras.
    img = 0
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query the camera capability descriptor.
    cap = mvsdk.CameraGetCapability(hCamera)

    # Mono or color sensor?
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # Mono cameras: output MONO8 directly instead of 24-bit R=G=B.
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition mode.
    mvsdk.CameraSetTriggerMode(hCamera, 0)

    # Manual exposure, 40 ms.
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 40 * 1000)

    # Manual per-channel gain.

    mvsdk.CameraSetGain(hCamera, 200, 200, 200)
    print(mvsdk.CameraGetGain(hCamera))

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # RGB buffer sized for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Buffer receiving software-ISP output (the camera delivers RAW data;
    # the PC-side ISP converts it; mono cameras still need the buffer).
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Fetch one frame (200 ms timeout).
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # Wrap the processed buffer as a numpy image for OpenCV
            # (RGB for color cameras, 8-bit gray for mono).
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape(
                (FrameHead.iHeight, FrameHead.iWidth,
                 1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8
                 else 3))  # numpy-format image
            # Rotate 180 degrees about the image center.
            # NOTE(review): w is actually the height and h the width here;
            # the rotation/warp calls below are consistent with that naming.
            w, h = frame.shape[:2]
            # print(w,h)
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame_ = cv2.warpAffine(frame, M, (h, w))
            # Mirror flip (disabled).
            # frame=cv2.flip(frame,1)
            cv2.namedWindow('correct_capture', cv2.WINDOW_NORMAL)
            cv2.imshow("correct_capture", frame_)
            # Space (key code 32) captures the current frame and exits.
            c = cv2.waitKey(1)
            if c == 32:
                img = frame_
                break
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Shut the camera down.
    mvsdk.CameraUnInit(hCamera)

    # Free the aligned frame buffer.
    mvsdk.CameraAlignFree(pFrameBuffer)

    # Close the preview window.
    cv2.destroyWindow('correct_capture')
    return img
예제 #16
0
    thickness = 2
    cv2.putText(copy_image_ndarray, time_string, (10,50), cv2.FONT_HERSHEY_SIMPLEX, 1, bgr_color, thickness)
    return copy_image_ndarray


# Display an image via the cv2 library.
def show_image(image_ndarray):
    """Show *image_ndarray* in the cv2 window named 'display'."""
    cv2.imshow('display', image_ndarray)


# 主函数
from sys import exit
if __name__ == '__main__':
    # 获取相机设备的信息
    device_list = mvsdk.CameraEnumerateDevice()
    if len(device_list) == 0:
        print('没有连接MindVision品牌的相机设备')
        exit()
    device_info = device_list[0]
    cameraIndex = mvsdk.CameraInit(device_info, -1, -1)
    # 开始用相机监控
    first_image_ndarray = None
    while True:
        second_image_ndarray = get_capturedImage(cameraIndex)
        drawed_image_ndarray = get_drawedDetectedImage(first_image_ndarray, second_image_ndarray)
        if drawed_image_ndarray is not None:
            show_image(drawed_image_ndarray)
        # 在展示图片后,等待1秒,接收按键
        pressKey = cv2.waitKey(1)
        # 按Esc键或者q键可以退出循环
예제 #17
0
def main_loop():
    """Track a green circular target and stream yaw/pitch commands to an
    MCU over /dev/ttyUSB0 (115200 baud).

    Shows a live preview window ('Press q to end') with the detected
    circle drawn on it. Keys: 'c' saves a snapshot under pictures/,
    mouse clicks print pixel/BGR/HSV values (via the module-level
    mouse_event handler and ax/ay globals), 'q' quits. On exit the
    serial port, camera handle and frame buffer are released.
    """
    # Enumerate cameras.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return
    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(),
                                 DevInfo.GetPortType()))
    i = 0 if nDev == 1 else int(input("Select camera: "))
    DevInfo = DevList[i]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Capability descriptor; mono vs. color sensor.
    cap = mvsdk.CameraGetCapability(hCamera)
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # Mono cameras: ISP outputs MONO8 directly instead of 24-bit R=G=B.
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
    else:
        # NOTE(review): the other examples in this file use
        # CAMERA_MEDIA_TYPE_BGR8 here; confirm CAMERA_MEDIA_TYPE_COLOR
        # exists in this mvsdk build.
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_COLOR)

    # Continuous acquisition.
    mvsdk.CameraSetTriggerMode(hCamera, 0)

    # Manual exposure (5 ms here; overridden to 12 ms inside the loop)
    # plus static image tuning.
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 5 * 1000)
    mvsdk.CameraSetSaturation(hCamera, 142)  #saturation
    mvsdk.CameraSetContrast(hCamera, 150)  #contrast
    mvsdk.CameraSetAnalogGain(hCamera, 7)  #analog gain
    mvsdk.CameraSetSharpness(hCamera, 150)  #sharpness
    mvsdk.CameraSetGain(hCamera, 144, 118, 100)  #color_gain
    mvsdk.CameraSetGamma(hCamera, 50)  #gamma
    #mvsdk.CameraSetWhiteLevel(hCamera, 1)#white balance
    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)
    # RGB buffer sized for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)
    # Buffer receiving software-ISP output (camera delivers RAW data).
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
    count = 0
    time1 = time.time()
    a = 0
    cv2.namedWindow('Press q to end')
    cv2.createTrackbar('case1:', 'Press q to end', 0, 100, nothing)
    cv2.createTrackbar('case2:', 'Press q to end', 30, 100, nothing)
    cv2.createTrackbar('case3:', 'Press q to end', 109, 200, nothing)
    # Hoisted out of the frame loop: registering the callback once is enough.
    cv2.setMouseCallback("Press q to end", mouse_event)
    ax2 = 0
    countc = 0
    # BUGFIX: open the serial port ONCE. The original re-opened
    # /dev/ttyUSB0 on every loop iteration and never closed it, leaking
    # file descriptors. (sudo chmod 666 /dev/ttyUSB0 if permission denied)
    portx = "/dev/ttyUSB0"
    bps = 115200  #timeout: None blocks forever, 0 returns immediately
    ser = serial.Serial(portx, bps, timeout=1)
    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        count += 1
        b = cv2.getTrackbarPos('case1:', 'Press q to end')
        # b2/b3 are read for the commented-out tuning calls below.
        b2 = cv2.getTrackbarPos('case2:', 'Press q to end')
        b3 = cv2.getTrackbarPos('case3:', 'Press q to end')
        #mvsdk.CameraSetBlackLevel(hCamera,b)
        #mvsdk.CameraSetAnalogGain(hCamera,b2)
        mvsdk.CameraSetExposureTime(hCamera, 12 * 1000)
        #mvsdk.CameraSetAnalogGain(hCamera,0.5)
        # Once per second: report the achieved frame rate.
        if time.time() - time1 >= 1:
            time1 = time.time()
            print('frames:', count)
            print(a)
            count = 0
        # Fetch one frame from the camera (200 ms timeout).
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
            # Wrap the processed buffer as a numpy image for OpenCV.
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
            frame = cv2.resize(frame, (320 + b * 4, 240 + b * 3),
                               interpolation=cv2.INTER_LINEAR)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # Simple gray-world auto white balance, clamped to [120, 150].
            (B, G, R) = cv2.split(frame)
            r_avg = cv2.mean(R)[0]
            g_avg = cv2.mean(G)[0]
            b_avg = cv2.mean(B)[0]
            k = (r_avg + g_avg + b_avg) / 3
            if k > 150:
                k = 150
            elif k < 120:
                k = 120
            kr = k / r_avg
            kg = k / g_avg
            kb = k / b_avg
            r = cv2.addWeighted(src1=R, alpha=kr, src2=0, beta=0, gamma=0)
            g = cv2.addWeighted(src1=G, alpha=kg, src2=0, beta=0, gamma=0)
            b = cv2.addWeighted(src1=B, alpha=kb, src2=0, beta=0, gamma=0)
            frame2 = frame
            frame = cv2.merge([b, g, r])
            # 'c' saves a snapshot of the balanced frame.
            if (cv2.waitKey(1) & 0xFF) == ord('c'):
                countc += 1
                name = "pictures/bizhi" + str(countc) + ".jpg"
                cv2.imwrite(name, frame)
                print('succeed!')
            # On a new click position (globals ax/ay updated by
            # mouse_event) dump the pixel coordinates and colors.
            if ax2 != ax:
                print('PIX:', ax, ay)
                print('BGR:', frame[ay, ax])
                print('HSV:', hsv[ay, ax])
            ax2 = ax
            lower_green = np.array([0, 105, 25])  #dark 0,80,25,light 0,105,25
            upper_green = np.array([56, 255,
                                    109])  #light56,255,109#dark20,100,40
            mask = cv2.inRange(frame, lower_green, upper_green)
            mask = cv2.blur(mask, (9, 9))
            mask = cv2.Canny(mask, 120, 140)
            mask = cv2.blur(mask, (5, 5))
            image, cnt, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                                     cv2.CHAIN_APPROX_SIMPLE)
            # Pick the largest enclosing circle (minimum radius 15 px,
            # default center at the image middle).
            rad1 = 15
            cen1 = (160, 120)
            for cnt1 in cnt:
                cen, rad = cv2.minEnclosingCircle(cnt1)
                cen = tuple(np.int0(cen))
                rad = np.int32(rad)
                if (rad >= rad1):
                    rad1 = int(rad / 1.18)
                    cen1 = cen
            # Draw the detected circle and its center on the preview frame.
            frame = cv2.circle(frame2, cen1, rad1, (0, 255, 0), 2)
            frame = cv2.circle(frame2, cen1, 1, (255, 0, 0), 2)
            # Map to real-world axes. Units: mm. Real target radius 150,
            # delta pitch 91, delta yaw 68.
            P = 240 / 4.8  #pixel density, pixels*mm^-1
            D2 = 6  #*1.31#the focal length
            Q2 = 91  #delta pitch
            Q1 = 68  #delta yaw
            Rreal = 150
            D1 = (
                Rreal /
                (rad1 / P)) * D2  #real distance between lens and object
            cenn0 = int(cen1[0] - (Q1 / D1) * D2 * P)
            cenn1 = int(cen1[1] - (Q2 / D1) * D2 * P)
            #cen1=(cenn0,cenn1)
            # Send yaw/pitch to the MCU. Each angle is transmitted as two
            # 6-bit halves; the head byte has bit 6 set as a flag.
            yawx = cen1[0] - 160 - 31
            pitchy = 120 - cen1[1]
            yawAngle = math.atan(yawx / 50 / D2) * 180 / 3.14159
            pitchAngle = math.atan(pitchy / 50 / D2) * 180 / 3.14159
            intYawAngle = int(yawAngle * 5 + 220 + 140)
            intPitchAngle = int(pitchAngle * 5 + 110)
            head = int(intYawAngle / 64)  #shift right 6 bits
            tail = (intYawAngle - int(intYawAngle / 64) * 64)  #low 6 bits
            head = head + 64  #set the flag bit
            head = bytes(chr(head), encoding="utf-8")
            ser.write(head)
            time.sleep(0.0003)
            tail = bytes(chr(tail), encoding="utf-8")
            ser.write(tail)
            time.sleep(0.0003)
            head = int(intPitchAngle / 64)  #shift right 6 bits
            tail = (intPitchAngle - int(intPitchAngle / 64) * 64
                    )  #low 6 bits
            head = head + 64  #set the flag bit
            head = bytes(chr(head), encoding="utf-8")
            ser.write(head)
            time.sleep(0.0003)
            tail = bytes(chr(tail), encoding="utf-8")
            ser.write(tail)
            #cv2.imshow("img",image)
            cv2.imshow("Press q to end", frame2)
            #cv2.imwrite("sendPitcures/bizhi" + ".jpg", frame2)

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Release the serial port (part of the fd-leak fix above).
    ser.close()

    # Shut the camera down.
    mvsdk.CameraUnInit(hCamera)

    # Free the aligned frame buffer.
    mvsdk.CameraAlignFree(pFrameBuffer)
예제 #18
0
파일: show_cam.py 프로젝트: ty9071/suzly2
    def startCam(self):
        """Open the camera whose serial number equals self.cam_sn, configure
        exposure/gain, start streaming, and stash the SDK module, camera
        handle and aligned frame buffer on self.

        Prints a diagnostic and returns early when no camera matches
        self.cam_sn or when CameraInit fails.
        """
        # Enumerate cameras.
        DevList = mvsdk.CameraEnumerateDevice()
        #	DevList = mvsdk.CameraEnumerateDeviceEx()
        nDev = len(DevList)
        print('Camera numbers:', nDev)
        # BUGFIX: DevInfo must be initialized before the search loop.
        # Previously, an unmatched (or missing) serial number raised
        # NameError at the check below instead of printing the diagnostic.
        DevInfo = 0
        for i in range(nDev):
            if self.cam_sn == DevList[i].acSn:
                DevInfo = DevList[i]
                break
        if DevInfo == 0:
            print('color_cam_sn is wrong, can not get Devinfo, please check')
            return
        print("Devinfo:", DevInfo)

        # Open the camera.
        hCamera = 0
        try:
            hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
        except mvsdk.CameraException as e:
            print("CameraInit Failed({}): {}".format(e.error_code, e.message))
            return

        # Query the camera capability descriptor.
        cap = mvsdk.CameraGetCapability(hCamera)

        # Mono or color sensor?
        monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

        # Mono cameras: output MONO8 directly instead of 24-bit R=G=B.
        if monoCamera:
            mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

        # Continuous acquisition.
        mvsdk.CameraSetTriggerMode(hCamera, 0)
        # Manual exposure with the configured per-channel white-balance gains.
        mvsdk.CameraSetAeState(hCamera, 0)
        mvsdk.CameraSetGain(hCamera, int(self.once_wb[0]),
                            int(self.once_wb[1]), int(self.once_wb[2]))
        # Raise the analog gain to brighten the image.
        mvsdk.CameraSetAnalogGain(hCamera, 20)
        mvsdk.CameraSetExposureTime(hCamera, float(self.exposetime) * 1000)
        # mvsdk.CameraSetSaturation(hCamera, 0)
        # mvsdk.CameraSetSharpness(hCamera, 0)
        tmp_ok = mvsdk.CameraGetSharpness(hCamera)
        print(tmp_ok)

        # Resolution override (disabled).
        #       sRoiResolution = mvsdk.CameraGetImageResolution(hCamera)
        #       print(type(sRoiResolution))
        #       print(sRoiResolution)
        #       sRoiResolution.iIndex = 10
        #       mvsdk.CameraSetImageResolution(hCamera, sRoiResolution)
        #       sRoiResolution = mvsdk.CameraGetImageResolution(hCamera)
        #       print(sRoiResolution)

        # Start the SDK's internal grab thread.
        mvsdk.CameraPlay(hCamera)

        # RGB buffer sized for the camera's maximum resolution.
        FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
            1 if monoCamera else 3)

        # Buffer receiving software-ISP output (camera delivers RAW data).
        pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

        self.mvsdk = mvsdk
        self.hCamera = hCamera
        self.pFrameBuffer = pFrameBuffer
예제 #19
0
파일: cv_grab_2.py 프로젝트: ustc-mbit/time
def main_loop():
    """Grab frames from the first camera, run the correction pipeline and
    blit the corrected image into the projector background.

    Relies on module-level names: image_input (the correction function)
    and img_0 (the projector background image). Each iteration saves the
    original and corrected frames to fixed absolute paths and prints the
    mean/variance of their per-pixel distance. Runs until 'q' is pressed.
    """
    # Enumerate cameras.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    DevInfo = DevList[0]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query the camera capability descriptor.
    cap = mvsdk.CameraGetCapability(hCamera)

    # Mono or color sensor?
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # Mono cameras: output MONO8 directly instead of 24-bit R=G=B.
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition.
    mvsdk.CameraSetTriggerMode(hCamera, 0)

    # Manual exposure, 30 ms (the original comment said 40 ms; the code
    # sets 30 * 1000 microseconds).
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

    # Manual gain.
    # print('camera gain', mvsdk.CameraGetGain(hCamera))
    # Per-channel RGB gain.
    mvsdk.CameraSetGain(hCamera, 200, 200, 200)

    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # RGB buffer sized for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Buffer receiving software-ISP output (camera delivers RAW data;
    # mono cameras still need the buffer for other ISP steps).
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Fetch one frame (200 ms timeout).
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # Wrap the processed buffer as a numpy image for OpenCV
            # (RGB for color cameras, 8-bit gray for mono).
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
            # # Live display (disabled).
            # cv2.imshow('frame', frame)
            # Downsample 4x for the distance statistics below (BGR).
            X = cv2.pyrDown(cv2.pyrDown(frame))
            # Save the original frame.
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\ori.bmp',
                frame)
            # # Vertical flip (disabled).
            # frame=cv2.flip(frame,0)
            # Rotate 180 degrees about the image center.
            # NOTE(review): w is actually the height and h the width here.
            w, h = frame.shape[:2]
            # print(w,h)
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))
            # cv2.imshow('capture1',frame)
            # cv2.waitKey(0)
            # print(frame,type(frame),frame.shape)
            # Deep-learning correction path (disabled).
            # frame=deep_convert(frame)
            # Matrix-based correction path:
            # pre-filter before the correction.
            frame = cv2.GaussianBlur(frame, (0, 0), 1.8)
            # frame=cv2.blur(frame,(5,5))
            # frame=cv2.medianBlur(frame,9)
            # frame=cv2.bilateralFilter(frame,d=0,sigmaColor=50,sigmaSpace=15)
            frame = image_input(frame)

            # Downsample the corrected frame for the distance statistics.
            Y = cv2.pyrDown(cv2.pyrDown(frame))
            # Save the corrected frame.
            cv2.imwrite(
                r'E:\pycharm\camera\navigationWithZ\camera_develop\result\capture\correct.bmp',
                frame)
            # Per-pixel distance between original and corrected downsamples.
            t = np.sum(((X - Y)**2)**0.5, axis=2)
            print('距离均值', np.mean(t), '距离方差', np.var(t))
            # Horizontal flip (disabled).
            # frame = cv2.flip(frame, 1)
            # Live display / extra filtering experiments (disabled).
            # frame=cv2.GaussianBlur(frame,(0,0),1.8)
            # frame=cv2.blur(frame,(5,5))

            # Display-only filters used when no projector is attached.
            # frame=cv2.medianBlur(frame,3)
            # frame=cv2.bilateralFilter(frame,d=0,sigmaSpace=15,sigmaColor=100)
            # cv2.imshow('frame',frame)
            # cv2.waitKey( )

            # Camera-to-projector scale correction.
            fx = 0.67
            fy = 0.67
            frame = cv2.resize(frame, None, fx=fx, fy=fy)

            # Paste into the projector background image.
            w, h, d = frame.shape
            print('shape', w, h, d)
            # frame=cv2.flip(frame,0)
            M = cv2.getRotationMatrix2D((h / 2, w / 2), 180, 1)
            frame = cv2.warpAffine(frame, M, (h, w))
            # print(w,h)
            # Paste origin (top-left) l,t within img_0.
            l, t = 145, 205

            img_0[l:l + w, t:t + h, 0] = frame[:, :, 0]
            img_0[l:l + w, t:t + h, 1] = frame[:, :, 1]
            img_0[l:l + w, t:t + h, 2] = frame[:, :, 2]
            cv2.imshow('projector', img_0)
        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Shut the camera down.
    mvsdk.CameraUnInit(hCamera)

    # Free the aligned frame buffer.
    mvsdk.CameraAlignFree(pFrameBuffer)
예제 #20
0
                    box2 = cv2.boxPoints(rect2)
                    box2 = np.int0(box2)
                    finalPair = [box1, box2]
                    finalReact = [rects[i], rect2]
        if midPoint is not None:
            width_pixel = armor_width(finalReact[0], finalReact[1])

        #cv2.drawContours(frame,filteredCont,-1,(0,255,0),3)
        cv2.circle(frame, midPoint, 10, (255, 255, 0), 5)

        return midPoint, width_pixel


if __name__ == '__main__':

    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")

    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(),
                                 DevInfo.GetPortType()))
    i = 0
    DevInfo = DevList[i]
    print(DevInfo)

    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
예제 #21
0
파일: camera1.py 프로젝트: ustc-mbit/time
    def __init__(self):
        """Enumerate, open and configure the first MindVision camera, start
        streaming, and allocate the aligned frame buffer on self.

        Prints a message and returns early if no camera is found or
        CameraInit fails (the instance is then left partially initialized).
        """
        # Enumerate cameras.
        DevList = mvsdk.CameraEnumerateDevice()
        self.nDev = len(DevList)
        if self.nDev < 1:
            print("No camera was found!")
            return

        self.DevInfo = DevList[0]
        print(self.DevInfo)

        # Open the camera.
        self.hCamera = 0
        try:
            self.hCamera = mvsdk.CameraInit(self.DevInfo, -1, -1)
        except mvsdk.CameraException as e:
            print("CameraInit Failed({}): {}".format(e.error_code, e.message))
            return

        # Query the camera capability descriptor.
        self.cap = mvsdk.CameraGetCapability(self.hCamera)

        # Mono or color sensor?
        self.monoCamera = (self.cap.sIspCapacity.bMonoSensor != 0)

        # Mono cameras: output MONO8 directly instead of 24-bit R=G=B.
        if self.monoCamera:
            mvsdk.CameraSetIspOutFormat(self.hCamera,
                                        mvsdk.CAMERA_MEDIA_TYPE_MONO8)

        # Trigger mode: 0 = continuous, 1 = software trigger, 2 = hardware trigger.
        mvsdk.CameraSetTriggerMode(self.hCamera, 0)

        mvsdk.CameraGetTriggerMode(self.hCamera)

        # External trigger edge: 0 rising, 1 falling, 2 high level, 3 low level.
        # mvsdk.CameraSetExtTrigSignalType(self.hCamera, 1)
        #
        # mvsdk.CameraGetExtTrigSignalType(self.hCamera)

        # Trigger delay (microseconds) after the hardware trigger arrives.
        # mvsdk.CameraSetTriggerDelayTime(self.hCamera, 100)
        #
        # mvsdk.CameraGetTriggerDelayTime(self.hCamera)

        # Alternative API for the external-trigger delay.
        # mvsdk.CameraSetExtTrigDelayTime(hCamera, 0)

        # mvsdk.CameraGetExtTrigDelayTime(hCamera)

        # Frames captured per trigger (SDK default is 1).
        # mvsdk.CameraSetTriggerCount(hCamera, 1)

        # mvsdk.CameraGetTriggerCount(hCamera)

        # Manual exposure, 30 ms. (The original comment said 3 ms; the
        # code sets 30 * 1000 microseconds.)
        mvsdk.CameraSetAeState(self.hCamera, 0)
        mvsdk.CameraSetExposureTime(self.hCamera, 30 * 1000)

        # Set and read back the analog gain.
        mvsdk.CameraSetAnalogGain(self.hCamera, 1)
        mvsdk.CameraGetAnalogGain(self.hCamera)

        # Start the SDK's internal grab thread.
        mvsdk.CameraPlay(self.hCamera)

        # RGB buffer sized for the camera's maximum resolution.
        self.FrameBufferSize = self.cap.sResolutionRange.iWidthMax * self.cap.sResolutionRange.iHeightMax *\
                               (1 if self.monoCamera else 3)

        # Set the camera resolution; external trigger needs an 800x600
        # centered crop.
        mvsdk.CameraSetImageResolution(
            self.hCamera, mvsdk.CameraCustomizeResolution(self.hCamera))

        # Buffer receiving software-ISP output (camera delivers RAW data;
        # mono cameras still need the buffer for other ISP steps).
        self.pFrameBuffer = mvsdk.CameraAlignMalloc(self.FrameBufferSize, 16)
예제 #22
0
def main_loop():
    """Multi-camera loop: resize each grabbed frame to 1280x720, draw the
    calibration corner markers, warp to a bird's-eye view, run detect()
    on it, and display both views until 'q' is pressed.

    Relies on module-level names: Camera, left_top/right_top/left_bottom/
    right_bottom (corner points), M (homography), Image_W/Image_H,
    ref_point, detect, and mouse_event.
    """
    # Enumerate cameras.
    count = 0  # NOTE(review): count and number are never used below
    number = 0
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(),
                                 DevInfo.GetPortType()))

    # Open every camera the user selects (space-separated indices).
    cams = []
    for i in map(lambda x: int(x), input("Select cameras: ").split()):
        cam = Camera(DevList[i])
        if cam.open():
            cams.append(cam)

    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        for cam in cams:
            frame = cam.grab()
            if frame is not None:
                image = cv2.resize(frame, (1280, 720),
                                   interpolation=cv2.INTER_LINEAR)

                # Mark the four calibration corner points.
                cv2.circle(image, left_top, 2, (0, 255, 0), thickness=2)
                cv2.circle(image, right_top, 2, (0, 255, 0), thickness=2)
                cv2.circle(image, left_bottom, 2, (0, 255, 0), thickness=2)
                cv2.circle(image, right_bottom, 2, (0, 255, 0), thickness=2)

                # Outline each corner with a small red box.
                cv2.rectangle(image, (left_top[0] + 4, left_top[1] + 4),
                              (left_top[0] - 4, left_top[1] - 4), (0, 0, 255),
                              thickness=2)
                cv2.rectangle(image, (right_top[0] + 4, right_top[1] + 4),
                              (right_top[0] - 4, right_top[1] - 4),
                              (0, 0, 255),
                              thickness=2)
                cv2.rectangle(image, (left_bottom[0] + 4, left_bottom[1] + 4),
                              (left_bottom[0] - 4, left_bottom[1] - 4),
                              (0, 0, 255),
                              thickness=2)
                cv2.rectangle(image,
                              (right_bottom[0] + 4, right_bottom[1] + 4),
                              (right_bottom[0] - 4, right_bottom[1] - 4),
                              (0, 0, 255),
                              thickness=2)

                cv2.imshow(
                    "{} Press q to end".format(cam.DevInfo.GetFriendlyName()),
                    image)

                # Bird's-eye view via the module-level homography M.
                Bird_img = cv2.warpPerspective(image, M, (Image_W, Image_H))
                Bird_img = cv2.resize(Bird_img, (720, 900))

                # Mark the reference point in the warped view.
                cv2.circle(Bird_img, ref_point, 2, (0, 99, 99), thickness=2)
                cv2.rectangle(Bird_img, (ref_point[0] + 4, ref_point[1] + 4),
                              (ref_point[0] - 4, ref_point[1] - 4),
                              (0, 255, 0),
                              thickness=2)

                # Run detection on the bird's-eye image and show it.
                detect(Bird_img)
                cv2.imshow("Result", Bird_img)
                cv2.setMouseCallback("Result", mouse_event)

    for cam in cams:
        cam.close()
예제 #23
0
def main_loop():
    """Stream frames from a user-selected camera, printing the measured FPS
    for each frame, until 'q' is pressed in the preview window."""
    # Enumerate cameras.
    DevList = mvsdk.CameraEnumerateDevice()
    nDev = len(DevList)
    if nDev < 1:
        print("No camera was found!")
        return

    for i, DevInfo in enumerate(DevList):
        print("{}: {} {}".format(i, DevInfo.GetFriendlyName(),
                                 DevInfo.GetPortType()))
    # BUGFIX: input() returns a str in Python 3, so the original
    # DevList[input(...)] raised TypeError. Convert to int, and skip the
    # prompt when only one camera is attached (consistent with the other
    # examples in this file).
    i = 0 if nDev == 1 else int(input("Select camera: "))
    DevInfo = DevList[i]
    print(DevInfo)

    # Open the camera.
    hCamera = 0
    try:
        hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
    except mvsdk.CameraException as e:
        print("CameraInit Failed({}): {}".format(e.error_code, e.message))
        return

    # Query the camera capability descriptor.
    cap = mvsdk.CameraGetCapability(hCamera)

    # Mono or color sensor?
    monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

    # Mono cameras: output MONO8 directly instead of 24-bit R=G=B.
    if monoCamera:
        mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)

    # Continuous acquisition, manual exposure (5 ms), fixed per-channel gain.
    mvsdk.CameraSetTriggerMode(hCamera, 0)
    mvsdk.CameraSetAeState(hCamera, 0)
    mvsdk.CameraSetExposureTime(hCamera, 5 * 1000)
    mvsdk.CameraSetGain(hCamera, 100, 100, 100)
    # Start the SDK's internal grab thread.
    mvsdk.CameraPlay(hCamera)

    # RGB buffer sized for the camera's maximum resolution.
    FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (
        1 if monoCamera else 3)

    # Buffer receiving software-ISP output (camera delivers RAW data;
    # mono cameras still need the buffer for other ISP steps).
    pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)
    prev_t = time.time()
    while (cv2.waitKey(1) & 0xFF) != ord('q'):
        # Fetch one frame (200 ms timeout).
        try:
            pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
            mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer,
                                     FrameHead)
            mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

            # Wrap the processed buffer as a numpy image for OpenCV
            # (RGB for color cameras, 8-bit gray for mono).
            frame_data = (mvsdk.c_ubyte *
                          FrameHead.uBytes).from_address(pFrameBuffer)
            frame = np.frombuffer(frame_data, dtype=np.uint8)
            frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                   1 if FrameHead.uiMediaType
                                   == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))
            # Instantaneous FPS since the previous frame.
            print(1 / (time.time() - prev_t))
            prev_t = time.time()
            cv2.imshow("capture", frame)

        except mvsdk.CameraException as e:
            if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                print("CameraGetImageBuffer failed({}): {}".format(
                    e.error_code, e.message))

    # Shut the camera down.
    mvsdk.CameraUnInit(hCamera)

    # Free the aligned frame buffer.
    mvsdk.CameraAlignFree(pFrameBuffer)
예제 #24
0
def train(dataset, show_bar=False):
    """Train the CNN built by ``forward.forward`` on ``dataset``, then run live inference.

    After the training loop finishes, the same TF session is reused to classify
    regions of interest selected interactively (press SPACE) from frames grabbed
    through the MindVision (mvsdk) camera SDK. Press 'q' to quit the capture loop.

    Args:
        dataset: project dataset object; this body calls ``dataset.train_samples``,
            ``dataset.sample_train_sets(batch[, ratio])`` and
            ``dataset.sample_test_sets(n)``.
            # NOTE(review): interface inferred from usage here only — confirm.
        show_bar: unused in this body — presumably intended to toggle the tqdm
            progress bar; verify against callers before removing.

    Returns:
        None. Returns early if no camera is found or CameraInit fails.
    """
    # TF1 graph-mode placeholders for images, one-hot labels, and dropout keep rate.
    x = tf.placeholder(tf.float32, [None, generate.SRC_ROWS, generate.SRC_COLS, generate.SRC_CHANNELS])
    y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODES])
    keep_rate = tf.placeholder(tf.float32)
    # NOTE(review): `vars` shadows the builtin; renaming would touch callers of
    # save_para below, so it is left as-is here.
    nodes, vars, vars_name = forward.forward(x, 0.01, keep_rate)
    y = nodes[-1]  # final layer output (logits)

    # Cross-entropy on logits; one-hot labels converted to class indices via argmax.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    #    ce  = tf.nn.weighted_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1), pos_weight=1)
    cem = tf.reduce_mean(ce)
    # Total loss = mean cross-entropy plus everything in the "losses" collection
    # (presumably regularization terms registered inside forward.forward — confirm).
    loss = cem + tf.add_n(tf.get_collection("losses"))

    # Smoothly decaying learning rate: one decay period per epoch
    # (len(train_samples) / BATCH steps), non-staircase.
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        len(dataset.train_samples) / BATCH,
        LEARNING_RATE_DECAY,
        staircase=False)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Maintain exponential moving averages of all trainable variables; group the
    # optimizer step and the EMA update into a single no-op train_op.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    # Top-1 accuracy against the one-hot labels.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Grow GPU memory on demand instead of grabbing it all upfront.
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    with tf.Session(config=config) as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        bar = tqdm(range(STEPS), ascii=True, dynamic_ncols=True)
        for i in bar:
            # Draw a training mini-batch (second argument 0.03 — meaning not
            # visible from here; presumably an augmentation/noise ratio — confirm).
            images_samples, labels_samples = dataset.sample_train_sets(BATCH, 0.03)

            _, loss_value, step = sess.run(
                [train_op, loss, global_step],
                feed_dict={x: images_samples, y_: labels_samples, keep_rate: 0.3}
            )

            # Periodic evaluation: per-class precision/recall on test and train sets.
            if step % 500 == 0:
                test_images, test_labels = dataset.sample_test_sets(10000)
                test_acc, output = sess.run([accuracy, y],
                                            feed_dict={x: test_images, y_: test_labels, keep_rate: 1.0})
                output = np.argmax(output, axis=1)
                real = np.argmax(test_labels, axis=1)
                print("=============test-set===============")
                for n in range(forward.OUTPUT_NODES):
                    # precision = P(real==n | predicted n); recall = P(predicted n | real==n)
                    print("label: %d, precise: %f, recall: %f" %
                          (n, np.mean(real[output == n] == n), np.mean(output[real == n] == n)))

                train_images, train_labels = dataset.sample_train_sets(10000)
                train_acc, output = sess.run([accuracy, y],
                                             feed_dict={x: train_images, y_: train_labels, keep_rate: 1.0})
                output = np.argmax(output, axis=1)
                real = np.argmax(train_labels, axis=1)
                print("=============train-set===============")
                for n in range(forward.OUTPUT_NODES):
                    print("label: %d, precise: %f, recall: %f" %
                          (n, np.mean(real[output == n] == n), np.mean(output[real == n] == n)))
                print("\n")
                # Snapshot weights once both accuracies clear 0.99 (keeps training;
                # may save repeatedly on later evaluations).
                if train_acc >= 0.99 and test_acc >= 0.99:
                    vars_val = sess.run(vars)
                    save_para(
                        "model",
                        vars_val,
                        vars_name,
                        "steps:%d-train_acc:%f-test_acc:%f" % (step, train_acc, test_acc)
                    )
                bar.set_postfix({"loss": loss_value, "train_acc": train_acc, "test_acc": test_acc})
        # print("save done!")

        # pred = sess.run(y, feed_dict={x: test_images, keep_rate:1.0})

        #        nodes_val = sess.run(nodes, feed_dict={x:test_images})
        #        return vars_val, nodes_val
        # ---- Live inference via MindVision camera SDK (reuses the open session) ----
        # Enumerate attached cameras.
        DevList = mvsdk.CameraEnumerateDevice()
        nDev = len(DevList)
        if nDev < 1:
            print("No camera was found!")
            return

        for i, DevInfo in enumerate(DevList):
            print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
        i = 0 if nDev == 1 else int(input("Select camera: "))
        DevInfo = DevList[i]
        print(DevInfo)

        # Open the camera.
        hCamera = 0
        try:
            hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
        except mvsdk.CameraException as e:
            print("CameraInit Failed({}): {}".format(e.error_code, e.message))
            return

        # Get the camera capability description.
        cap = mvsdk.CameraGetCapability(hCamera)

        # Determine whether this is a mono or color camera.
        monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

        # For mono cameras, have the ISP output MONO data directly instead of
        # expanding it to 24-bit R=G=B grayscale.
        if monoCamera:
            mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
        else:
            mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)

        # Switch the camera to continuous acquisition mode.
        mvsdk.CameraSetTriggerMode(hCamera, 0)

        # Manual exposure, 30 ms exposure time.
        mvsdk.CameraSetAeState(hCamera, 0)
        mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

        # Start the SDK's internal frame-grabbing thread.
        mvsdk.CameraPlay(hCamera)

        # Size the RGB buffer for the camera's maximum resolution
        # (1 byte/pixel for mono, 3 for color).
        FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)

        # Allocate the RGB buffer that holds the ISP output. The camera transfers
        # RAW data; the software ISP on the PC converts it to RGB (mono cameras skip
        # the format conversion, but the ISP does other processing, so the buffer is
        # still required).
        pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

        # NOTE(review): hCamera / pFrameBuffer are not released if an exception
        # escapes this loop — consider wrapping in try/finally.
        while (cv2.waitKey(1) & 0xFF) != ord('q'):
            # Grab one frame from the camera.
            try:
                pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
                mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
                mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
                # The frame now sits in pFrameBuffer: RGB data for color cameras,
                # 8-bit grayscale for mono. Wrap it as a numpy array for OpenCV.
                frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
                frame = np.frombuffer(frame_data, dtype=np.uint8)
                frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth,
                                       1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3))

                frame = cv2.resize(frame, (640, 480), interpolation=cv2.INTER_LINEAR)
                cv2.imshow("Press q to end", frame)
                # SPACE: let the user draw an ROI, then classify it with the
                # trained network (resize to the network's 48x36 input, scale
                # to [0, 1], and feed through y).
                if (cv2.waitKey(1) & 0xFF) == ord(' '):
                    roi = cv2.selectROI("roi", frame)
                    roi = frame[roi[1]:roi[1] + roi[3], roi[0]:roi[0] + roi[2]]
                    print(roi)
                    cv2.imshow("box", roi)
                    image = cv2.resize(roi, (48, 36))
                    image = image.astype(np.float32) / 255.0
                    out = sess.run(y, feed_dict={x: [image]})
                    print(out)
                    print(np.argmax(out))

            except mvsdk.CameraException as e:
                # Timeouts are expected when no frame is ready; report anything else.
                if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
                    print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message))

        # Close the camera.
        mvsdk.CameraUnInit(hCamera)

        # Free the frame buffer.
        mvsdk.CameraAlignFree(pFrameBuffer)