Example #1
    def __init__(self, line_camera, od_camera, serial):
        super().__init__(line_camera=line_camera,
                         od_camera=od_camera,
                         serial_port=serial)

        # region Custom variables
        """
            Custom variables should preferably be defined here
        """
        self.p_offset = 0

        # endregion

        # region Create the objects used at runtime
        """
             Depending on the actual situation, create every object you need in the code block below,
             add the newly created objects to the queue of tasks executed in the loop,
             and assign event handlers as needed
        """
        # The recognition object is already created in the base class (CarBase); the subclass only needs to assign its event handler
        self.recognition.event_function = self.e_recognition
        # Add the object to the task loop
        CarMain.task_list.append(self.recognition)

        # Create an image-processing object. Since it initializes the frame and converts it to a binary image, it should be the first object added to the task queue
        self.init = ImageInit(width=self.line_camera_width,
                              height=self.line_camera_height,
                              threshold=50,
                              bitwise_not=True,
                              iterations=3)
        self.init.event_function = self.e_image_init
        CarMain.task_list.append(self.init)

        # Line-following object. The event handler receives an offset parameter; you can also access this value as fl.offset
        self.fl = FollowLine(self.line_camera_width,
                             self.line_camera_height,
                             direction=False,
                             threshold=5)
        self.fl.event_function = self.e_flowing_line
        CarMain.task_list.append(self.fl)

        # Intersection-finding object. The event handler receives an intersection_number parameter; you can also access it as fi.intersection_number
        self.fi = FindIntersection(radius=150,
                                   threshold=4,
                                   repeat_count=2,
                                   delay_time=1.6)
        self.fi.event_function = self.e_find_intersection
        CarMain.task_list.append(self.fi)

        # Zebra-crossing detection object; its event handler does not return a value
        self.fz = FindZebraCrossing(threshold=4, floor_line_count=3)
        self.fz.event_function = self.e_find_zebra_crossing
        CarMain.task_list.append(self.fz)
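
    # region Hedged sketch: what the event handlers assigned above might look like.
    # Only the offset and intersection_number parameter names come from this example
    # (FindIntersection calls event_function(intersection_number=...)); the remaining
    # signatures and bodies are assumptions added for illustration.
    def e_recognition(self, **kwargs):
        pass

    def e_image_init(self, **kwargs):
        pass

    def e_flowing_line(self, offset):
        # offset of the guide line from the image centre; also readable as self.fl.offset
        self.p_offset = offset

    def e_find_intersection(self, intersection_number):
        # running count of intersections found; also readable as self.fi.intersection_number
        pass

    def e_find_zebra_crossing(self, **kwargs):
        # per the comment above, no value comes back through this handler
        pass
    # endregion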
Example #2
import cv2
import sys
sys.path.append('..')
from cv.image_init import ImageInit
from cv.find_key_point import FindKeyPoint
from cv.hough_line_transform import HoughLines

# Convert each frame to a 320x240 binary image
image_p = ImageInit(320,
                    240,
                    convert_type="BINARY",
                    threshold=120,
                    bitwise_not=False)
capture = cv2.VideoCapture(0)   # open the default camera
fkp = FindKeyPoint(True)        # FAST keypoint detector wrapper
hl = HoughLines()               # Hough line transform wrapper
while True:
    ret, frame = capture.read()

    image2 = image_p.processing(frame)            # binarized frame
    _, image3 = fkp.get_key_point(image2, frame)  # keypoints detected on the binary image, drawn over the original frame
    lines = hl.get_lines(image2, frame)           # line segments found by the Hough transform
    print(lines)

    cv2.imshow('frame', image2)
    cv2.imshow("fkp", image3)
    cv2.imshow("hl", frame)
    if cv2.waitKey(1) == ord('q'):
        break
capture.release()
cv2.destroyAllWindows()
Example #3
SERIAL = "/dev/ttyACM0"  # serial port
CAMERA = '/dev/video0'  # USB camera; with multiple cameras the device files are video0, video1, video2, and so on

camera = cv2.VideoCapture(CAMERA)

# Initialize the display object. It is designed for the car's 7-inch screen: when several video windows need to be
# shown it arranges their positions automatically to avoid overlap, and it resizes every window to 320*240 to fit the small screen.
display = ShowImage()

# This object binarizes (or grayscales) the input image and also erodes it to remove some image noise.
# See the class documentation for the meaning of each parameter.
# Note in particular that when bitwise_not is True the image colours are inverted; for a grayscale image black becomes
# white and white becomes black, which suits maps whose guide line is black.
init = ImageInit(width=320,
                 height=240,
                 convert_type="BINARY",
                 threshold=120,
                 bitwise_not=True)
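
# For reference, a plain-OpenCV sketch of roughly what ImageInit does with these parameters (resize, grayscale,
# threshold, optional inversion, erosion). This is an illustration based on the comments above, not the actual
# implementation, and the helper name below is hypothetical.
def binarize_like_image_init(bgr_frame, width=320, height=240, threshold=120, bitwise_not=True, iterations=1):
    small = cv2.resize(bgr_frame, (width, height))                       # scale to the working resolution
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)                       # convert to grayscale
    _, binary = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)   # binarize at the given threshold
    if bitwise_not:
        binary = cv2.bitwise_not(binary)                                 # invert so a black guide line becomes white
    return cv2.erode(binary, None, iterations=iterations)                # erode to remove small noise specks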

# The fl object finds how far the guide line is from the image centre. threshold is the run-length threshold for
# consecutive white pixels, i.e. only after that many consecutive white pixels is the guide line considered found.
# direction is the side the search starts from: True starts from the left, False from the right. When driving clockwise
# the guide line is most likely on the right, so False is a reasonable choice.
fl = FollowLine(width=320, height=240, threshold=15, direction=False)

# Serial-port class. It is best not to use this class directly; control the car through CarController instead.
serial = CarSerial(SERIAL, receive=True)
# CarController does not implement PID control; it simply applies a proportional factor. (A map this simple does not need PID yet.)
# If you need PID you can use the pid class in the car directory directly and set this class's proportional parameter to 1.
ctrl = CarController(serial, proportional=0.4)
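
# For comparison, a generic discrete PID update looks like the sketch below. This is a textbook illustration only,
# not the pid class from the car directory mentioned above.
class SimplePID:
    def __init__(self, kp, ki, kd):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.integral = 0.0
        self.last_error = 0.0

    def update(self, error, dt):
        # u = kp*e + ki*integral(e dt) + kd*de/dt
        self.integral += error * dt
        derivative = (error - self.last_error) / dt
        self.last_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative
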
p_offset = 0
while True:
    ret, frame = camera.read()  # read each frame
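    # --- Hedged completion of this truncated loop. The FollowLine and CarController calls below
    # --- (find_line, set_direction) use hypothetical method names, not the documented API.
    image = init.processing(frame)          # binarize the frame (ImageInit.processing is used this way in the other examples)
    display.show(frame, "frame")            # show the raw frame
    display.show(image, "image")            # show the processed frame

    offset = fl.find_line(image, frame)     # hypothetical call: offset of the guide line from the image centre
    if offset is not None:
        ctrl.set_direction(offset)          # hypothetical call: steer proportionally to the offset
        p_offset = offset                   # remember the last valid offset

    if cv2.waitKey(1) == ord('q'):
        break
camera.release()
cv2.destroyAllWindows()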
Example #4
import cv2
import sys
sys.path.append('..')
from cv.show_images import ShowImage
import time
from cv.image_init import ImageInit
LINE_CAMERA_WIDTH = 320
LINE_CAMERA_HEIGHT = 240
camera = cv2.VideoCapture('/dev/video0')
freq = cv2.getTickFrequency()
show_image = ShowImage()

init = ImageInit(320, 240)
ret, frame = camera.read()

while True:
    t1 = time.perf_counter()
    # grab a frame
    ret, frame = camera.read()

    show_image.show(frame)

    image = init.processing(frame)
    show_image.show(image, window_name="image")
    t2 = time.perf_counter()
    frame_rate_calc = 1.0 / (t2 - t1)
    print(frame_rate_calc)
    if cv2.waitKey(1) == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
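
# The unused freq = cv2.getTickFrequency() above hints at OpenCV's tick-based timing; measuring the frame rate that
# way would look roughly like this (sketch only):
#     t1 = cv2.getTickCount()
#     ...                                   # grab and process one frame
#     t2 = cv2.getTickCount()
#     frame_rate_calc = freq / (t2 - t1)    # ticks per second divided by ticks elapsed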
Example #5
        :param render_frame_list: frames to be rendered
        :return: no return value
        """
        if self.is_intersection(find_image=frame, render_image=render_frame_list[0]):
            if not (self.event_function is None):
                self.event_function(intersection_number=self.__intersection_number)


if __name__ == '__main__':
    LINE_CAMERA = '/dev/video1'
    LINE_CAMERA_WIDTH = 320
    LINE_CAMERA_HEIGHT = 240
    camera = cv2.VideoCapture(LINE_CAMERA)
    # ret = camera.set(3, LINE_CAMERA_WIDTH)
    # ret = camera.set(4, LINE_CAMERA_HEIGHT)
    im_p = ImageInit(320, 240)
    fi = FindIntersection(150)  # create the intersection finder once, outside the loop, so its state persists between frames
    while True:
        ret, image = camera.read()
        cv2.imshow("image", image)
        image2 = im_p.processing(image)
        # image_processing(image, width=320, height=240, threshold=248, convert_type="BINARY")
        cv2.imshow("test_one", image2)
        # data = _find(image2, (160, 230), image)
        data2 = fi._find(image2, (160, 200), image)

        # print(data)
        print(data2)
        cv2.imshow("_render_image", image)

        if cv2.waitKey(1) == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()
Example #6
"""
    本实例演示怎样打开摄像头,新建一个ImageInit实例把图片转换为二值图,通过滑动条调整该实例参数,使生成的图片效果最好。
    本实例可用于比赛现场参数的调试,记录滑动条数值后,作为ImageInit实例的参数,可以用于比赛。
"""
import cv2
import sys
sys.path.append('..')
from cv.image_init import ImageInit

CAMERA = '/dev/video1'  # USB camera; with multiple cameras the device files are video0, video1, video2, and so on

camera = cv2.VideoCapture(CAMERA)  # create the VideoCapture object for the camera
init = ImageInit(width=320,
                 height=240,
                 convert_type="BINARY",
                 threshold=250,
                 bitwise_not=True)  # initialize the ImageInit object

# Use the resize_threshold method of the ImageInit class to tune the binarization threshold interactively and find a
# reasonable black/white threshold.
init.resize_threshold(camera)
camera.release()
cv2.destroyAllWindows()
Example #7
import cv2
import sys
sys.path.append("..")                       # 添加模块路径
from cv.image_init import ImageInit         # 导入类
from cv.show_images import ShowImage


CAMERA = '/dev/video0'      # USB camera; with multiple cameras the device files are video0, video1, video2, and so on

camera = cv2.VideoCapture(CAMERA)

# Initialize the display object. It is designed for the car's 7-inch screen: when several video windows need to be
# shown it arranges their positions automatically to avoid overlap, and it resizes every window to 320*240 to fit the small screen.
display = ShowImage()

# This object binarizes (or grayscales) the input image and also erodes it to remove some image noise.
# See the class documentation for the meaning of each parameter.
init = ImageInit(width=320, height=240, convert_type="BINARY", threshold=250)


while True:
    ret, frame = camera.read()          # read each frame
    display.show(frame, "frame")           # show the frame in the "frame" window on the screen
    image = init.processing(frame)         # process the frame
    display.show(image, "image")           # show the processed frame

    # Check the keyboard and exit the loop when the q key is pressed
    if cv2.waitKey(1) == ord('q'):
        break
camera.release()                        # release the camera
cv2.destroyAllWindows()                 # close all windows
Example #8
# Car controller
ctrl = CarController(car_serial=serial, base_speed=80)
# Recognition object
rc = Recognition(device=OD_CAMERA,
                 width=OD_CAMERA_WIDTH,
                 height=OD_CAMERA_HEIGHT,
                 frequency=20)

# Camera used for CV line following
camera = cv2.VideoCapture(LINE_CAMERA)
ret, frame = camera.read()

# Basic image-processing object
img_init = ImageInit(LINE_CAMERA_WIDTH,
                     LINE_CAMERA_HEIGHT,
                     threshold=60,
                     kernel_type=(3, 3),
                     iterations=2,
                     bitwise_not=True)
# Line-following object
qf_line = FollowLine(LINE_CAMERA_WIDTH,
                     LINE_CAMERA_HEIGHT,
                     direction=False,
                     threshold=5)
# Intersection-finding object
fi = FindIntersection(radius=150, threshold=4, repeat_count=2, delay_time=1.7)
# Roadblock-finding object
fr = FindRoadblock(0, 200, 134, 255, 202, 255, 0.05)
# Zebra-crossing-finding object
fzc = FindZebraCrossing(threshold=4, floor_line_count=3)
# Video-recording object
# vw = VideoWriter("video/" + time.strftime("%Y%m%d%H%M%S"), 320, 240)
Example #9
import cv2
import os
import sys

sys.path.append('..')
sys.path.append('../FaceMaskDetection/')
from cv.show_images import ShowImage
import time
from cv.image_init import ImageInit
from FaceMaskDetection.mask_detect import MaskDetect

LINE_CAMERA_WIDTH = 260
LINE_CAMERA_HEIGHT = 260
camera = cv2.VideoCapture('/dev/video0')
freq = cv2.getTickFrequency()
show_image = ShowImage()

init = ImageInit(260, 260)
j_path = os.path.abspath(os.path.dirname(
    os.getcwd())) + '/FaceMaskDetection/models/face_mask_detection.json'
w_path = os.path.abspath(os.path.dirname(
    os.getcwd())) + '/FaceMaskDetection/models/face_mask_detection.hdf5'

mask = MaskDetect(json_path=j_path,
                  weight_path=w_path,
                  width=LINE_CAMERA_WIDTH,
                  height=LINE_CAMERA_HEIGHT)

while True:
    t1 = time.perf_counter()
    # grab a frame
    ret, frame = camera.read()
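    # --- Hedged completion of this truncated loop. MaskDetect's method name and return value below are
    # --- assumptions, not the actual FaceMaskDetection wrapper API.
    result = mask.detect(frame)                # hypothetical call: run mask detection on the frame
    print(result)                              # inspect the (assumed) detection result
    show_image.show(frame, window_name="frame")
    t2 = time.perf_counter()
    print(1.0 / (t2 - t1))                     # frames per second, as in the earlier timing example
    if cv2.waitKey(1) == ord('q'):
        break
camera.release()
cv2.destroyAllWindows()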
Example #10
        self._nonmax = nonmax


    def get_key_point(self, frame, render_image=None):
        # enable non-maximum suppression on the FAST detector when requested
        if self._nonmax:
            self._fast.setNonmaxSuppression(True)
        kp = self._fast.detect(frame, None)
        # if a render image is supplied, draw the keypoints on it and return both
        if render_image is not None:
            return kp, cv2.drawKeypoints(render_image, kp, None, color=(255, 0, 0))
        return kp, None



if __name__ == '__main__':
    fkp = FindKeyPoint(False)
    capture = cv2.VideoCapture(0)
    im_p = ImageInit()
    while True:
        ret, frame = capture.read()
        render_image = frame.copy()

        image2 = im_p.processing(frame)
        _, rimg = fkp.get_key_point(image2, render_image)
        cv2.imshow("1", rimg)
        cv2.imshow('frame', render_image)

        if cv2.waitKey(1) == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()