Example #1
0
 def init_parser(self, parser):
     """Add detection-specific command line options to *parser*.

     Registers, on top of the base ``Detect`` options:
       --detect_node_id  (int, default 0)  node id to monitor
       --no_sim          (flag)            disable the fs simulation
     """
     Detect.init_parser(self, parser)
     parser.add_argument('--detect_node_id', default=0, type=int,
             help=""" specify the node id you want to monitor,
             default value is 0""")
     # Bug fix: corrected help-text typo "simulaiton" -> "simulation".
     parser.add_argument('--no_sim', default=False, action='store_true',
             help="""turn on this switch to disable the fs simulation""")
Example #2
0
    def __init__(self, argv):
        """Initialize on top of Detect and attach a flow-file simulator.

        Derives the per-node flow file paths under ROOT/Simulator and
        points ``self.args.data`` at the simulated flow output.
        """
        Detect.__init__(self, argv)
        self.sim = Sim(argv)

        node_id = self.args.detect_node_id
        self.output_flow_file = (
            self.ROOT + '/Simulator/n%i_flow.txt' % node_id)
        self.args.data = self.output_flow_file

        self.export_abnormal_flow_file = (
            self.ROOT + '/Simulator/abnormal_n%i_flow.txt' % node_id)
Example #3
0
 def detect(self):
     """Run the rule-based detection pipeline from Detect.py.

     Stores the resulting signal dataframe on ``self.data_rule_df``
     and persists it as CSV under ``self.filename_rule``.
     """
     rule_detector = Detect(self.filename_pro)
     rule_detector.process()
     rule_detector.trend()
     self.data_rule_df = rule_detector.signal()
     rule_detector.result()
     self.save(self.data_rule_df, self.filename_rule, 'csv')
Example #4
0
    def btn_click(self):
        """Dispatch a toolbar button click by the sender's objectName.

        Reads the segmentation options from the UI, runs RectChar on the
        currently loaded image, then either renders an intermediate view
        (gray / binary / located boxes) or recognizes the characters.
        """
        object_target = self.lbl_image_after
        object_origin_name = self.sender().objectName()

        # Segmentation options taken from the checkboxes / spinboxes.
        no_line = self.get_boolean(self.ck_noline.checkState())
        cut = self.get_boolean(self.ck_cut.checkState())
        join = self.get_boolean(self.ck_join.checkState())
        p_font = self.get_boolean(self.ck_print.checkState())
        threshold_binary = int(self.sb_binary_value.text())
        threshold_ratio = float(self.sb_ratio_value.text())

        rect_char = RectChar(no_line, cut, join, p_font, threshold_binary,
                             threshold_ratio)
        gray_image = rect_char.get_gray_image(self.original_image)
        binary_image = rect_char.get_binary_image(gray_image)
        result_list = rect_char.get_char_list(binary_image)
        result_image = None

        if object_origin_name == "btn_split":
            print("123")
        elif object_origin_name == "btn_gray":
            result_image = self.get_pix_from_mat(gray_image,
                                                 object_target.width(),
                                                 object_target.height())
        elif object_origin_name == "btn_brinary":
            result_image = self.get_pix_from_mat(binary_image,
                                                 object_target.width(),
                                                 object_target.height())
        elif object_origin_name == "btn_location":
            # Draw a red box around every located character rect
            # (rects are [x1, y1, x2, y2]).
            result_image = self.original_image.copy()
            for char in result_list:
                cv2.rectangle(result_image, (char[0], char[1]),
                              (char[2], char[3]), (0, 0, 255))
            result_image = self.get_pix_from_mat(result_image,
                                                 object_target.width(),
                                                 object_target.height())
        elif object_origin_name == "btn_recognize":
            image_list = []
            rect_temp = None
            for rect in result_list:
                # A None marker is inserted when this box overlaps the
                # previous one; find_class presumably treats it as a
                # composite-glyph separator — TODO confirm.
                if rect_temp is not None and (rect_temp[2] > rect[0]
                                              and rect_temp[1] < rect[3]):
                    image_list.append(None)
                # NOTE(review): the trailing ':' index assumes gray_image
                # is still 3-channel — confirm get_gray_image keeps 3 dims.
                image_list.append(
                    gray_image[rect[1]:rect[3], rect[0]:rect[2], :] / 255)
                rect_temp = rect
            detect = Detect()
            result_str = detect.find_class(image_list)
            self.text_result.setText(result_str)
            print(result_str)

        if result_image is not None:
            object_target.setPixmap(result_image)
Example #5
0
 def init_parser(self, parser):
     """Add detection-specific command line options to *parser*.

     Registers, on top of the base ``Detect`` options:
       --detect_node_id  (int, default 0)  node id to monitor
       --no_sim          (flag)            disable the fs simulation
     """
     Detect.init_parser(self, parser)
     parser.add_argument('--detect_node_id',
                         default=0,
                         type=int,
                         help=""" specify the node id you want to monitor,
             default value is 0""")
     # Bug fix: corrected help-text typo "simulaiton" -> "simulation".
     parser.add_argument(
         '--no_sim',
         default=False,
         action='store_true',
         help="""turn on this switch to disable the fs simulation""")
Example #6
0
class Play():
    """Game session: audio playback, pose detection and pygame view for one song."""

    def __init__(self, song):
        # Audio device is pre-opened before pygame.init() — presumably an
        # init-ordering workaround; confirm before reordering.
        self.audio = Audio()
        self.audio.preOpen()
        self.detect = Detect()
        pygame.init()
        self.audio.open()
        self.song = LoadSong(song).song
        pygame.display.set_mode((WIDTH, HEIGHT))
        pygame.display.set_caption('DanceDanceCV')
        screen = pygame.display.get_surface()
        self.view = View(screen, self.detect, self.song)

    def run(self):
        """Run the frame loop; returns 'quit' when the window is closed."""
        playing = True
        while playing:
            for events in pygame.event.get():
                if events.type == QUIT:
                    return 'quit'
            self.detect.run()
            self.view.run()
            pygame.display.update()
        # NOTE(review): `playing` is never set False, so the loop only exits
        # via the QUIT return above and this line is unreachable as written.
        pygame.quit()
Example #7
0
class Ui_MainWindow(QtWidgets.QWidget):
    """Main window: stereo-camera capture, SSD detection and SGBM depth display."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.timer_camera = QtCore.QTimer()  # drives show_camera() periodically
        self.set_ui()
        self.slot_init()
        self.TIME = 0
        self.detector = Detect()
        self.sgbm = SGBM()
        self.sgbm.Init_SGBM()
        self.Camera = cv.VideoCapture()
        self.CAM_NUM = 1  # camera device index opened on start

    def set_ui(self):
        """Build the static layout: a column of buttons plus the video label."""
        self.__layout_main = QtWidgets.QHBoxLayout()  # main layout
        self.__layout_data_show = QtWidgets.QVBoxLayout()  # data (video) display layout
        self.__layout_fun_button = QtWidgets.QVBoxLayout()  # button layout
        self.button_start = QtWidgets.QPushButton('开始识别')  # start-recognition button
        self.button_close = QtWidgets.QPushButton('退出')  # quit button
        self.button_model2 = QtWidgets.QPushButton('MobileNet2')  # select MobileNet2 model
        self.button_model3 = QtWidgets.QPushButton('MobileNet3')  # select MobileNet3 model
        self.button_start.setMinimumHeight(200)  # button sizes
        self.button_close.setMinimumHeight(200)
        self.button_model2.setMinimumHeight(200)
        self.button_model3.setMinimumHeight(200)

        self.button_close.move(10, 100)  # move the quit button
        # '''信息显示'''
        # self.textEdit = QTextEdit()
        # self.textEdit.setFixedSize(400, 800)
        # Video display area (the bare string below is a no-op marker).
        '''显示的视频窗口'''
        self.pix = QPixmap('./background.jpg')
        self.label_show_camera = QtWidgets.QLabel()  # label that shows the video
        self.label_show_camera.setFixedSize(960,
                                            720)  # fixed size of the video label
        self.label_show_camera.setStyleSheet(
            'background-color:rgb(96,96,96)')  # background colour
        self.label_show_camera.setPixmap(self.pix)
        '''把某些控件加入到总布局中'''
        self.__layout_main.addLayout(self.__layout_fun_button)  # button column into main layout
        self.__layout_main.addWidget(
            self.label_show_camera)  # video label into main layout
        # self.__layout_main.addWidget(self.textEdit)
        '''把按键加入到按键布局中'''
        self.__layout_fun_button.addWidget(
            self.button_start)  # start button into the button column
        self.__layout_fun_button.addWidget(
            self.button_close)  # quit button into the button column
        self.__layout_fun_button.addWidget(
            self.button_model2)  # model-2 button into the button column
        self.__layout_fun_button.addWidget(
            self.button_model3)  # model-3 button into the button column
        '''总布局布置好后就可以把总布局作为参数传入下面函数'''
        self.setLayout(self.__layout_main)  # widgets become visible once the layout is set

    '''初始化所有槽函数'''

    def slot_init(self):
        """Connect every button and the camera timer to its slot."""
        self.button_start.clicked.connect(
            self.button_start_clicked)  # start/stop recognition
        self.button_model2.clicked.connect(
            self.button_model2_clicked)  # load the MobileNet2 network
        self.button_model3.clicked.connect(
            self.button_model3_clicked)  # load the MobileNet3 network
        self.timer_camera.timeout.connect(
            self.show_camera)  # on each timer tick, grab and show a frame
        self.button_close.clicked.connect(
            self.close
        )  # close() is inherited from QtWidgets.QWidget and exits the program

    '''槽函数之一'''

    def button_start_clicked(self):
        """Toggle capture: open the camera and start the timer, or stop and clear."""
        if self.timer_camera.isActive() == False:  # timer not running yet
            flag = self.Camera.open(self.CAM_NUM)
            if flag == False:
                msg = QtWidgets.QMessageBox.warning(
                    self,
                    'warning',
                    "请检查相机于电脑是否连接正确",
                    buttons=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(1)  # tick every 1 ms to fetch frames
                self.button_start.setText('结束识别')
        else:
            self.timer_camera.stop()  # stop the timer
            self.Camera.release()
            self.label_show_camera.clear()  # clear the video area
            self.button_start.setText('开始识别')

    def button_model2_clicked(self):
        # Switch the detector to the MobileNetV2-SSD network.
        self.detector.Init_Net("mb2-ssd")

    def button_model3_clicked(self):
        # Switch the detector to the MobileNetV3-Large-SSD network.
        self.detector.Init_Net("mb3-large-ssd")

    def SegmentFrame(self, Fream):
        """Split a side-by-side stereo frame into 320x240 left/right halves."""
        double = cv.resize(Fream, (640, 240), cv.INTER_AREA)
        left = double[0:240, 0:320]
        right = double[0:240, 320:640]
        return left, right

    def show_camera(self):
        """Grab frames, run detection and SGBM depth, and paint a 2x2 mosaic."""
        # Camera = cv.VideoCapture(1)
        # if not Camera.isOpened():
        #     print("Could not open the Camera")
        #     sys.exit()

        ret, Fream = self.Camera.read()
        # cv.imwrite("Two.jpg",Fream)
        os.system("./camera.sh")  # external camera setup script — runs every tick
        while (1):
            ret, Fream = self.Camera.read()
            if not ret:
                break
            LeftImage, RightImage = self.SegmentFrame(Fream)
            # start = time()
            result_rect = self.detector.detect(LeftImage)
            distance, disp = self.sgbm.Coordination(LeftImage, RightImage,
                                                    result_rect)
            result = LeftImage.copy()
            # Annotate each detection with its box and estimated distance.
            for i in range(0, len(result_rect)):
                cv.rectangle(result, (result_rect[i][0], result_rect[i][1]),
                             (result_rect[i][2], result_rect[i][3]),
                             (255, 255, 0), 4)
                cv.putText(
                    result,
                    str(distance[i]),
                    (result_rect[i][0] + 20, result_rect[i][1] + 40),
                    cv.FONT_HERSHEY_SIMPLEX,
                    1,  # font scale
                    (255, 0, 255),
                    2)  # line type
            # end = time()
            # seconds = end - start
            # fps = 1/seconds
            # print( "Estimated frames per second : {0}".format(fps))
            # cv.imshow("left",LeftImage)
            # cv.imshow("right",RightImage)
            # cv.imshow("disp",disp)
            # cv.imshow("result", coordinate)
            disp = cv.cvtColor(disp, cv.COLOR_GRAY2BGR)
            htich1 = np.hstack((LeftImage, RightImage))  # top row: raw L | R
            htich2 = np.hstack((disp, result))  # bottom row: disparity | result
            vtich = np.vstack((htich1, htich2))
            # cv.imshow("result", vtich)

            if cv.waitKey(1) == ord('q'):
                break
            show = cv.resize(vtich, (960, 720))  # scale mosaic to the 960x720 label
            show = cv.cvtColor(show, cv.COLOR_BGR2RGB)
            # while(1):
            showImage = QtGui.QImage(
                show.data, show.shape[1], show.shape[0],
                QtGui.QImage.Format_RGB888)  # wrap the frame as a QImage
            self.label_show_camera.setPixmap(
                QtGui.QPixmap.fromImage(showImage))  # paint it on the label
            # NOTE(review): this return exits after the first displayed frame,
            # so the while-loop runs at most one full iteration per timer tick.
            return
Example #8
0
 def run(self):
     """Run the fs simulation (unless --no_sim was given), then detect.

     Returns the detector produced by ``Detect.run``.
     """
     simulate = not self.args.no_sim
     if simulate:
         self.sim.run()
     Detect.run(self)
     return self.detector
Example #9
0
# Webcam capture loop: run mask/nose detection on every grabbed frame.
cap = cv2.VideoCapture(cv2.CAP_DSHOW)
while True:
    ret, image = cap.read()

    if ret:
        no_mask_faces = []

        # Frame-rate bookkeeping.
        # NOTE(review): prevTime must be initialised before this loop
        # (not visible in this chunk) or the first subtraction fails.
        curTime = time.time()
        sec = curTime - prevTime
        prevTime = curTime
        fps = 1 / (sec)

        print(fps)
        Mask_Detect = Detect(image)

        # Detect faces in the frame.
        Mask_Detect.detectFace()

        if len(Mask_Detect.face_lst) != 0:

            # Detect mask & nose (mask-in-face) for every found face.
            Mask_Detect.detectMaskNose()

            # Derive per-face status from the detection results.
            for face_info in Mask_Detect.face_lst:
                x1, y1, x2, y2 = face_info["roi_face"]
                mask_status = face_info["with_mask"]
                nose_status = face_info["with_nose"]
                color = (0, 0, 0)
Example #10
0
        for nX in range(length):
            if each_col[nX] ^ start:
                if start is False:
                    start = True
                    start_index = nX
                else:
                    start = False
                    col_list.append([start_index, nX])
        return col_list


if __name__ == '__main__':
    # Demo: segment a handwritten-character image and recognize each glyph.
    image = cv2.imread("汉字_手写.jpg")
    from Detect import Detect

    char_locator = RectChar()
    gray = char_locator.get_gray_image(image)
    binary = char_locator.get_binary_image(gray)
    char_rects = char_locator.get_char_list(binary)

    # Build per-character crops; a None marker separates boxes that
    # overlap their predecessor.
    image_list = []
    previous = None
    for box in char_rects:
        overlaps = (previous is not None
                    and previous[2] > box[0]
                    and previous[1] < box[3])
        if overlaps:
            image_list.append(None)
        image_list.append(image[box[1]:box[3], box[0]:box[2], :] / 255)
        previous = box

    detect = Detect()
    result = detect.find_class(image_list)
    print(result)
Example #11
0
import os
import sys
import cv2 as cv
from Detect import Detect
from SGBM import SGBM
from time import time
import numpy as np

# Initialise the SSD detector and the SGBM stereo matcher.
detector = Detect()
detector.Init_Net()
sgbm = SGBM()
sgbm.Init_SGBM()

# Open camera device 1; abort if it is unavailable.
Camera = cv.VideoCapture(1)
if not Camera.isOpened():
    print("Could not open the Camera")
    sys.exit()

# Grab one frame, save it, then run the external camera setup script.
ret, Fream = Camera.read()
cv.imwrite("Two.jpg", Fream)
os.system("./camera.sh")


def SegmentFrame(Fream):
    """Split a side-by-side stereo frame into left/right 320x240 halves."""
    stereo = cv.resize(Fream, (640, 240), cv.INTER_AREA)
    left_half = stereo[0:240, 0:320]
    right_half = stereo[0:240, 320:640]
    return left_half, right_half


while (True):
Example #12
0
 def run(self):
     """Optionally run the fs simulation, then run the base detection.

     Returns the detector produced by ``Detect.run``.
     """
     skip_sim = self.args.no_sim
     if not skip_sim:
         self.sim.run()
     Detect.run(self)
     return self.detector
Example #13
0
class Start(Thread):
    """Flight supervisor thread.

    Connects to AirSim, flies a route via Control, optionally runs a
    Detect thread with the given sensor algorithms and an avoidance
    strategy, then supervises the flight until timeout, collision or
    an external stop request.
    """

    avoidThread = None
    detect = None

    def __init__(self,
                 routePoints,
                 sensorsAlgorithms={'Vision': [VisionDetectSVM]},
                 avoidClass=FixAvoid,
                 comunication=AirSimCommunication,
                 fusionAlgorithm=FusionData_Mean,
                 configPath='config.yml',
                 startPoint=None):
        # NOTE(review): the mutable dict default is shared across calls;
        # it is never mutated here, but treat it as read-only.
        Thread.__init__(self)
        self.start_point = startPoint
        self.status = 'start'
        # vehicleComunication = comunication.getVehicle()
        # Connect to the AirSim simulator.
        self.vehicleComunication = AirSimCommunication()
        self.control = Control(self.vehicleComunication, routePoints)
        self.unrealControl = UnrealCommunication()
        self.stop = False

        with open(configPath, 'r') as file_config:
            self.config = yaml.full_load(file_config)

        if avoidClass is not None:
            self.avoidThread = avoidClass(self, self.control)
        if sensorsAlgorithms is not None:
            self.detect = Detect(self,
                                 self.vehicleComunication,
                                 sensorsAlgorithms,
                                 self.avoidThread,
                                 fusionAlgorithm=fusionAlgorithm)

        #self.start()

    def start_run(self):
        """Take off, fly to the optional start point and begin the route."""
        self.control.takeOff()
        # Go to the configured start point first, if any.
        if self.start_point:
            print("Start point", self.start_point)
            self.vehicleComunication.moveToPoint(self.start_point[:3],
                                                 self.start_point[3], True)
        # Start moving along the path.
        self.control.start()
        time.sleep(2)
        # Launch the detection thread once the route is underway.
        if self.detect is not None:
            self.detect.start()

    def run(self):
        """Thread body: fly until max flight time elapses or stop is set."""
        self.start_run()
        # Wait for timeout or collision.
        max_time = time.time() + self.config['algorithm']['time_max']
        while not self.stop:
            time.sleep(1)
            if time.time() >= max_time:
                print("Max time execution")
                break
        # Reset the plane.
        self.end_run()

    def end_run(self):
        """Shut down the detect/control threads and reset the simulated plane.

        Bug fix: the original set ``self.detect.stop`` (and later did an
        unconditional ``del self.detect``) BEFORE the ``is not None``
        guard, raising AttributeError whenever no sensor algorithms were
        configured. All detect handling is now inside the guard.
        """
        if self.detect is not None:
            self.detect.stop = True
            # Wait for the detection thread to finish.
            self.detect.join()
            # Drop the thread reference so it can be collected.
            del self.detect
        # Stop the route controller.
        if self.control is not None:
            self.control.join()
        # Reset the plane in the simulator.
        self.unrealControl.reset_plane()
        self.vehicleComunication.client.reset()

    def get_status(self):
        """Return the current flight status string."""
        return self.status

    def set_status(
        self,
        status,
    ):
        """Record a new flight status; a collision stops the whole flight."""
        print("Voo status:", status)
        self.status = status
        if status == 'collision':
            # Bug fix: guard against self.detect being None before
            # flagging it to stop.
            if self.detect is not None:
                self.detect.stop = True
            self.stop = True
Example #14
0
class MyApp(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main window wiring the designer UI to cropping, preprocessing and OCR."""

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        self.detect = Detect()  # recognizer, created once and reused
        self.original_image = None  # BGR mat of the loaded image
        self.after_image = None  # last processed mat shown on the right
        self.pix_map = None  # pixmap currently shown on the origin label
        self.start = False  # True while a crop drag is armed/in progress
        self.start_point = None  # crop drag anchor (widget coords)
        self.end_point = None  # crop drag current point (widget coords)

    def resizeEvent(self, a0: QtGui.QResizeEvent):
        self.lbl_resize_event()

    def eventFilter(self, a0: 'QObject', a1: 'QEvent'):
        """Handle mouse-driven cropping on the origin label.

        Left-press anchors the crop, mouse-move previews the rectangle,
        right-press cancels, left-release applies the crop to
        ``self.original_image``.
        """
        if a0 == self.lbl_image_origin:
            if self.original_image is None or self.start is False:
                return False
            if a1.type() == QtCore.QEvent.MouseButtonPress and a1.button() == 1:
                self.start_point = a1.pos()
                return True
            elif a1.type() == QtCore.QEvent.MouseMove and self.start_point is not None:
                self.end_point = a1.pos()
                self.draw_rect(a0, self.pix_map, a0.size(), self.pix_map.size(), self.start_point, self.end_point)
                return True
            elif a1.type() == QtCore.QEvent.MouseButtonPress and a1.button() == 2:
                # Right button: cancel the crop and restore the pixmap.
                self.start_point = None
                self.start = False
                a0.setToolTip("")
                a0.setCursor(QtCore.Qt.ArrowCursor)
                a0.setPixmap(self.pix_map)
                return True
            elif a1.type() == QtCore.QEvent.MouseButtonRelease and a1.button() == 1:
                if self.start_point is not None:
                    self.end_point = a1.pos()
                    distance = self.start_point - self.end_point
                    # Ignore accidental tiny drags.
                    if abs(distance.y() * distance.x()) < 50:
                        return True
                    location = self.draw_rect(a0, self.pix_map, a0.size(), self.pix_map.size(), self.start_point, self.end_point)
                    self.original_image = self.original_image[location[1]:location[3], location[0]:location[2], :]
                    self.pix_map = self.get_pix_from_mat(self.original_image, a0.width(), a0.height())
                    self.start_point = None
                    self.end_point = None
                    self.start = False
                    a0.setToolTip("")
                    a0.setCursor(QtCore.Qt.ArrowCursor)
                    a0.setPixmap(self.pix_map)
                return True
            else:
                return False
        else:
            # Bug fix: the original called self.eventFilter(a0, a1) here,
            # recursing infinitely for any watched object other than
            # lbl_image_origin. Defer to the base implementation instead.
            return super().eventFilter(a0, a1)

    def draw_rect(self, q_object, pix_map, out_size, in_size, start_x_y, end_x_y):
        """Draw the crop-rectangle preview and return its image-space box.

        Returns [x1, y1, x2, y2] scaled from widget coordinates back to
        original-image pixels.
        """
        distance = start_x_y - end_x_y
        min_x = min(start_x_y.x(), end_x_y.x())
        min_y = min(start_x_y.y(), end_x_y.y())
        width = abs(distance.x())
        height = abs(distance.y())
        temp = pix_map.copy()
        painter = QtGui.QPainter(temp)
        painter.setPen(QtGui.QColor(255, 0, 0))
        # The pixmap is centred in the label, so offset by the margins.
        v_space, h_space = self.clac_space(out_size, in_size)
        ratio_x = self.original_image.shape[1] / pix_map.width()
        ratio_y = self.original_image.shape[0] / pix_map.height()
        painter.drawRect(min_x - h_space, min_y - v_space, width, height)
        painter.end()
        q_object.setPixmap(temp)
        return [int((min_x - h_space) * ratio_x),
                int((min_y - v_space) * ratio_y),
                int((min_x - h_space + width) * ratio_x),
                int((min_y - v_space + height) * ratio_y)]

    def clac_space(self, out_size, in_size):
        """Return (vertical, horizontal) margins when in_size is centred in out_size."""
        v_space = (out_size.height() - in_size.height()) // 2
        h_space = (out_size.width() - in_size.width()) // 2
        return v_space, h_space

    def get_pix_from_mat(self, image, width, height):
        """Convert a BGR mat into a QPixmap scaled to fit width x height."""
        h, w, channel = image.shape
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(image, w, h, w * channel, QtGui.QImage.Format_RGB888)
        image = QtGui.QPixmap.fromImage(image)
        image = image.scaled(width - 4, height - 4, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        return image

    def get_boolean(self, check_state):
        """Map a Qt check state to bool (checked/partially checked -> True)."""
        # Idiom fix: the comparison already yields the bool.
        return check_state != 0

    def open_image(self):
        """Ask for an image file and show it on the origin label."""
        target = self.lbl_image_origin
        file_name = QtWidgets.QFileDialog.getOpenFileName(self, "Open Image", "", "Image File(*.jpeg *.jpg *.png *.bmp)")
        if file_name[0] == "":
            return
        self.original_image = cv2.imread(file_name[0])
        self.pix_map = self.get_pix_from_mat(self.original_image, target.width(), target.height())
        target.setPixmap(self.pix_map)

    def sb_value_change(self):
        """Mirror a spinbox value into its display label (scaled for the ratio box)."""
        object_origin = {
            "sb_binary": self.sb_binary,
            "sb_ratio": self.sb_ratio
        }
        object_target = {
            "sb_binary": self.sb_binary_value,
            "sb_ratio": self.sb_ratio_value
        }
        object_ratio = {
            "sb_binary": 1,
            "sb_ratio": 0.01
        }
        origin = object_origin[self.sender().objectName()]
        target = object_target[self.sender().objectName()]
        value = origin.value() * object_ratio[self.sender().objectName()]
        target.setText("{}".format(value) if value % 1 == 0 else "{:.2f}".format(value))

    def ck_value_change(self):
        """Enable the ratio spinbox only while the 'print' checkbox is checked."""
        object_origin = {
            "ck_print": self.ck_print
        }
        object_target = {
            "ck_print": self.sb_ratio
        }
        origin = object_origin[self.sender().objectName()]
        target = object_target[self.sender().objectName()]
        target.setEnabled(origin.checkState())

    def btn_click(self):
        """Dispatch toolbar buttons: arm crop, show preprocess views, or recognize."""
        object_target = self.lbl_image_after
        object_origin_name = self.sender().objectName()

        if object_origin_name == "btn_split":
            if self.original_image is None:
                return
            # Arm crop mode; eventFilter drives the rest of the interaction.
            self.start = True
            self.lbl_image_origin.setCursor(QtCore.Qt.CrossCursor)
            self.lbl_image_origin.setToolTip("通过鼠标左键拖动即可完成裁剪,中途可右键取消")
            return

        no_line = self.get_boolean(self.ck_noline.checkState())
        cut = self.get_boolean(self.ck_cut.checkState())
        join = self.get_boolean(self.ck_join.checkState())
        p_font = self.get_boolean(self.ck_print.checkState())
        threshold_binary = int(self.sb_binary_value.text())
        threshold_ratio = float(self.sb_ratio_value.text())

        rect_char = RectChar(no_line, cut, join, p_font, threshold_binary, threshold_ratio)
        gray_image = rect_char.get_gray_image(self.original_image)
        binary_image = rect_char.get_binary_image(gray_image)
        result_list = rect_char.get_char_list(binary_image)
        result_image = None

        if object_origin_name == "btn_gray":
            self.after_image = gray_image
            result_image = self.get_pix_from_mat(gray_image, object_target.width(), object_target.height())
        elif object_origin_name == "btn_brinary":
            self.after_image = binary_image
            result_image = self.get_pix_from_mat(binary_image, object_target.width(), object_target.height())
        elif object_origin_name == "btn_location":
            result_image = self.original_image.copy()
            for char in result_list:
                cv2.rectangle(result_image, (char[0], char[1]), (char[2], char[3]), (0, 0, 255))
            self.after_image = result_image
            result_image = self.get_pix_from_mat(result_image, object_target.width(), object_target.height())
        elif object_origin_name == "btn_recognize":
            image_list = []
            rect_temp = None
            for rect in result_list:
                # Boxes overlapping their predecessor get a None separator.
                if rect_temp is not None and (rect_temp[2] > rect[0] and rect_temp[1] < rect[3]):
                    image_list.append(None)
                image_list.append(gray_image[rect[1]:rect[3], rect[0]:rect[2], :] / 255)
                rect_temp = rect
            result_str = self.detect.find_class(image_list)
            self.text_result.setText(result_str)

        if result_image is not None:
            object_target.setPixmap(result_image)

    def lbl_resize_event(self):
        """Re-scale both displayed pixmaps after the window is resized."""
        lbl = [self.lbl_image_origin, self.lbl_image_after]
        img = [self.original_image, self.after_image]

        for index in range(len(lbl)):
            if img[index] is None:
                continue
            target = lbl[index]
            image = self.get_pix_from_mat(img[index], target.width(), target.height())
            if index == 0:
                self.pix_map = image
            target.setPixmap(image)