Beispiel #1
0
    def __init__(self,
                 routePoints,
                 sensorsAlgorithms={'Vision': [VisionDetectSVM]},
                 avoidClass=FixAvoid,
                 comunication=AirSimCommunication,
                 fusionAlgorithm=FusionData_Mean,
                 configPath='config.yml',
                 startPoint=None):
        """Initialize the vehicle-control thread.

        Args:
            routePoints: waypoints handed to the Control instance.
            sensorsAlgorithms: mapping of sensor name -> list of detection
                algorithm classes; pass None to disable detection.
                NOTE(review): mutable default dict -- harmless only if no
                callee mutates it; confirm Detect() leaves it untouched.
            avoidClass: obstacle-avoidance class, or None to disable it.
            comunication: NOTE(review): currently ignored -- the AirSim
                connection below is hard-coded (see commented-out call);
                confirm whether callers expect this parameter to be used.
            fusionAlgorithm: fusion strategy forwarded to Detect.
            configPath: path to the YAML configuration file.
            startPoint: optional start position stored on the instance.
        """
        Thread.__init__(self)
        self.start_point = startPoint
        self.status = 'start'
        # vehicleComunication = comunication.getVehicle()
        # Connecting to the AirSim simulator (hard-coded; see note above).
        self.vehicleComunication = AirSimCommunication()
        self.control = Control(self.vehicleComunication, routePoints)
        self.unrealControl = UnrealCommunication()
        self.stop = False

        with open(configPath, 'r') as file_config:
            self.config = yaml.full_load(file_config)

        # Bug fix: avoidThread must always exist -- the original only bound
        # it when avoidClass was given, so the Detect() call below raised
        # AttributeError for avoidClass=None with sensors enabled.
        self.avoidThread = None
        if avoidClass is not None:
            self.avoidThread = avoidClass(self, self.control)
        if sensorsAlgorithms is not None:
            self.detect = Detect(self,
                                 self.vehicleComunication,
                                 sensorsAlgorithms,
                                 self.avoidThread,
                                 fusionAlgorithm=fusionAlgorithm)
Beispiel #2
0
 def detect(self):
     """Run the rule-based detection pipeline on the processed data file.

     Stores the detection signal on ``self.data_rule_df`` and saves it
     to ``self.filename_rule`` in CSV format via ``self.save``.
     """
     # Call into Detect.py to perform the rule-based detection.
     Det = Detect(self.filename_pro)
     Det.process()
     Det.trend()
     # signal() presumably returns a tabular result -- confirm in Detect.py.
     self.data_rule_df = Det.signal()
     Det.result()
     self.save(self.data_rule_df, self.filename_rule, 'csv')
Beispiel #3
0
 def __init__(self, parent=None):
     """Build the widget: UI, slots, detector, SGBM matcher, camera handle.

     The camera capture is created here but no device is opened yet;
     ``CAM_NUM`` records the index to open later.
     """
     super().__init__(parent)
     # Timer presumably used to drive periodic frame grabs -- TODO confirm
     # against slot_init().
     self.timer_camera = QtCore.QTimer()
     self.set_ui()
     self.slot_init()
     self.TIME = 0
     self.detector = Detect()
     self.sgbm = SGBM()
     self.sgbm.Init_SGBM()
     # Unbound capture object; a device is attached elsewhere.
     self.Camera = cv.VideoCapture()
     self.CAM_NUM = 1
Beispiel #4
0
 def __init__(self, song):
     """Set up audio, the detection helper, the pygame window, and the view.

     Args:
         song: identifier/path handed to LoadSong -- exact type not
             visible here; confirm against LoadSong.
     """
     self.audio = Audio()
     self.audio.preOpen()  # pre-open kept before pygame.init() (original order)
     self.detect = Detect()
     pygame.init()
     self.audio.open()
     self.song = LoadSong(song).song
     pygame.display.set_mode((WIDTH, HEIGHT))
     pygame.display.set_caption('DanceDanceCV')
     # The display surface must exist before the View is constructed.
     screen = pygame.display.get_surface()
     self.view = View(screen, self.detect, self.song)
Beispiel #5
0
def detect_key(wav_location):
    """Detect the tempo and musical key of the file at *wav_location*.

    Prints both key analyses when Krumhansl and AardenEssen disagree,
    otherwise prints the single agreed key.

    Bug fix: the original called ``sys.exit(1)`` right after the tempo
    detection, which made all of the key analysis below unreachable; the
    unreachable code also referenced an undefined name ``file_name``
    (a NameError had it ever run). Use the ``wav_location`` argument.

    Args:
        wav_location: path to the input file; also handed to Detect().

    Returns:
        Tuple of (bpm, key1, key2) -- new, backward-compatible return
        (the original returned nothing).
    """
    D = Detect()
    bpm = D.detect(wav_location)
    # NOTE(review): music21's converter is aimed at symbolic formats --
    # confirm that parsing this file type is supported by the project.
    score = music21.converter.parse(wav_location)
    key1 = score.analyze('Krumhansl')
    key2 = score.analyze('AardenEssen')
    if key1 != key2:
        print(key1, key2)
    else:
        print(key1)
    return bpm, key1, key2
Beispiel #6
0
 def __init__(self):
     """Build the main window and zero out the image/interaction state."""
     QtWidgets.QMainWindow.__init__(self)
     Ui_MainWindow.__init__(self)
     self.setupUi(self)
     self.detect         = Detect()  # detection/recognition helper
     self.original_image = None      # input image, set elsewhere
     self.after_image    = None      # processed image, set elsewhere
     self.pix_map        = None
     # start/start_point/end_point look like drag-selection state --
     # TODO confirm against the mouse event handlers.
     self.start          = False
     self.start_point    = None
     self.end_point      = None
Beispiel #7
0
    def btn_click(self):
        """Dispatch a button click by the sender's object name.

        Reads the checkbox/threshold settings from the UI, runs the
        RectChar segmentation pipeline on the loaded image, then shows
        the gray image, binary image, character locations, or the
        recognition result depending on which button fired.
        """
        object_target = self.lbl_image_after
        object_origin_name = self.sender().objectName()

        # Segmentation options taken from the UI controls.
        no_line = self.get_boolean(self.ck_noline.checkState())
        cut = self.get_boolean(self.ck_cut.checkState())
        join = self.get_boolean(self.ck_join.checkState())
        p_font = self.get_boolean(self.ck_print.checkState())
        threshold_binary = int(self.sb_binary_value.text())
        threshold_ratio = float(self.sb_ratio_value.text())

        # Pipeline: gray -> binary -> character bounding boxes.
        # NOTE(review): this runs for every button, including ones
        # (btn_split, btn_gray) that do not need the later stages.
        rect_char = RectChar(no_line, cut, join, p_font, threshold_binary,
                             threshold_ratio)
        gray_image = rect_char.get_gray_image(self.original_image)
        binary_image = rect_char.get_binary_image(gray_image)
        result_list = rect_char.get_char_list(binary_image)
        result_image = None

        if object_origin_name == "btn_split":
            print("123")  # placeholder branch
        elif object_origin_name == "btn_gray":
            result_image = self.get_pix_from_mat(gray_image,
                                                 object_target.width(),
                                                 object_target.height())
        elif object_origin_name == "btn_brinary":
            result_image = self.get_pix_from_mat(binary_image,
                                                 object_target.width(),
                                                 object_target.height())
        elif object_origin_name == "btn_location":
            # Draw a red box around every detected character.
            result_image = self.original_image.copy()
            for char in result_list:
                cv2.rectangle(result_image, (char[0], char[1]),
                              (char[2], char[3]), (0, 0, 255))
            result_image = self.get_pix_from_mat(result_image,
                                                 object_target.width(),
                                                 object_target.height())
        elif object_origin_name == "btn_recognize":
            # Crop each character box; a None entry marks a box that
            # overlaps the previous one.
            image_list = []
            rect_temp = None
            for rect in result_list:
                if rect_temp is not None and (rect_temp[2] > rect[0]
                                              and rect_temp[1] < rect[3]):
                    image_list.append(None)
                # NOTE(review): the trailing ':' index assumes gray_image
                # is 3-channel -- confirm get_gray_image's output shape.
                image_list.append(
                    gray_image[rect[1]:rect[3], rect[0]:rect[2], :] / 255)
                rect_temp = rect
            detect = Detect()
            result_str = detect.find_class(image_list)
            self.text_result.setText(result_str)
            print(result_str)

        if result_image is not None:
            object_target.setPixmap(result_image)
Beispiel #8
0
# Live mask-detection loop over webcam frames.
# NOTE(review): prevTime must be initialized before this loop (it is
# defined outside this excerpt); if sec is ever 0 the fps division
# below raises ZeroDivisionError -- confirm timing granularity.
cap = cv2.VideoCapture(cv2.CAP_DSHOW)
while True:
    ret, image = cap.read()

    if ret:
        no_mask_faces = []

        # Compute the instantaneous frame rate.
        curTime = time.time()
        sec = curTime - prevTime
        prevTime = curTime
        fps = 1 / (sec)

        print(fps)
        Mask_Detect = Detect(image)

        # Detect faces in the frame.
        Mask_Detect.detectFace()

        if len(Mask_Detect.face_lst) != 0:

            # Detect mask & nose & mask-in-face status.
            Mask_Detect.detectMaskNose()

            # Derive a per-face result according to its status.
            for face_info in Mask_Detect.face_lst:
                x1, y1, x2, y2 = face_info["roi_face"]
                mask_status = face_info["with_mask"]
                nose_status = face_info["with_nose"]
                color = (0, 0, 0)
Beispiel #9
0
        # Tail of a method not fully visible here: scan each_col
        # (presumably per-column occupancy flags -- confirm caller) and
        # collect [start, end) index pairs for each contiguous run.
        for nX in range(length):
            # XOR is true exactly when the flag differs from the current
            # run state, i.e. at a run boundary.
            if each_col[nX] ^ start:
                if start is False:
                    # Entering a run: remember where it began.
                    start = True
                    start_index = nX
                else:
                    # Leaving a run: record the half-open interval.
                    start = False
                    col_list.append([start_index, nX])
        return col_list


if __name__ == '__main__':
    # Demo: segment characters from the sample image and classify them.
    image = cv2.imread("汉字_手写.jpg")
    from Detect import Detect

    segmenter = RectChar()
    gray = segmenter.get_gray_image(image)
    binary = segmenter.get_binary_image(gray)
    boxes = segmenter.get_char_list(binary)

    crops = []
    prev_box = None
    for box in boxes:
        # A None entry marks a box that overlaps the previous one.
        overlaps = (prev_box is not None
                    and prev_box[2] > box[0]
                    and prev_box[1] < box[3])
        if overlaps:
            crops.append(None)
        crops.append(image[box[1]:box[3], box[0]:box[2], :] / 255)
        prev_box = box

    classifier = Detect()
    print(classifier.find_class(crops))
Beispiel #10
0
import os
import sys
import cv2 as cv
from Detect import Detect
from SGBM import SGBM
from time import time
import numpy as np

# Initialize the detector network and the SGBM stereo matcher.
detector = Detect()
detector.Init_Net()
sgbm = SGBM()
sgbm.Init_SGBM()

# Open camera index 1 -- presumably an external (stereo) camera; verify
# on the target hardware.
Camera = cv.VideoCapture(1)
if not Camera.isOpened():
    print("Could not open the Camera")
    sys.exit()

# Grab one frame, save it, then run the external camera setup script.
# NOTE(review): ret is not checked -- imwrite receives None if the read
# failed; confirm whether that is acceptable here.
ret, Fream = Camera.read()
cv.imwrite("Two.jpg", Fream)
os.system("./camera.sh")


def SegmentFrame(Fream):
    """Split a side-by-side stereo frame into left/right halves.

    The frame is resized to 640x240 and cut into two 320x240 images.

    Bug fix: cv.resize's third positional parameter is ``dst`` (an output
    buffer), not the interpolation flag, so the original call passed
    cv.INTER_AREA into the wrong slot. Pass it by keyword.

    Args:
        Fream: input BGR frame (any size).

    Returns:
        Tuple (left, right) of 320x240 halves.
    """
    double = cv.resize(Fream, (640, 240), interpolation=cv.INTER_AREA)
    left = double[0:240, 0:320]
    right = double[0:240, 320:640]
    return left, right


while (True):