Code Example #1
def __init__(self):
    self.camera = CSI_Camera()
    self.net = cv2.dnn_DetectionModel("model.cfg", "model.weights")
    self.gpu_template_img = cv2.cuda_GpuMat(TEMPLATE_IMAGE_ACE)
    self.gpu_target_img = cv2.cuda_GpuMat()
    ###### Lighter information ######
    self.head_width = 0
    self.body_height = 0
    self.upper_sticker_bound = 0
    self.lower_sticker_bound = 0
    self.sticker_poses = []   # Lighter's sticker position for each lighter
    self.error_sticker_images = []
    # Manual camera setting variables, initialized with default sizes.
    self.manual_box_x           = DISPLAY_WIDTH // 2
    self.manual_box_y           = DISPLAY_HEIGHT // 2
    self.manual_box_width       = 150
    self.manual_box_height      = 300

    ###### Image information ######
    self.display_contrast       = 30    # Default contrast (alpha = 1 + 30/100 = 1.30)
    self.display_brightness     = 5     # Default brightness offset (beta = 5)

    self.initialize_camera()
    self.initialize_yolo()
Code Example #2
File: test_length.py Project: jyjun1015/new_bob
def checkHeadRatio(raw, stick):    # Coordinate-estimation helper for finding the lighter heads
    return int((stick-raw) * (5/105))    # gap between heads / body length

def checkStickRatio(raw):
    return int(raw * (490/260))

def checkStickerRatio(raw, stick):
    return int((stick-raw)*(45/216)), int((stick-raw)*(133/216))

def checkStickerRatio2(raw, stick):
    return int((stick-raw)*(134/216)), int((stick-raw)*(175/216))
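
A minimal usage sketch for the ratio helpers above (the y-coordinates below are hypothetical; `raw` and `stick` are the tray and stick positions found elsewhere in this project):

raw, stick = 120, 560                            # hypothetical y-coordinates
head_gap = checkHeadRatio(raw, stick)            # estimated gap between heads
stick_len = checkStickRatio(raw)                 # estimated stick length from the tray position
upper1, lower1 = checkStickerRatio(raw, stick)   # first sticker band (y-offsets)
upper2, lower2 = checkStickerRatio2(raw, stick)  # second sticker band (y-offsets)
print(head_gap, stick_len, upper1, lower1, upper2, lower2)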

  
# Save screen-capture images locally
camera = CSI_Camera()
camera.create_gstreamer_pipeline(
    sensor_id = 0,
    sensor_mode = 1,
    framerate = 30,
    flip_method = 2,
    display_height = 720,
    display_width = 1280
)
camera.open(camera.gstreamer_pipeline)
camera.start()
# cv2.namedWindow("Sticker Solution", cv2.WINDOW_AUTOSIZE)

    
    
def getStart():
Code Example #3
    def __init__(self):
        # Pi-Camera for jetson nano (CSI_Camera)
        self.camera = CSI_Camera()
        self.gpu_frame = cv2.cuda_GpuMat()

        # 4:3 Resolution (Capture Resolution: 4032 x 3040)
        self.display_width = 1280  # Display width (not captured width)
        self.display_height = 960  # Display height (not captured height)
        self.upper_not_roi = self.display_height // 4  # Default value of ROI
        self.lower_not_roi = 3 * self.display_height // 4  # Default value of ROI

        # Load YOLOv4
        self.net = cv2.dnn_DetectionModel("yolov4-tiny.cfg",
                                          "yolov4-tiny-final.weights")
        self.net.setInputSize(448, 448)  # (416, 416) also works
        self.net.setInputScale(1.0 / 255)  # Scale the [0, 255] byte range down to [0, 1]
        self.net.setInputSwapRB(True)  # Swap BGR order to RGB
        self.net.setPreferableTarget(
            cv2.dnn.DNN_TARGET_CUDA)  # For using CUDA GPU
        self.net.setPreferableBackend(
            cv2.dnn.DNN_BACKEND_CUDA)  # For using CUDA GPU

        # Standard Lines
        self.upper_std_line = 0  # Lighter's standard line which is used for getting height
        self.lower_std_line = 0  # Lighter's standard line which is used for getting height
        self.upper_normal_line = 0  # Normal bound line to decide whether lighter has defect or not
        self.lower_normal_line = 0  # Normal bound line to decide whether lighter has defect or not

        # QSlider bars
        # ROI which can be controlled by user
        self.roi_upper = 0
        self.roi_lower = self.display_height
        self.roi_col = 0
        # Lighter's standard line which can be controlled by user
        self.std_upper = 0
        self.std_lower = self.display_height

        # Lighter information
        self.lighter_width = 0  # Thickness of one lighter
        self.lighter_height = 0  # Height of lighter

        # Image filters
        # Sobel filter is used to extract the outline of an image.
        self.sobel_filter = cv2.cuda.createSobelFilter(cv2.CV_8U,
                                                       cv2.CV_8U,
                                                       0,
                                                       1,
                                                       ksize=5,
                                                       scale=0.25)
        # Hough segment filter is used to extract standard line of lighter.
        # You can decrease first two arguments if you want higher precision.
        # 3rd arg: minimum length of line
        # 4th arg: minimum gap of each lines
        self.hough_seg_filter = cv2.cuda.createHoughSegmentDetector(
            0.1, np.pi / 180, 80, 1)

        signal.signal(
            signal.SIGINT,
            self.sigint_handler)  # Allocate Ctrl + C (SIGINT)'s handler.
        self.initialize_camera()
        self.load_lighter_info()
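
A minimal standalone sketch of the CUDA Hough segment detector configured above, run on a synthetic edge image (the image content and sizes here are hypothetical):

import cv2
import numpy as np

edges = np.zeros((960, 1280), dtype=np.uint8)
cv2.line(edges, (100, 200), (500, 200), 255, 1)   # synthetic horizontal edge

gpu = cv2.cuda_GpuMat()
gpu.upload(edges)
detector = cv2.cuda.createHoughSegmentDetector(0.1, np.pi / 180, 80, 1)
lines = detector.detect(gpu).download()           # None if nothing was found
if lines is not None:
    for x1, y1, x2, y2 in lines[0]:               # each segment: start and end coords
        print((x1, y1), "->", (x2, y2))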
Code Example #4
class Yolo:
    """
    The Yolo class gets images from the camera and judges whether defects exist.
    The flow is a serial process consisting of:
        1) Initialize the camera (Pi-Camera HQ (IMX477)).
        2) Load the lighter information saved by the previous execution.
        3) Set the standard lines and the normal lines.
        4) Find defects that may be in a single set of lighters.
    """
    def __init__(self):
        # Pi-Camera for jetson nano (CSI_Camera)
        self.camera = CSI_Camera()
        self.gpu_frame = cv2.cuda_GpuMat()

        # 4:3 Resolution (Capture Resolution: 4032 x 3040)
        self.display_width = 1280  # Display width (not captured width)
        self.display_height = 960  # Display height (not captured height)
        self.upper_not_roi = self.display_height // 4  # Default value of ROI
        self.lower_not_roi = 3 * self.display_height // 4  # Default value of ROI

        # Load YOLOv4
        self.net = cv2.dnn_DetectionModel("yolov4-tiny.cfg",
                                          "yolov4-tiny-final.weights")
        self.net.setInputSize(448, 448)  # (416, 416) also works
        self.net.setInputScale(1.0 / 255)  # Scale the [0, 255] byte range down to [0, 1]
        self.net.setInputSwapRB(True)  # Swap BGR order to RGB
        self.net.setPreferableTarget(
            cv2.dnn.DNN_TARGET_CUDA)  # For using CUDA GPU
        self.net.setPreferableBackend(
            cv2.dnn.DNN_BACKEND_CUDA)  # For using CUDA GPU

        # Standard Lines
        self.upper_std_line = 0  # Lighter's standard line which is used for getting height
        self.lower_std_line = 0  # Lighter's standard line which is used for getting height
        self.upper_normal_line = 0  # Normal bound line to decide whether lighter has defect or not
        self.lower_normal_line = 0  # Normal bound line to decide whether lighter has defect or not

        # QSlider bars
        # ROI which can be controlled by user
        self.roi_upper = 0
        self.roi_lower = self.display_height
        self.roi_col = 0
        # Lighter's standard line which can be controlled by user
        self.std_upper = 0
        self.std_lower = self.display_height

        # Lighter information
        self.lighter_width = 0  # Thickness of one lighter
        self.lighter_height = 0  # Height of lighter

        # Image filters
        # Sobel filter is used to extract the outline of an image.
        self.sobel_filter = cv2.cuda.createSobelFilter(cv2.CV_8U,
                                                       cv2.CV_8U,
                                                       0,
                                                       1,
                                                       ksize=5,
                                                       scale=0.25)
        # Hough segment filter is used to extract standard line of lighter.
        # You can decrease first two arguments if you want higher precision.
        # 3rd arg: minimum length of line
        # 4th arg: minimum gap of each lines
        self.hough_seg_filter = cv2.cuda.createHoughSegmentDetector(
            0.1, np.pi / 180, 80, 1)

        signal.signal(
            signal.SIGINT,
            self.sigint_handler)  # Allocate Ctrl + C (SIGINT)'s handler.
        self.initialize_camera()
        self.load_lighter_info()

    def initialize_camera(self):
        """
        Args:
        
        Returns:
        
        Raises:
            camera.video_capture.isOpened():
                The camera module cannot capture images.
                Something is wrong in the camera module (HW) or nvargus-daemon (SW);
                'sudo systemctl restart nvargus-daemon' can help.
                The self.quit_program() function performs that role.

        Note:
            The IMX477 image sensor provides at most 29 FPS in sensor mode 0.
            You do not need to modify 'sensor_id' and 'sensor_mode' if you use only one camera module.
            'flip_method' is set to '2' to rotate the image 180 degrees.
        """

        self.camera.create_gstreamer_pipeline(
            sensor_id=0,       # Camera sensor ID (no need to modify)
            sensor_mode=0,     # Camera sensor mode (the IMX477 driver provides 4 options)
            framerate=30,      # Camera framerate (the application effectively uses ~10 FPS)
            flip_method=2,     # Flip the image by 90 x 2 = 180 degrees
            display_height=self.display_height,
            display_width=self.display_width)
        self.camera.open(self.camera.gstreamer_pipeline)
        # [Exception handling] :: Camera can't start.
        if not self.camera.video_capture.isOpened():
            print("[Exception] :: Unable to open camera")
            self.quit_program()
        self.camera.start()  # Start camera (Make threads and execute).

    def make_ROI_screen(self, img):
        """
        Args:
            img:
                The image for which the Region of Interest (ROI) is to be set.
                
        Returns:
            bool:
                Variable to indicate whether a function is successful.
            img:
                The image with ROI of which pixels in some areas have changed to zero.
                
        Raises:
            Invalid image:
                if img is None, it should be returned.
        
        Note:
            'roi_upper', 'roi_lower' and 'roi_col' are controlled by the user.
            Coordinates get larger from top to bottom,
            as in a two-dimensional array or list.
            Therefore, the coordinate of the lower boundary is bigger than that of the upper boundary.
        """
        # [Exception handling] :: The image is invalid.
        if img is None:
            print("[Exception] :: There is no image to making ROI")
            return False, img

        # Remove the top and bottom areas.
        img[:self.roi_upper, :] = img[self.roi_lower:, :] = 0
        # Remove the left and right areas.
        img[:, :self.roi_col] = img[:, self.display_width - self.roi_col:] = 0
        return True, img

    def calculate_lighter_info(self, img, roi=10, loop=50):
        """
        Args:
            img:
                The image whose properties (width, height, standard line) we want to know.
            roi:
                The pixel value of the area where the upper standard line is expected to be found.
                If the 'roi_upper' is properly set, upper standard line must exist within 10px.
            loop:
                The number of iterations used to find more accurate return values.
                Experiments showed no significant difference beyond 50 iterations.
                
        Returns:
            bool:
                Variable to indicate whether a function is successful.
            width:
                Thickness of one lighter.
                It is detected as 98 ~ 101px at a distance of approximately 30 ~ 33cm.
            height:
                Height of a set of lighter.
                It is detected as 380 ~ 404px at a distance of approximately 30 ~ 33cm.
                In general, the height is about four times the width.
                
        Raises:
            Invalid image:
                if img is None, it should be returned.
        
        Note:
            'roi_upper', 'roi_lower' and 'roi_col' are controlled by the user.
            Coordinates get larger from top to bottom,
            as in a two-dimensional array or list.
            Therefore, the coordinate of the lower boundary is bigger than that of the upper boundary.

            3 is added to 'width' as a correction value obtained from experiments:
            the line obtained by the Hough filter can be truncated by about 1-2 pixels at both ends.

            'width' is multiplied by 3.95, a factor obtained by measuring the actual length of the fuel tank.
            
        """

        widths = []       # Calculated thickness of each lighter (one per loop iteration).
        upper_lines = []  # Detected upper line of the lighter set (one per loop iteration).
        self.gpu_frame.upload(img)  # Upload the image to the GPU for optimization.

        for _ in range(loop):
            width_candidates = []
            upper_line_candidates = []
            # Apply 'hough segmentation filter' to the image and download image to CPU side from GPU side.
            lines = self.hough_seg_filter.detect(self.gpu_frame).download()
            if lines is None:
                continue  # [Exception 1] :: No line detected. Something wrong.
            for line in lines:
                for x1, y1, x2, y2 in line:  # Each line consists of start and end coords.
                    # y-coordinate exists in an area where the upper standard line is likely to exist.
                    if self.roi_upper < y1 < self.roi_upper + roi:
                        upper_line_candidates.append(y1)
                        width_candidates.append(
                            math.sqrt(
                                math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2)))
            # [Exception 2] :: No valid line detected.
            if len(upper_line_candidates) == 0 or len(width_candidates) == 0:
                continue
            # Get upper standard line's y-coordinate and thickness of lighter.
            upper_lines.append(
                sum(upper_line_candidates) / len(upper_line_candidates))
            widths.append(sum(width_candidates) / len(width_candidates))
        # [Exception 3] :: No valid data detected. Something wrong in image. Retry.
        if len(widths) == 0 or len(upper_lines) == 0:
            return False, 0, 0, 0

        width = int(round(sum(widths) / len(widths))) + 3  # Lighter thickness
        upper_line = int(round(sum(upper_lines) / len(upper_lines)))  # upper_std_line
        height = int(round(width * 3.95))  # Lighter height
        return True, width, height, upper_line

    def get_camera_image(self):
        """
        Args:
                
        Returns:
            img:
                The image from camera sensor.
            
        Raises:
            Invalid image:
                If the return flag of 'make_ROI_screen()' or `camera.read()` is False, there is something wrong with the image,
                so we need to get a new image from the camera.
                But we should add a proper exception handler, since this can loop forever in case of camera malfunction.
        
        Note:
            Getting and processing the camera image consists of the steps below.
                1) Get image from pi-camera(IMX477).
                2) Convert image from RGB to gray through `cv2.cvtColor()`.
                3) Upload image to GPU side from CPU side.
                4) Apply sobel filter to image.
                5) Download image from GPU side and apply ROI to image.
                6) Convert image from gray to binary for optimization.
                
            Be cautious about creating any `cv2.cuda_GpuMat()`, because \
            uploading and downloading images between the GPU side and the CPU side can be a serious overhead.
            
        """
        ret, frame = self.camera.read()  # step 1
        if ret:
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # step 2
            self.gpu_frame.upload(gray_frame)  # step 3
            self.gpu_frame = self.sobel_filter.apply(self.gpu_frame)  # step 4
            img = self.gpu_frame.download()  # step 5
            result, img = self.make_ROI_screen(img)
            if not result:
                print("[Exception] :: There is something wrong with the image while making the ROI")
                return self.get_camera_image()  # TODO :: Make proper exception handling.
            _, img = cv2.threshold(img, 127, 255,
                                   cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # step 6
            return img
        else:
            print("[Exception] :: Reading an image from CSI_Camera failed! Retrying...")
            return self.get_camera_image()  # TODO :: Make proper exception handling.

    def test(self, img, err_rate=[], flag=False):
        """
        Args:
            img:
                Image that requires determining whether it is defective.
            err_rate:
                2D array which indicates error rates of each lighter.
                Each index represents a lighter number.
            flag:
                A flag to distinguish a call made just to load the YOLO model from a call made for judgement.
                Because the first call to this function incurs a long delay (approximately 3 seconds),
                we call it once at program start-up to absorb the delay before full-scale judgement begins.
                
        Returns:
            img:
                The image with rectangles and labels drawn on it.
                The final result does not need this image; it is only for debugging purposes.
            
        Raises:
        
        Note:
            The YOLO model detects the gas surface and makes a boundary NMS box automatically.
            Of course, the box and label are used only for debugging purposes.
            So in the final release, we should delete every unnecessary part except the 'center coords'.
            
        """
        try:
            classes, confidences, boxes = self.net.detect(img,
                                                          confThreshold=0.3,
                                                          nmsThreshold=0.3)
            # [Exception] :: Invalid image, or the screen-setting phase.
            if len(boxes) == 0 or flag: return
            box_cnt = 0
            center_list = []
            # get center position of each box (gas surface).
            for classId, confidence, box in zip(classes.flatten(),
                                                confidences.flatten(), boxes):
                label = '%.2f' % confidence
                labelSize, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX,
                                               0.5, 1)
                left, top, width, height = box
                label_width, label_height = labelSize

                # [Exception] :: The box is below the ROI.
                if top + height > self.lower_std_line: continue

                box_cnt += 1
                # Make rectangles of gas surface and label to image.
                cv2.rectangle(img, box, color=(0, 255, 255), thickness=2)
                cv2.rectangle(img, (left, top - label_height),
                              (left + label_width, top), (0, 255, 255),
                              cv2.FILLED)
                cv2.putText(img, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 0, 0))
                # Get center pos of box.
                # center_x = left + (width // 2)
                # center_y = top + (height // 2)
                # center_list.append([center_x, center_y, left, top, width, height])
                center_list.append(box)

            center_list.sort(key=lambda x: x[0])  # Sort boxes in ascending x order.
            num = 0
            prev = self.roi_col
            for pos in center_list:  # For every lighter,
                l, t, w, h = pos
                # Calculation to find the correct order when the n-th lighter surface is not recognized:
                # count how many lighters can fit between two adjacent detections.
                num += ((abs(l - prev)) // self.lighter_width) + 1
                prev = l + w
                # TODO : Proper exception handling.
                if num > 10:
                    break  # A set must consist of 10 lighters.

                # Get the error rate of each lighter.
                if t < self.upper_normal_line:
                    hh = self.upper_normal_line - t  # 'excessive' error amount
                elif t + h > self.lower_normal_line:
                    hh = self.lower_normal_line - (t + h)  # 'under' error amount
                else:
                    hh = 0.0  # No defect means a 0% error rate.
                err_rate[num - 1].append(hh / h)

            # print("\t{} boxes found".format(box_cnt))
            return img

        except Exception as ex:
            print("[Error] : ", ex)
            self.quit_program()

    def solve(self, loop=5):
        """
        Args:
            loop:
                Number of images taken to increase accuracy.
                Images are taken once every 0.1 second when the outer signal is recognized.
                The lighter set travels about every 1.8 seconds.
                The first 0.5 seconds are not appropriate because the surface shakes,
                so about 5 to 10 pictures are appropriate.
                
        Returns:
                
        Raises:
        
        Note:
            All print statements in this function are only for debugging purpose.
            
        """
        # Flag list: whether the n-th lighter has a defect or not.
        self.lighter_error = [0 for _ in range(10)]
        lighter_error_rate = [[] for _ in range(10)]  # n-th lighter's error rates
        try:
            for _ in range(loop):
                img = self.get_camera_image()
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
                img = self.test(img, lighter_error_rate)
                time.sleep(0.1)  # sleep for 0.1 seconds.
            # Decide whether the n-th lighter is abnormal.
            for i, rate in enumerate(lighter_error_rate):
                if len(rate) == 0:  # Division-by-zero prevention.
                    self.lighter_error[i] = 1
                    continue
                # Get the average error rate of each lighter.
                err_rate = round(sum(rate) / len(rate), 2)
                # A lighter has a defect when its error rate magnitude is 90% or more.
                if err_rate >= 0.9: self.lighter_error[i] = 2
                elif err_rate <= -0.9: self.lighter_error[i] = 1
                else: self.lighter_error[i] = 0
        except Exception as ex:
            print("[Error] : ", ex)
            self.quit_program()

    def set_screen_auto(self):
        """
        Args:
            
        Returns:
            
        Raises:
        
        Note:
            Using the `calculate_lighter_info()` function, which gets the lighter's height, width and standard lines,
            we can obtain the lighter's information automatically.
            
        """
        result = False
        while not result:
            img = self.get_camera_image()
            result, self.lighter_width, self.lighter_height, self.upper_std_line = \
                self.calculate_lighter_info(img)
        self.lower_std_line = self.upper_std_line + self.lighter_height  # Lower boundary of lighter (tray)
        self.upper_normal_line = self.upper_std_line + int(
            self.lighter_height *
            (UPPER_NORMAL_BOUND_RATIO))  # Upper boundary of normal area.
        self.lower_normal_line = self.upper_std_line + int(
            self.lighter_height *
            (LOWER_NORMAL_BOUND_RATIO))  # Lower boundary of normal area.
        self.test(cv2.cvtColor(img, cv2.COLOR_GRAY2RGB), flag=True)
        self.save_lighter_info()
        # Unnecessary printing for debugging purpose

    def set_screen_manual(self):
        """
        Args:
            
        Returns:
            
        Raises:
        
        Note:
            The user can control sliders and buttons to set the lighter's properties.
            Because the automatic setting sometimes lacks precision, the user may set the properties manually.
            
        """
        self.upper_std_line = self.std_upper  # Upper standard boundary which is set by user.
        self.lower_std_line = self.std_lower  # Lower standard boundary which is set by user.
        self.lighter_height = self.std_lower - self.std_upper  # Get height of lighter.
        self.lighter_width = self.lighter_height // 4  # Get thickness of lighter.
        self.upper_normal_line = self.std_upper + int(
            self.lighter_height *
            (UPPER_NORMAL_BOUND_RATIO))  # Upper boundary of normal area.
        self.lower_normal_line = self.std_upper + int(
            self.lighter_height *
            (LOWER_NORMAL_BOUND_RATIO))  # Lower boundary of normal area.
        img = self.get_camera_image()
        self.test(cv2.cvtColor(img, cv2.COLOR_GRAY2RGB), flag=True)
        self.save_lighter_info()
        # Unnecessary printing for debugging purpose

    def load_lighter_info(self):
        """
        Args:
            
        Returns:
            
        Raises:
        
        Note:
            Loads several lighter properties saved by a previously run program.
            
        """
        try:
            f = open("old_lighter_info.txt", 'rt')
        except FileNotFoundError:
            print("Fail to load information file")
            return
        # Read five lines from text file.
        self.upper_std_line = int(
            f.readline().rstrip('\n'))  # Previous standard line boundary
        self.lower_std_line = int(
            f.readline().rstrip('\n'))  # Previous standard line boundary
        self.roi_upper = int(f.readline().rstrip('\n'))  # Previous ROI value
        self.roi_lower = int(f.readline().rstrip('\n'))  # Previous ROI value
        self.roi_col = int(f.readline().rstrip('\n'))  # Previous ROI value

        self.lighter_height = self.lower_std_line - self.upper_std_line  # Get height of lighter.
        self.lighter_width = self.lighter_height // 4  # Get thickness of lighter.
        self.upper_normal_line = self.upper_std_line + int(
            self.lighter_height *
            (UPPER_NORMAL_BOUND_RATIO))  # Upper boundary of normal area.
        self.lower_normal_line = self.upper_std_line + int(
            self.lighter_height *
            (LOWER_NORMAL_BOUND_RATIO))  # Lower boundary of normal area.
        # Unnecessary printing for debugging purpose

        f.close()

    def save_lighter_info(self):
        """
        Args:
            
        Returns:
            
        Raises:
        
        Note:
            Save several lighter properties for the next program run.
            
        """
        f = open("old_lighter_info.txt", 'wt')
        f.write(str(self.upper_std_line) + '\n')
        f.write(str(self.lower_std_line) + '\n')
        f.write(str(self.roi_upper) + '\n')
        f.write(str(self.roi_lower) + '\n')
        f.write(str(self.roi_col) + '\n')
        # Unnecessary printing for debugging purpose
        f.close()

    def quit_program(self):
        """
        Args:
            
        Returns:
            
        Raises:
        
        Note:
            Shut down the program with proper resources collecting.
            Because 'camera' is made of multiple threads, proper resources collecting must be done.
            
        """
        self.camera.stop()
        self.camera.release()
        subprocess.call(["sudo", "systemctl", "restart", "nvargus-daemon"])
        sys.exit(0)

    def sigint_handler(self, sig, frame):
        # If Ctrl + C is called.
        self.quit_program()

    def yolo_loop(self):
        """
        Args:
            
        Returns:
            
        Raises:
        
        Note:
            This function does not perform any special processing;
            it only shows images from the camera in the GUI in real time,
            visually helping the user easily control the sliders and buttons.
            
        """
        img = self.get_camera_image()
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        cv2.line(img, (self.roi_col, 0), (self.roi_col, self.display_height),
                 (200, 200, 200), 1)
        cv2.line(img, (self.display_width - self.roi_col, 0),
                 (self.display_width - self.roi_col, self.display_height),
                 (200, 200, 200), 1)
        cv2.line(img, (0, self.std_upper),
                 (self.display_width, self.std_upper), (200, 200, 200), 1)
        cv2.line(img, (0, self.std_lower),
                 (self.display_width, self.std_lower), (200, 200, 200), 1)
        cv2.line(img, (0, self.upper_std_line),
                 (self.display_width, self.upper_std_line), (255, 0, 255), 2)
        cv2.line(img, (0, self.lower_std_line),
                 (self.display_width, self.lower_std_line), (255, 0, 255), 2)
        cv2.line(img, (0, self.upper_normal_line),
                 (self.display_width, self.upper_normal_line), (0, 0, 255), 2)
        cv2.line(img, (0, self.lower_normal_line),
                 (self.display_width, self.lower_normal_line), (0, 0, 255), 2)
        return img
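
A minimal driver sketch for the Yolo class above, following the four-step flow described in its docstring (the `__main__` wiring is an assumption; the GUI sliders and the UPPER/LOWER_NORMAL_BOUND_RATIO constants are defined elsewhere in the project):

if __name__ == "__main__":
    yolo = Yolo()           # steps 1-2: initialize the camera and load saved lighter info
    yolo.set_screen_auto()  # step 3: derive the standard and normal lines automatically
    yolo.solve(loop=5)      # step 4: judge defects over 5 captured frames
    # lighter_error per lighter: 0 = normal, 1 = under-filled (or undetected), 2 = over-filled
    print(yolo.lighter_error)
    yolo.quit_program()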
Code Example #5
def face_detect():
    face_cascade = cv2.CascadeClassifier(
        "/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml")
    eye_cascade = cv2.CascadeClassifier(
        "/usr/share/opencv4/haarcascades/haarcascade_eye.xml")
    left_camera = CSI_Camera()
    left_camera.create_gstreamer_pipeline(
        sensor_id=0,
        sensor_mode=SENSOR_MODE_720,
        framerate=30,
        flip_method=0,
        display_height=DISPLAY_HEIGHT,
        display_width=DISPLAY_WIDTH,
    )
    left_camera.open(left_camera.gstreamer_pipeline)
    left_camera.start()
    cv2.namedWindow("Face Detect", cv2.WINDOW_AUTOSIZE)

    if not left_camera.video_capture.isOpened():
        # Cameras did not open, or no camera attached
        print("Unable to open any cameras")
        # TODO: Proper Cleanup
        raise SystemExit(0)
    try:
        # Start counting the number of frames read and displayed
        left_camera.start_counting_fps()
        while cv2.getWindowProperty("Face Detect", 0) >= 0:
            img = read_camera(left_camera, False)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                eyes = eye_cascade.detectMultiScale(roi_gray)
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                  (0, 255, 0), 2)
            if show_fps:
                draw_label(
                    img, "Frames Displayed (PS): " +
                    str(left_camera.last_frames_displayed), (10, 20))
                draw_label(
                    img,
                    "Frames Read (PS): " + str(left_camera.last_frames_read),
                    (10, 40))
            cv2.imshow("Face Detect", img)
            left_camera.frames_displayed += 1
            keyCode = cv2.waitKey(5) & 0xFF
            # Stop the program on the ESC key
            if keyCode == 27:
                break
    finally:
        left_camera.stop()
        left_camera.release()
        cv2.destroyAllWindows()
Code Example #6
class Sticker:
    def __init__(self):
        self.camera = CSI_Camera()
        self.net = cv2.dnn_DetectionModel("model.cfg", "model.weights")
        self.gpu_template_img = cv2.cuda_GpuMat(TEMPLATE_IMAGE_ACE)
        self.gpu_target_img = cv2.cuda_GpuMat()
        ###### Lighter information #######
        self.head_width = 0
        self.body_height = 0
        self.upper_sticker_bound = 0
        self.lower_sticker_bound = 0
        self.sticker_poses = []   # Lighter's sticker position for each lighter
        self.error_sticker_images = []
        # Manual camera setting variables, initialized with default sizes.
        self.manual_box_x           = DISPLAY_WIDTH // 2
        self.manual_box_y           = DISPLAY_HEIGHT // 2
        self.manual_box_width       = 150
        self.manual_box_height      = 300
        
        ###### Image Information ######
        self.display_contrast       = 30    # Default contrast (alpha = 1 + 30/100 = 1.30)
        self.display_brightness     = 5     # Default brightness offset (beta = 5)
        
        self.initialize_camera()
        self.initialize_yolo()
        
    def initialize_camera(self):
        self.camera.create_gstreamer_pipeline (
            sensor_id       = 0,
            sensor_mode     = 0,
            framerate       = 30,
            flip_method     = 2,
            display_height  = DISPLAY_HEIGHT,
            display_width   = DISPLAY_WIDTH
        )
        self.camera.open(self.camera.gstreamer_pipeline)
        self.camera.start()
    
    def initialize_yolo(self):
        self.net.setInputSize(416, 416)      # (448, 448) also works if you need it.
        self.net.setInputScale(1.0 / 255)    # Scale the [0, 255] byte range down to [0, 1]
        self.net.setInputSwapRB(True)        # Swap BGR order to RGB
        self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)    # For using CUDA GPU
        self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)  # For using CUDA GPU
    
    def get_head_ratio(self, up, down):      # Coordinate-calculation helper for finding the lighter heads
        return int((down-up) * (5/105)) # gap between heads / body length
        
    def load_sticker_info(self):
        if self.apply_btn_push_cnt == 0:
            try: f = open("old_ace_sticker_info.txt", 'rt')
            except FileNotFoundError: 
                print("Fail to load information file")
                return
        else:
            try: f = open("old_btn_sticker_info.txt", 'rt')
            except FileNotFoundError: 
                print("Fail to load information file")
                return
        # Read four lines from the text file.
        self.manual_box_x          = int(f.readline().rstrip('\n'))    # Previous box's x axis pos
        self.manual_box_y          = int(f.readline().rstrip('\n'))    # Previous box's y axis pos
        self.manual_box_width      = int(f.readline().rstrip('\n'))    # Previous box's width
        self.manual_box_height     = int(f.readline().rstrip('\n'))    # Previous box's height
        # Unnecessary printing for debugging purpose
        print("<Previous information successfully loaded!>")
        f.close()
    
    def save_sticker_info(self):
        if self.apply_btn_push_cnt == 1:
            f = open("old_ace_sticker_info.txt", 'wt')
            f.write(str(self.manual_box_x) + '\n')
            f.write(str(self.manual_box_y) + '\n')
            f.write(str(self.manual_box_width) + '\n')
            f.write(str(self.manual_box_height) + '\n')
        else:
            f = open("old_btn_sticker_info.txt", 'wt')
            f.write(str(self.manual_box_x) + '\n')
            f.write(str(self.manual_box_y) + '\n')
            f.write(str(self.manual_box_width) + '\n')
            f.write(str(self.manual_box_height) + '\n')
        self.save_template_image()
        # Unnecessary printing for debugging purpose
        print("<New information successfully saved!>")
        f.close()

    def save_template_image(self):
        img = self.get_image()
        sx = self.manual_box_x
        ex = self.manual_box_x + self.manual_box_width
        sy = self.manual_box_y
        ey = self.manual_box_y + self.manual_box_height
        template_img = img[sy:ey, sx:ex]
        offset_x =  int(round(self.manual_box_width * 0.1))
        offset_y = int(round(self.manual_box_height * 0.05))
        template_img = cv2.medianBlur(template_img, 3)
        template_img = template_img[offset_y : -offset_y, offset_x : -offset_x]
        save_location = "./template_img_ace.jpg" if self.apply_btn_push_cnt == 1 else "./template_img_btn.jpg"
        cv2.imwrite(save_location, template_img)

    def get_image(self):
        ret, img = self.camera.read()
        if not ret:
            return None
        a = 1 + round(self.contrast_slider.value() / 100, 2)    # contrast gain (alpha)
        b = self.brightness_slider.value()                      # brightness offset (beta)
        return cv2.convertScaleAbs(img, alpha = a, beta = b)
    
    def show_image(self):
        img = self.get_image()
        if img is None: return
        for sticker in self.sticker_poses:
            cv2.rectangle(img, sticker, (255, 255, 0), 5)
        return img
    
    def show_image_manual_setting(self):
        img = self.get_image()
        if img is None: return
        box = np.array([self.manual_box_x, self.manual_box_y, self.manual_box_width, self.manual_box_height])
        cv2.rectangle(img, box, (0, 125, 255), 5)
        return img
        
    def set_camera_auto(self):
        head_widths = []
        head_heights = []
        head_lower_y = []
        
        img = self.get_image()
        self.sticker_poses.clear()
        
        classes, confidences, boxes = self.net.detect(img, confThreshold = 0.7, nmsThreshold = 0.7)
        for classId, confidence, box in zip(classes.flatten(), confidences.flatten(), boxes):
            x, y, w, h = box
            head_heights.append(h)
            head_widths.append(w)
            head_lower_y.append(y + h)
            self.sticker_poses.append([x])
        self.head_width             = int(round(sum(head_widths) / 10))
        self.upper_sticker_bound    = int(round(sum(head_lower_y) / 10))
        self.body_height            = int(round(self.head_width * 3.8))
        self.lower_sticker_bound    = self.upper_sticker_bound + self.body_height
        self.sticker_poses.sort(key = lambda x : x[0])
        for i in range(LIGHTER_COUNT):
            self.sticker_poses[i].extend([self.upper_sticker_bound, self.head_width, self.body_height])
            self.sticker_poses[i] = np.array(self.sticker_poses[i])

    def do_template_matching(self):
        if len(self.sticker_poses) != 10: return self.set_camera_auto()
        
        img = self.get_image()
        end_y = self.lower_sticker_bound
        start_y = self.upper_sticker_bound
        self.error_sticker_images.clear()
        self.lighter_error_flag = [True for _ in range(LIGHTER_COUNT)]
        
        for idx, sticker_pos in enumerate(self.sticker_poses):
            start_x = sticker_pos[0]
            end_x = sticker_pos[0] + sticker_pos[2]
            sticker_img = img[start_y : end_y, start_x : end_x]
            sticker_img = cv2.medianBlur(sticker_img, 3)
            self.gpu_target_img.upload(sticker_img)
            result = TEMPLATE_MATCHER.match(self.gpu_target_img, self.gpu_template_img).download()
            score = cv2.minMaxLoc(result)[0]
            if score <= 0.09: self.lighter_error_flag[idx] = False
        
        # Check whether there is an error
        if True not in self.lighter_error_flag:
            self.sys_result_label.setText("정상 세트 [TM]")
            return
        
        for idx, result in enumerate(self.lighter_error_flag):
            if result:
                start_x = self.sticker_poses[idx][0]
                end_x = start_x + self.head_width
                error_sticker_image = img[start_y : end_y, start_x : end_x]
                error_sticker_image = cv2.resize(error_sticker_image, (int(self.head_width / 1.5), int(self.upper_sticker_bound / 1.5)))
                self.error_sticker_images.append([idx, error_sticker_image])
        
        self.check_barcode()
            
    def check_barcode(self):
        for loop_cnt in range(3):
            for sticker_num, sticker_img in self.error_sticker_images:
                if self.lighter_error_flag[sticker_num] is False: continue
                # Step 1. Substract blue color from sticker image.
                sticker_img_hsv = cv2.cvtColor(sticker_img, cv2.COLOR_BGR2HSV)
                mask_img = cv2.inRange(sticker_img_hsv, LOWER_BOUNDARY_BLUE, UPPER_BOUNDARY_BLUE)
                sticker_img = cv2.cvtColor(sticker_img, cv2.COLOR_BGR2GRAY)
                sticker_img = cv2.add(sticker_img, mask_img)
                # Step 2. Detect 1D barcode from sticker image.
                decoded = pyzbar.decode(sticker_img)
                if len(decoded) > 0 : self.lighter_error_flag[sticker_num] = False
            if True not in self.lighter_error_flag:
                self.sys_result_label.setText("정상 세트 [BM1]")  # "Normal set [BM1]"
                return

        self.sys_result_label.setText("불량품 세트" if self.lighter_error_flag.count(True) > 1
                                      else "정상 세트 [BM2]")  # "Defective set" / "Normal set [BM2]"
        
    def quit_program(self):
        self.camera.stop()
        self.camera.release()
        subprocess.call(["sudo", "systemctl", "restart", "nvargus-daemon"])
        sys.exit(0)

    def sigint_handler(self, sig, frame):
        self.quit_program()
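
A standalone sketch of the CUDA template matching used in do_template_matching() above. TEMPLATE_MATCHER is not shown in this example, so a normalized squared-difference matcher is assumed here; that is consistent with the "lower score is better" test (score <= 0.09):

import cv2

# Hypothetical file names standing in for the saved template and a sticker crop.
template = cv2.imread("template_img_ace.jpg", cv2.IMREAD_GRAYSCALE)
crop = cv2.imread("sticker_crop.jpg", cv2.IMREAD_GRAYSCALE)

gpu_template = cv2.cuda_GpuMat(template)
gpu_crop = cv2.cuda_GpuMat(crop)

matcher = cv2.cuda.createTemplateMatching(cv2.CV_8UC1, cv2.TM_SQDIFF_NORMED)
result = matcher.match(gpu_crop, gpu_template).download()
score = cv2.minMaxLoc(result)[0]   # minimum value over all template placements
print("match score:", score, "(<= 0.09 counts as a match above)")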
Code Example #7
File: checkGas.py Project: jyjun1015/new_bob
def getCapture(cap):  # Capture the screen in real time and save it locally
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(
        sensor_id=0,
        sensor_mode=2,       # [TODO] Shouldn't this be set by resolution rather than sensor_mode? 4032x3040 (ratio 4:3)
        framerate=30,
        flip_method=0,
        display_height=720,  # [TODO] It would also be best to match this to 4:3...
        display_width=1280)
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    cv2.namedWindow("Gas Solution", cv2.WINDOW_AUTOSIZE)

    if not camera.video_capture.isOpened():
        print("Unable to open camera")
        raise SystemExit(0)

    try:
        camera.start_counting_fps()
        while cv2.getWindowProperty("Gas Solution", 0) >= 0:
            _, img_ori = camera.read()

            temp = cv2.cvtColor(img_ori, cv2.COLOR_RGB2GRAY)

            img = np.zeros([720, 720, 3])

            for i in range(3):
                img[:, :, i] = temp[:, 280:1000]
            img = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5, scale=0.15)
            img = cv2.convertScaleAbs(img)

            cv2.imwrite("images/" + str(cap) + ".jpg",
                        img)  # [Important!] Saves the image locally! TODO: judge directly instead of saving.
            # cv2.imshow("Gas Solution", img)               # [TODO] Is it really necessary to display this?
            time.sleep(0.33)  # Sleep 0.33 s to intentionally limit to ~3 FPS
            camera.frames_displayed += 1
            cap = cap + 1
            if (cv2.waitKey(5) & 0xFF) == 27: break
    finally:
        camera.stop()
        camera.release()
        cv2.destroyAllWindows()
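
The channel-replication loop above copies the same gray crop into all three channels before filtering; a sketch of an equivalent, more direct single-channel pipeline (same crop and Sobel parameters):

import cv2

def sobel_crop(img_ori):
    """Crop the central 720-px column and emphasize horizontal edges."""
    gray = cv2.cvtColor(img_ori, cv2.COLOR_RGB2GRAY)[:, 280:1000]
    edges = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5, scale=0.15)
    return cv2.convertScaleAbs(edges)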
Code Example #8
def face_detect(sensor_mode=S_MODE_3_1280_720_60,
                dispW=DISP_W_M3_M4_one_half,
                dispH=DISP_H_M3_M4_one_half):

    face_cascade = cv2.CascadeClassifier(
        "/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml")
    eye_cascade = cv2.CascadeClassifier(
        "/usr/share/opencv4/haarcascades/haarcascade_eye.xml")
    left_camera = CSI_Camera()

    # WS mod: IMPORTANT use lowest fps = 21 as default, otherwise modes 0 or 1 crash: reboot required
    left_camera.create_gstreamer_pipeline(
        sensor_id=0,
        sensor_mode=sensor_mode,
        framerate=21,
        flip_method=0,
        display_height=dispH,
        display_width=dispW,
    )
    left_camera.open(left_camera.gstreamer_pipeline)
    left_camera.start()
    txt = "Face Detect: Sensor Mode {}, Display {} x {}".format(
        sensor_mode, dispW, dispH)  # WS mod
    cv2.namedWindow(txt, cv2.WINDOW_AUTOSIZE)

    if not left_camera.video_capture.isOpened():
        # Cameras did not open, or no camera attached
        print("Unable to open any cameras")
        # TODO: Proper Cleanup
        raise SystemExit(0)
    try:
        # Start counting the number of frames read and displayed
        left_camera.start_counting_fps()
        while cv2.getWindowProperty(txt, 0) >= 0:
            img = read_camera(left_camera, False)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                eyes = eye_cascade.detectMultiScale(roi_gray)
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                  (0, 255, 0), 2)
            if show_fps:
                draw_label(
                    img, "Frames Displayed (PS): " +
                    str(left_camera.last_frames_displayed), (10, 20))
                draw_label(
                    img,
                    "Frames Read (PS): " + str(left_camera.last_frames_read),
                    (10, 40))
            cv2.imshow(txt, img)
            left_camera.frames_displayed += 1
            keyCode = cv2.waitKey(5) & 0xFF
            # Stop the program on the ESC key
            if keyCode == 27:
                break
    finally:
        left_camera.stop()
        left_camera.release()
        cv2.destroyAllWindows()
Code Example #9
File: checkShake.py Project: jyjun1015/lighter
def lighter_detect():
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(
        sensor_id       = 0,
        sensor_mode     = SENSOR_MODE,
        framerate       = 30,
        flip_method     = 0,
        display_height  = DISPLAY_HEIGHT,
        display_width   = DISPLAY_WIDTH,
    )
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    cv2.namedWindow("Lighter Test FPS", cv2.WINDOW_AUTOSIZE)

    if not camera.video_capture.isOpened():
        print("Unable to open camera")
        raise SystemExit(0)
    
    try:
        camera.start_counting_fps()
        while cv2.getWindowProperty("Lighter Test FPS", 0) >= 0:
            img = read_camera(camera)
            blob = cv2.dnn.blobFromImage(img, 0.00392, (446, 446), (0, 0, 0), True, crop=False)
            net.setInput(blob)
            outs = net.forward(output_layers)
            
            confidences = []
            boxes = []

            for out in outs:
                for detection in out:
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.5 :
                        center_x = int(detection[0] * DISPLAY_WIDTH)
                        center_y = int(detection[1] * DISPLAY_HEIGHT)
                        w = int(detection[2] * DISPLAY_WIDTH)
                        h = int(detection[3] * DISPLAY_HEIGHT)
                        x = int(center_x - w / 2)
                        y = int(center_y - h / 2)
                        if class_id == 0: cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2, cv2.LINE_AA)
                        elif class_id == 1: cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2, cv2.LINE_AA)
                        else: cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2, cv2.LINE_AA)
                        cv2.putText(img, classes[class_id], (x, y+30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
                        boxes.append([x, y, w, h])
                        confidences.append(float(confidence))

            cv2.imshow("Lighter Test FPS", img)
            camera.frames_displayed += 1
            if (cv2.waitKey(5) & 0xFF) == 27: break # ESC key Stops program
    finally:
        camera.stop()
        camera.release()
        cv2.destroyAllWindows()
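
The boxes and confidences collected above are never pruned; typically they would be passed to cv2.dnn.NMSBoxes before drawing. A self-contained sketch of that step (the detections and thresholds are illustrative):

import cv2
import numpy as np

# Hypothetical detections: two heavily overlapping boxes and one separate box.
boxes = [[10, 10, 50, 50], [12, 12, 50, 50], [200, 200, 40, 40]]
confidences = [0.9, 0.6, 0.8]

indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)  # score and NMS thresholds
for i in np.array(indices).flatten():   # flatten() handles old/new return shapes
    print("kept box:", boxes[i])        # keeps the first and third boxes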
Code Example #10
import cv2
import numpy as np
import os
import time
from csi_camera import CSI_Camera

# Save screen-capture images locally
camera = CSI_Camera()
camera.create_gstreamer_pipeline(
    sensor_id = 0,
    sensor_mode = 2,
    framerate = 30,
    flip_method = 0,
    display_height = 720,
    display_width = 1280
)
camera.open(camera.gstreamer_pipeline)
camera.start()
cv2.namedWindow("Sticker Solution", cv2.WINDOW_AUTOSIZE)
cap = 0

if not camera.video_capture.isOpened():
    print("Unable to open camera")
    raise SystemExit(0)

try:
    camera.start_counting_fps()
    while cv2.getWindowProperty("Sticker Solution", 0) >= 0:
        _, img = camera.read()
        cv2.imwrite("datas/" + str(cap) + ".jpg", img)
        print(cap, "save")
Code Example #11
File: dual_camera_fps.py Project: UVeyes/CSI-Camera
def start_cameras():
    left_camera = CSI_Camera()
    left_camera.create_gstreamer_pipeline(
        sensor_id=0,
        sensor_mode=SENSOR_MODE_1080,
        exposure_low=13000,
        exposure_high=13000,
        gain_low=1,
        gain_high=22.25,
        framerate=30,
        flip_method=2,
        display_height=DISPLAY_HEIGHT,
        display_width=DISPLAY_WIDTH,
    )
    left_camera.open(left_camera.gstreamer_pipeline)
    left_camera.start()

    right_camera = CSI_Camera()
    right_camera.create_gstreamer_pipeline(
        sensor_id=1,
        sensor_mode=SENSOR_MODE_1080,
        exposure_low=13000,
        exposure_high=13000,
        gain_low=1,
        gain_high=22.25,
        framerate=30,
        flip_method=2,
        display_height=DISPLAY_HEIGHT,
        display_width=DISPLAY_WIDTH,
    )
    right_camera.open(right_camera.gstreamer_pipeline)
    right_camera.start()

    cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)

    if (not left_camera.video_capture.isOpened()
            or not right_camera.video_capture.isOpened()):
        # Cameras did not open, or no camera attached
        print("Unable to open any cameras")
        # TODO: Proper Cleanup
        raise SystemExit(0)
    try:
        # Start counting the number of frames read and displayed
        left_camera.start_counting_fps()
        right_camera.start_counting_fps()
        while cv2.getWindowProperty("CSI Cameras", 0) >= 0:
            left_image = read_camera(left_camera, show_fps)
            right_image = read_camera(right_camera, show_fps)
            # We place both images side by side to show in the window
            camera_images = np.hstack((left_image, right_image))
            cv2.imshow("CSI Cameras", camera_images)
            left_camera.frames_displayed += 1
            right_camera.frames_displayed += 1
            # This also acts as a frame limiter
            # Stop the program on the ESC key
            if (cv2.waitKey(1) & 0xFF) == 27:
                break

    finally:
        left_camera.stop()
        left_camera.release()
        right_camera.stop()
        right_camera.release()
    cv2.destroyAllWindows()
Code Example #12
def getCapture():  # Function that repeatedly grabs screen captures
    # Saves screen-capture images locally
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(sensor_id=0,
                                     sensor_mode=1,
                                     framerate=30,
                                     flip_method=2,
                                     display_height=720,
                                     display_width=1280)
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    # cv2.namedWindow("Sticker Solution", cv2.WINDOW_AUTOSIZE)

    if not camera.video_capture.isOpened():
        print("Unable to open camera")
        raise SystemExit(0)

    try:
        # camera.start_counting_fps()
        # while cv2.getWindowProperty("Sticker Solution", 0) >= 0:
        while True:
            _, img = camera.read()
            img = img[:, 280:1000, :]
            cv2.imshow("img", img)
            # Placeholder for external communication

            # time.sleep(0.1)        # Modify here before starting
            # camera.frames_displayed += 1
            if (cv2.waitKey(5) & 0xFF) == 27:
                break

        camera.stop()
        camera.release()
        cv2.destroyAllWindows()

    except Exception as ex:
        print("[Error] :", ex)
Code Example #13
def getCapture(cap):  # Function that repeatedly grabs screen captures
    # Saves screen-capture images locally
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(sensor_id=0,
                                     sensor_mode=2,
                                     framerate=30,
                                     flip_method=0,
                                     display_height=720,
                                     display_width=1280)
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    cv2.namedWindow("Sticker Solution", cv2.WINDOW_AUTOSIZE)

    if not camera.video_capture.isOpened():
        print("Unable to open camera")
        raise SystemExit(0)

    try:
        camera.start_counting_fps()
        while cv2.getWindowProperty("Sticker Solution", 0) >= 0:
            _, img = camera.read()
            cv2.imwrite("images/" + str(cap) + ".jpg", img)
            cv2.imshow("Sticker Solution", img)
            camera.frames_displayed += 1
            cap = cap + 1
            if (cv2.waitKey(5) & 0xFF) == 27: break
    finally:
        camera.stop()
        camera.release()
        cv2.destroyAllWindows()
Code Example #14
File: gas_test_gpu.py Project: jyjun1015/new_bob
def getCapture():
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(sensor_id=0,
                                     sensor_mode=0,
                                     framerate=30,
                                     flip_method=0,
                                     display_height=DISPLAY_HEIGHT,
                                     display_width=DISPLAY_WIDTH)
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    cv2.namedWindow("Gas Test Display", cv2.WINDOW_AUTOSIZE)

    if not camera.video_capture.isOpened():
        print("Unable to open camera")
        raise SystemExit(0)

    try:
        camera.start_counting_fps()
        image_num = 1
        gpu_frame = cv2.cuda_GpuMat()

        while cv2.getWindowProperty("Gas Test Display", 0) >= 0:
            _, original_img = camera.read()
            gpu_frame.upload(original_img)

            # Apply grayscale and sobel filter
            #temp = cv2.cvtColor(original_img, cv2.COLOR_RGB2GRAY)
            #img = np.zeros([DISPLAY_HEIGHT, DISPLAY_WIDTH, 3])
            #for i in range(3) : img[:, :, i] = temp[:, :]
            #img = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5, scale=0.15)
            #img = cv2.convertScaleAbs(img)
            # Apply Sharpnening filter
            #img = getSharpening(img, 2)

            original_img = gpu_frame.download()
            cv2.imshow("Gas Test Display", original_img)
            camera.frames_displayed += 1

            if (cv2.waitKey(25) & 0xFF) == 27:
                break  # Quit if you pressed ESC.
            # if (cv2.waitKey(25) & 0xFF) == 13:        # Capture if you pressed ENTER.
            #     cv2.imwrite('/home/jyjun/test' + str(image_num) + '.png', img)
            #     print("<Image captured successfully!>")
            #     image_num = image_num + 1
    finally:
        camera.stop()
        camera.release()
        cv2.destroyAllWindows()
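
The commented-out CPU filtering above could stay on the GPU once the frame has been uploaded; a sketch using the cv2.cuda Sobel filter (the parameters mirror the CPU version; single-channel CV_8UC1 in and out is an assumption):

import cv2

gpu_frame = cv2.cuda_GpuMat()
sobel = cv2.cuda.createSobelFilter(cv2.CV_8UC1, cv2.CV_8UC1, 0, 1,
                                   ksize=5, scale=0.15)

def gpu_sobel(original_img):
    gray = cv2.cvtColor(original_img, cv2.COLOR_RGB2GRAY)  # convert on the CPU
    gpu_frame.upload(gray)                                 # one upload per frame
    return sobel.apply(gpu_frame).download()               # filtered result back on the CPU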
Code Example #15
LOWER_NOT_ROI = 3 * DISPLAY_HEIGHT // 4

# Standard Lines
UPPER_STANDARD_LINE = 0
LOWER_STANDARD_LINE = 0
UPPER_NORMAL_LINE = 0
LOWER_NORMAL_LINE = 0


# Trackbar
def onChange(a):
    pass


if __name__ == "__main__":
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(sensor_id=0,
                                     sensor_mode=0,
                                     framerate=30,
                                     flip_method=2,
                                     display_height=DISPLAY_HEIGHT,
                                     display_width=DISPLAY_WIDTH)
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    cv2.namedWindow("DISPLAY", cv2.WINDOW_AUTOSIZE)

    cv2.createTrackbar("rowROI", "DISPLAY", 0, LOWER_NOT_ROI, onChange)
    cv2.createTrackbar("colROI", "DISPLAY", 0, 200, onChange)
    cv2.setTrackbarPos("rowROI", "DISPLAY", 0)
    cv2.setTrackbarPos("colROI", "DISPLAY", 100)
Code Example #16
def getCapture():  # Capture the screen in real time and save it locally
    global cap
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(sensor_id=0,
                                     sensor_mode=2,
                                     framerate=15,
                                     flip_method=0,
                                     display_height=DISPLAY_HEIGHT,
                                     display_width=DISPLAY_WIDTH)
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    cv2.namedWindow("Gas Solution", cv2.WINDOW_AUTOSIZE)

    if not camera.video_capture.isOpened():
        print("Unable to open camera")
        raise SystemExit(0)

    try:
        camera.start_counting_fps()
        while cv2.getWindowProperty("Gas Solution", 0) >= 0:
            _, img_ori = camera.read()

            temp = cv2.cvtColor(img_ori, cv2.COLOR_RGB2GRAY)

            img = np.zeros([DISPLAY_HEIGHT, DISPLAY_HEIGHT, 3])

            diff_half = (DISPLAY_WIDTH - DISPLAY_HEIGHT) // 2
            for i in range(3):
                img[:, :, i] = temp[:, diff_half:DISPLAY_WIDTH - diff_half]
            img = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5, scale=0.15)
            img = cv2.convertScaleAbs(img)
            cv2.imwrite("images/" + str(cap) + ".jpg", img)
            cap = cap + 1
            cv2.imshow("Gas Solution", img)
            camera.frames_displayed += 1
            if (cv2.waitKey(5) & 0xFF) == 27: break
    finally:
        camera.stop()
        camera.release()
        cv2.destroyAllWindows()
Code Example #17
File: gas_test.py Project: jyjun1015/new_bob
def getCapture():  # Function that repeatedly grabs screen captures
    # Saves screen-capture images locally
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(sensor_id=0,
                                     sensor_mode=0,
                                     framerate=30,
                                     flip_method=0,
                                     display_height=720,
                                     display_width=1280)
    camera.open(camera.gstreamer_pipeline)
    camera.start()

    try:
        # camera.start_counting_fps()
        while 1:
            _, img_ori = camera.read()

            temp = cv2.cvtColor(img_ori, cv2.COLOR_RGB2GRAY)
            # temp = cv2.cuda.cvtColor(img_ori, cv2.COLOR_RGB2GRAY)  # broken GPU experiment: cv2.cuda.cvtColor needs a GpuMat

            img = np.zeros([720, 720, 3])
            for i in range(3):
                img[:, :, i] = temp[:, 280:1000]

            img = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5, scale=0.15)
            # img = cv2.cuda.createSobelFilter(cv2.CV_8UC1, -1, 1, 1)  # broken GPU experiment: this returns a filter object, not an image

            img = cv2.convertScaleAbs(img)

            sub_img = copy.deepcopy(img)

            print("1")
            raw = 0
            try:
                print("2")
                temp, stick = findRaw(img)  # Check the tray position to locate the lighters
                print(temp, stick)
                if temp is not None and temp > 0:
                    # if 0.9*raw < temp < 1.1*raw : print("Camera moved out of position")   # If the tray position deviates from the previous frame, the camera has moved
                    raw = temp

                print("3")
                # Find the upper and lower bound lines
                print(stick, raw)
                h_line, m_line, l_line = checkLineRatio(stick, raw)

                print(h_line, m_line, l_line)

                cv2.line(sub_img, (0, raw), (720, raw), (255, 255, 0), 1)
                cv2.line(sub_img, (0, stick), (720, stick), (255, 255, 0), 1)
                cv2.line(sub_img, (0, h_line), (720, h_line), (0, 255, 0), 1)
                cv2.line(sub_img, (0, m_line), (720, m_line), (0, 255, 0), 1)
                cv2.line(sub_img, (0, l_line), (720, l_line), (0, 255, 0), 1)

                cv2.imwrite("images/" + str(10) + ".jpg", sub_img)

            except Exception:
                pass

            cv2.imshow("Gas Solution", sub_img)
            time.sleep(10)

            if (cv2.waitKey(5) & 0xFF) == 27: break
    finally:
        camera.stop()
        camera.release()
        cv2.destroyAllWindows()