Example #1
    def postProcessJsonConfig(self, config):
        # merge board config, if exists
        if self.args['board']:
            board_path = Path(self.args['board'])
            if not board_path.exists():
                board_path = Path(consts.resource_paths.boards_dir_path) / Path(self.args['board'].upper()).with_suffix('.json')
                if not board_path.exists():
                    print('ERROR: Board config not found: {}'.format(board_path))
                    os._exit(2)
            with open(board_path) as fp:
                board_config = json.load(fp)
            utils.merge(board_config, config)

        # handle config overwrite option.
        if self.args['config_overwrite']:
            self.args['config_overwrite'] = json.loads(self.args['config_overwrite'])
            config = utils.merge(self.args['config_overwrite'], config)
            print("Merged Pipeline config with overwrite", config)

        # Append video stream if video recording was requested and stream is not already specified
        self.video_file = None
        if self.args['video'] is not None:
            
            # open video file
            try:
                self.video_file = open(self.args['video'], 'wb')
                if config['streams'].count('video') == 0:
                    config['streams'].append('video')
            except IOError:
                print("Error: couldn't open video file for writing. Disabled video output stream")
                if config['streams'].count('video') == 1:
                    config['streams'].remove('video')

        return config
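All of these examples fold one config dict into another with utils.merge, whose implementation is not shown in this listing. A minimal sketch with the same call convention (the first argument is merged into the second, which is modified in place and returned) might look like the following; the real helper's behaviour may differ in details.

# Hypothetical stand-in for utils.merge: recursively merge `source` into
# `destination` in place and return `destination`. Nested dicts are merged
# key by key; any other value in `source` overwrites the one in `destination`.
def merge(source, destination):
    for key, value in source.items():
        if isinstance(value, dict) and isinstance(destination.get(key), dict):
            merge(value, destination[key])
        else:
            destination[key] = value
    return destination

# Usage, mirroring the overwrite step above: only 'ai.shaves' changes.
config = {'ai': {'shaves': 7, 'cmx_slices': 7}, 'streams': ['left', 'right']}
merge({'ai': {'shaves': 14}}, config)
# config == {'ai': {'shaves': 14, 'cmx_slices': 7}, 'streams': ['left', 'right']}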
Example #2
 def __init__(self):
     self.args = vars(parse_args())
     self.config = {
         'streams':
         ['left', 'right'] if not on_embedded else [{
             'name': 'left',
             "max_fps": 10.0
         }, {
             'name': 'right',
             "max_fps": 10.0
         }],
         'depth': {
             'calibration_file': consts.resource_paths.calib_fpath,
             'padding_factor': 0.3
         },
         'ai': {
             'blob_file': consts.resource_paths.blob_fpath,
             'blob_file_config': consts.resource_paths.blob_config_fpath,
             'shaves': 7,
             'cmx_slices': 7,
             'NN_engines': 1,
         },
         'board_config': {
             'swap_left_and_right_cameras': self.args['swap_lr'],
             'left_fov_deg': self.args['field_of_view'],
             'left_to_right_distance_cm': self.args['baseline'],
             'override_eeprom': True,
             'stereo_center_crop': True,
         },
         'camera': {
             'mono': {
                 # 1280x720, 640x400 (binning enabled)
                 # only 720/30 fps supported for now
                 'resolution_h': 720,
                 'fps': 30,
             },
         },
     }
     if self.args['board']:
         board_path = Path(self.args['board'])
         if not board_path.exists():
             board_path = Path(
                 consts.resource_paths.boards_dir_path) / Path(
                     self.args['board'].upper()).with_suffix('.json')
             if not board_path.exists():
                 raise ValueError(
                     'Board config not found: {}'.format(board_path))
         with open(board_path) as fp:
             board_config = json.load(fp)
         utils.merge(board_config, self.config)
     if self.args['config_overwrite']:
         utils.merge(json.loads(self.args['config_overwrite']), self.config)
         print("Merged Pipeline config with overwrite", self.config)
     if self.args['dev_debug']:
         self.cmd_file = ''
         print('depthai will not load cmd file into device.')
     self.total_images = self.args['count'] * len(
         setPolygonCoordinates(1000, 600))  # random polygons for count
     print("Using Arguments=", self.args)
 def __load_config(self):
     self.config = self.__get_default_config()
     res, board_config = self.__get_board_config()
     if not res:
         log.error('There was a problem loading the board config.')
         raise Exception('There was a problem loading the board config.')
     self.config = utils.merge(board_config, self.config)
     if self.config_overwrite is not None:
         self.config = utils.merge(self.config_overwrite, self.config)
         log.info(f'Merged pipeline config with overwrite: {self.config}')
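The config_overwrite value seen in every example is a raw JSON string taken from the command line and merged over the default pipeline config. Below is a small, self-contained illustration of that flow; the -co/--config_overwrite flag name is an assumption, since parse_args itself is not shown in these snippets.

# Illustrative only: parse a JSON overwrite string the way these scripts do.
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument('-co', '--config_overwrite', type=str, default=None,
                    help='JSON merged over the default pipeline config')
args = vars(parser.parse_args(['-co', '{"streams": ["left", "right", "metaout"]}']))

if args['config_overwrite']:
    overwrite = json.loads(args['config_overwrite'])
    # utils.merge(overwrite, config) would then be applied as in the examples.
    print(overwrite)  # {'streams': ['left', 'right', 'metaout']}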
Example #4
 def __init__(self):
     self.args = vars(parse_args())
     self.config = {
         'streams': ['left', 'right'],
         'depth': {
             'calibration_file': consts.resource_paths.calib_fpath,
             'padding_factor': 0.3
         },
         'ai': {
             'blob_file': consts.resource_paths.blob_fpath,
             'blob_file_config': consts.resource_paths.blob_config_fpath
         },
         'board_config': {
             'swap_left_and_right_cameras': self.args['swap_lr'],
             'left_fov_deg': self.args['field_of_view'],
             'left_to_right_distance_cm': self.args['baseline'],
             'override_eeprom': True,
             'stereo_center_crop': True,
         }
     }
     if self.args['board']:
         board_path = Path(self.args['board'])
         if not board_path.exists():
             board_path = Path(
                 consts.resource_paths.boards_dir_path) / Path(
                     self.args['board'].upper()).with_suffix('.json')
             if not board_path.exists():
                 raise ValueError(
                     'Board config not found: {}'.format(board_path))
         with open(board_path) as fp:
             board_config = json.load(fp)
         utils.merge(board_config, self.config)
     if self.args['config_overwrite']:
         utils.merge(json.loads(self.args['config_overwrite']), self.config)
         print("Merged Pipeline config with overwrite", self.config)
     if self.args['dev_debug']:
         self.cmd_file = ''
         print('depthai will not load cmd file into device.')
     self.total_images = self.args['count'] * len(
         setPolygonCoordinates(1000, 600))  # random polygons for count
     print("Using Arguments=", self.args)
        'blob_file': consts.resource_paths.blob_fpath,
        'blob_file_config': consts.resource_paths.blob_config_fpath,
        'calc_dist_to_bb': True
    },
    'board_config':
    {
        'swap_left_and_right_cameras': True, # True for 1097 (RPi Compute) and 1098OBC (USB w/onboard cameras)
        'left_fov_deg': 69.0, # Same on 1097 and 1098OBC
        #'left_to_right_distance_cm': 9.0, # Distance between stereo cameras
        'left_to_right_distance_cm': 7.5, # Distance between 1098OBC cameras
        'left_to_rgb_distance_cm': 2.0 # Currently unused
    }
}

if args['config_overwrite'] is not None:
    config = utils.merge(args['config_overwrite'], config)
    print("Merged Pipeline config with overwrite", config)

if 'depth_sipp' in config['streams'] and ('depth_color_h' in config['streams'] or 'depth_mm_h' in config['streams']):
    print('ERROR: depth_sipp is mutually exclusive with depth_color_h')
    exit(2)
    # del config["streams"][config['streams'].index('depth_sipp')]

stream_names = [stream if isinstance(stream, str) else stream['name'] for stream in config['streams']]

# create the pipeline, here is the first connection with the device
p = depthai.create_pipeline(config=config)

if p is None:
    print('Pipeline is not created.')
    exit(2)
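Once depthai.create_pipeline returns a pipeline object, data is read by polling it, as the capture loop further down does with get_available_nnet_and_data_packets. A stripped-down read loop over the same calls, assuming p and stream_names from the snippet above and cv2 available for display:

# Minimal polling loop over the pipeline created above (gen1-style API).
import cv2

while True:
    _, data_packets = p.get_available_nnet_and_data_packets()
    for packet in data_packets:
        if packet.stream_name in ('left', 'right'):
            frame = packet.getData()  # numpy array for image streams
            cv2.imshow(packet.stream_name, frame)
    if cv2.waitKey(1) == ord('q'):
        break

del p  # release the pipeline explicitly, as the capture example below does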
Example #6
    #    'numBFrames': 0,
    #    'quality': 80 # (0 - 100%) When using VBR
    #}
}

if args['board']:
    board_path = Path(args['board'])
    if not board_path.exists():
        board_path = Path(consts.resource_paths.boards_dir_path) / Path(
            args['board'].upper()).with_suffix('.json')
        if not board_path.exists():
            print('ERROR: Board config not found: {}'.format(board_path))
            os._exit(2)
    with open(board_path) as fp:
        board_config = json.load(fp)
    utils.merge(board_config, config)
if args['config_overwrite'] is not None:
    config = utils.merge(args['config_overwrite'], config)
    print("Merged Pipeline config with overwrite", config)

if 'depth_sipp' in config['streams'] and ('depth_color_h' in config['streams']
                                          or 'depth_mm_h'
                                          in config['streams']):
    print('ERROR: depth_sipp is mutually exclusive with depth_color_h')
    exit(2)
    # del config["streams"][config['streams'].index('depth_sipp')]

# Append video stream if video recording was requested and stream is not already specified
video_file = None
if args['video'] is not None:
def main():
    args = vars(parse_args())

    if args['config_overwrite']:
        args['config_overwrite'] = json.loads(args['config_overwrite'])

    print("Using Arguments=", args)

    if 'capture' in args['mode']:

        # Delete Dataset directory if asked
        if args['image_op'] == 'delete':
            shutil.rmtree('dataset/')

        # Creates dirs to save captured images
        try:
            for path in ["left", "right"]:
                Path("dataset/" + path).mkdir(parents=True, exist_ok=True)
        except OSError as e:
            print(
                "An error occurred trying to create image dataset directories:",
                e)
            exit(0)

        # Create Depth AI Pipeline to start video streaming
        cmd_file = consts.resource_paths.device_cmd_fpath

        # Possible to try and reboot?
        # The following doesn't work (have to manually hit switch on device)
        # depthai.reboot_device
        # time.sleep(1)
        if not depthai.init_device(cmd_file=cmd_file):
            print(
                "[ERROR] Unable to initialize device. Try to reset it. Exiting."
            )
            exit(1)

        config = {
            'streams': ['left', 'right'],
            'depth': {
                'calibration_file': consts.resource_paths.calib_fpath,
                # 'type': 'median',
                'padding_factor': 0.3
            },
            'ai': {
                'blob_file': consts.resource_paths.blob_fpath,
                'blob_file_config': consts.resource_paths.blob_config_fpath
            },
            'board_config': {
                'swap_left_and_right_cameras': True,
                'left_fov_deg': 69.0,
                'left_to_right_distance_cm': 9.0
            }
        }

        if args['config_overwrite'] is not None:
            config = utils.merge(args['config_overwrite'], config)
            print("Merged Pipeline config with overwrite", config)

        pipeline = depthai.create_pipeline(config)

        if pipeline is None:
            print("[ERROR] Unable to create pipeline. Exiting.")
            exit(2)

        num_of_polygons = 0
        polygons_coordinates = []

        image_per_polygon_counter = 0  # variable to track how many images were captured for each polygon
        complete = False  # Indicates if images have been captured for all polygons

        polygon_index = args['polygons'][0]  # index of the polygon currently in use
        total_num_of_captured_images = 0  # variable to hold total number of captured images

        capture_images = False  # value to track the state of capture button (spacebar)
        captured_left_image = False  # value to check if image from the left camera was captured
        captured_right_image = False  # value to check if image from the right camera was captured

        run_capturing_images = True  # becomes False and stops the main loop when all polygon indexes have been used

        calculate_coordinates = False  # track whether polygon coordinates have been calculated
        total_images = args['count'] * len(args['polygons'])

        # Chessboard detection termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                    0.001)
        check_frame = 30
        capture_failed = False
        leftcnt = 0
        rightcnt = 0
        leftcolor = (0, 0, 255)
        rightcolor = (0, 0, 255)

        while run_capturing_images:
            _, data_list = pipeline.get_available_nnet_and_data_packets()
            for packet in data_list:
                if packet.stream_name == 'left' or packet.stream_name == 'right':
                    if packet.stream_name == 'left':
                        leftcnt += 1
                    if packet.stream_name == 'right':
                        rightcnt += 1

                    frame = packet.getData()

                    if calculate_coordinates == False:
                        height, width = frame.shape
                        polygons_coordinates = setPolygonCoordinates(
                            height, width)
                        # polygons_coordinates = select_polygon_coords(polygons_coordinates,args['polygons'])
                        num_of_polygons = len(args['polygons'])
                        print(
                            "Starting image capture. Press the [ESC] key to abort."
                        )
                        print(
                            "Will take %i total images, %i per each polygon." %
                            (total_images, args['count']))
                        calculate_coordinates = True

                    frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

                    if capture_images == True:
                        if packet.stream_name == 'left':
                            if find_chessboard(frame, small=False):
                                filename = image_filename(
                                    packet.stream_name, polygon_index,
                                    total_num_of_captured_images)
                                cv2.imwrite("dataset/left/" + str(filename),
                                            frame)
                                print("py: Saved image as: " + str(filename))
                                captured_left_image = True
                            else:
                                print(
                                    "py: could not find chessboard, try again")
                                capture_failed = True
                                capture_images, captured_left_image, captured_right_image = False, False, False

                        elif packet.stream_name == 'right':
                            if find_chessboard(frame, small=False):
                                filename = image_filename(
                                    packet.stream_name, polygon_index,
                                    total_num_of_captured_images)
                                cv2.imwrite("dataset/right/" + str(filename),
                                            frame)
                                print("py: Saved image as: " + str(filename))
                                captured_right_image = True
                            else:
                                print(
                                    "py: could not find chess board, try again"
                                )
                                capture_failed = True
                                capture_images, captured_left_image, captured_right_image = False, False, False

                        if captured_right_image == True and captured_left_image == True:
                            capture_failed = False
                            capture_images = False
                            captured_left_image = False
                            captured_right_image = False
                            total_num_of_captured_images += 1
                            image_per_polygon_counter += 1

                            if image_per_polygon_counter == args['count']:
                                image_per_polygon_counter = 0
                                try:
                                    polygon_index = args['polygons'][
                                        args['polygons'].index(polygon_index) +
                                        1]
                                except IndexError:
                                    complete = True

                    if complete == False:
                        if rightcnt % check_frame == 0:
                            # Find the chess board corners once a second
                            if find_chessboard(frame):
                                rightcolor = (0, 255, 0)
                            else:
                                rightcolor = (0, 0, 255)
                        if leftcnt % check_frame == 0:
                            if find_chessboard(frame):
                                leftcolor = (0, 255, 0)
                            else:
                                leftcolor = (0, 0, 255)

                        cv2.putText(
                            frame,
                            "Align cameras with callibration board and press spacebar to capture the image:",
                            (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                            (0, 255, 0))
                        cv2.putText(
                            frame, "Polygon Position: %i. " % (polygon_index) +
                            "Captured %i of %i images." %
                            (total_num_of_captured_images, total_images),
                            (0, 700), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                            (255, 0, 0))
                        if capture_failed:
                            cv2.putText(
                                frame,
                                "No chessboard detected, please try again ",
                                (0, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                (0, 0, 255))

                        if packet.stream_name == 'left':
                            cv2.polylines(
                                frame,
                                np.array([
                                    getPolygonCoordinates(
                                        polygon_index, polygons_coordinates)
                                ]), True, leftcolor, 4)
                        else:
                            cv2.polylines(
                                frame,
                                np.array([
                                    getPolygonCoordinates(
                                        polygon_index, polygons_coordinates)
                                ]), True, rightcolor, 4)

                        frame = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)
                        cv2.imshow(packet.stream_name, frame)

                    else:
                        # all polygons used, stop the loop
                        run_capturing_images = False

            key = cv2.waitKey(1)

            if key == ord(" "):
                capture_images = True

            elif key == ord("q"):
                print("py: Calibration has been interrupted!")
                exit(0)

        del pipeline  # need to manually delete the object, otherwise the HostDataPacket queue runs out of space ("Not enough free space to save {stream}")

        cv2.destroyWindow("left")
        cv2.destroyWindow("right")

    else:
        print("Skipping capture.")

    if 'process' in args['mode']:
        print("Starting image processing")
        cal_data = StereoCalibration()
        try:
            cal_data.calibrate("dataset", args['square_size_cm'],
                               "./resources/depthai.calib")
        except AssertionError as e:
            print("[ERROR] " + str(e))
            exit(1)
    else:
        print("Skipping process.")

    print('py: DONE.')
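The image_filename helper used in the capture loop above is not defined in this listing. Assuming it only needs to encode the stream name, polygon index, and running image count into a unique file name, a plausible stand-in is:

# Hypothetical stand-in for the image_filename helper; the real project's
# naming scheme may differ.
def image_filename(stream_name, polygon_index, total_num_of_captured_images):
    return "{}_p{}_{}.png".format(stream_name, polygon_index,
                                  total_num_of_captured_images)

# e.g. image_filename('left', 3, 12) -> 'left_p3_12.png'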
    def setup():

        global pitch_pid_modifier
        global rotate_pid_modifier
        global t_start, time_start
        global frame_count, frame_count_prev
        global bus
        global slave_address
        global i2c_cmd

        # global statements cannot include assignments; initialize separately
        t_start = time()
        time_start = time()
        frame_count = {}
        frame_count_prev = {}

        self.arg_setup()

        # imshow_debug = False
        if self.args['imshow_debug']:
            self.imshow_debug = True

        self.timeout_time = self.tmout() 

        pitch_pid_modifier = self.args['pitch_pid_modify']
        rotate_pid_modifier = self.args['rotate_pid_modify']

        if self.args['i2c_off']:
            self.communication_on = False
        else:
            self.communication_on = True

        trackbars_on = False
        if self.args['create_trackbars']:
            self.trackbars_on = True
            trackbars_on = True
            self.imshow_debug = True

        # cmd_file = consts.resource_paths.device_cmd_fpath
        if self.args['dev_debug']:
            self.cmd_file = ''
            print('depthai will not load cmd file into device.')

        with open(consts.resource_paths.blob_labels_fpath) as fp:
            self.labels = fp.readlines()
            self.labels = [i.strip() for i in self.labels]

        print('depthai.__version__ == %s' % depthai.__version__)
        print('depthai.__dev_version__ == %s' % depthai.__dev_version__)

                
        if not depthai.init_device(self.cmd_file):
            print("Error initializing device. Try to reset it.")
            exit(1)


        print('Available streams: ' + str(depthai.get_available_steams()))


        config = self.config
        if self.args['config_overwrite'] is not None:
            config = utils.merge(self.args['config_overwrite'], config)
            print("Merged Pipeline config with overwrite", config)

        if 'depth_sipp' in config['streams'] and ('depth_color_h' in config['streams'] or 'depth_mm_h' in config['streams']):
            print('ERROR: depth_sipp is mutually exclusive with depth_color_h')
            exit(2)
            # del config["streams"][config['streams'].index('depth_sipp')]

        self.stream_names = [stream if isinstance(stream, str) else stream['name'] for stream in config['streams']]

        # create the pipeline, here is the first connection with the device
        self.p = depthai.create_pipeline(config=config)

        if self.p is None:
            print('Pipeline is not created.')
            exit(2)


        # t_start = time()
        # time_start = time()
        # frame_count = {}
        # frame_count_prev = {}
        for s in self.stream_names:
            frame_count[s] = 0
            frame_count_prev[s] = 0

        self.entries_prev = []


        ##################    I2C COMMUNICATION SETUP    ####################
        if self.communication_on:
            self.bus = smbus.SMBus(1)

        ################## ADDED FOR COLOR DETECTION CWM ####################
        if self.imshow_debug:
            cv2.namedWindow('g_image')
            cv2.namedWindow('r_image')

        # if self.trackbars_on:
        #     cv2.namedWindow('r1_sliders')
        #     cv2.namedWindow('r2_sliders')
        #     cv2.namedWindow('g_sliders')
        
        if trackbars_on:
            cv2.namedWindow('r1_sliders')
            cv2.namedWindow('r2_sliders')
            cv2.namedWindow('g_sliders')
    
        # white blank image
        self.blank_image = 255 * np.ones(shape=[10, 256, 3], dtype=np.uint8)
        self.thrs = 50




        #cv2.createTrackbar('Hue', 'image', 80, 179, nothing)
        #cv2.createTrackbar('Sat', 'image', 127, 255, nothing)
        #cv2.createTrackbar('Val', 'image', 222, 255, nothing)

        if trackbars_on:
            
            cv2.createTrackbar('filterThresh', 'r1_sliders', self.thresholdValue, 100, nothing)
            cv2.createTrackbar('r1LowHue', 'r1_sliders', self.r1LowHue, 179, nothing)
            cv2.createTrackbar('r1LowSat', 'r1_sliders', self.r1LowSat, 255, nothing)
            cv2.createTrackbar('r1LowVal', 'r1_sliders', self.r1LowVal, 255, nothing)
            cv2.createTrackbar('r1UpHue', 'r1_sliders', self.r1UpHue, 179, nothing)
            cv2.createTrackbar('r1UpSat', 'r1_sliders', self.r1UpSat, 255, nothing)
            cv2.createTrackbar('r1UpVal', 'r1_sliders', self.r1UpVal, 255, nothing)
            cv2.createTrackbar('r2LowHue', 'r2_sliders', self.r2LowHue, 179, nothing)
            cv2.createTrackbar('r2LowSat', 'r2_sliders', self.r2LowSat, 255, nothing)
            cv2.createTrackbar('r2LowVal', 'r2_sliders', self.r2LowVal, 255, nothing)
            cv2.createTrackbar('r2UpHue', 'r2_sliders', self.r2UpHue, 179, nothing)
            cv2.createTrackbar('r2UpSat', 'r2_sliders', self.r2UpSat, 255, nothing)
            cv2.createTrackbar('r2UpVal', 'r2_sliders', self.r2UpVal, 255, nothing)
            cv2.createTrackbar('gLowHue', 'g_sliders', self.gLowHue, 179, nothing)
            cv2.createTrackbar('gLowSat', 'g_sliders', self.gLowSat, 255, nothing)
            cv2.createTrackbar('gLowVal', 'g_sliders', self.gLowVal, 255, nothing)
            cv2.createTrackbar('gUpHue', 'g_sliders', self.gUpHue, 179, nothing)
            cv2.createTrackbar('gUpSat', 'g_sliders', self.gUpSat, 255, nothing)
            cv2.createTrackbar('gUpVal', 'g_sliders', self.gUpVal, 255, nothing)


        ## red ball mask areas
        #red_mask_1 = cv2.inRange(im_hsv, (0, 120, 70), (10, 255, 255))
        #red_mask_2 = cv2.inRange(im_hsv, (170, 120, 70), (179, 255, 255)) 

        #lower_red1 = np.array([0, 120, 100])
        #upper_red1 = np.array([10, 255, 255])
        #lower_red2 = np.array([170, 120, 100])
        #upper_red2 = np.array([179, 255, 255])

        #green mask area centered around 
        # (80, 127, 222)
        #green_mask = cv2.inRange(im_hsv, (55, 120, 70), (105, 255, 255)) 
        #    lower_green = np.array([40, 10, 200])
        #    upper_green = np.array([120, 245, 255])

        #lower_green = np.array([55, 120, 70])
        #upper_green = np.array([105, 255, 255])


        # sets how much to blur
        self.filt = 39
        self.exitNow = 0
        self.pause = 0
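The cv2.createTrackbar calls above pass a callback named nothing that is not defined in this snippet. OpenCV requires some callable even when slider values are only polled later; a minimal no-op callback and read-back looks like this (window and trackbar names copied from the snippet, initial value illustrative):

# No-op trackbar callback plus read-back via cv2.getTrackbarPos.
import cv2

def nothing(value):
    pass  # called on every slider move; values are polled on demand instead

cv2.namedWindow('r1_sliders')
cv2.createTrackbar('r1LowHue', 'r1_sliders', 0, 179, nothing)
low_hue = cv2.getTrackbarPos('r1LowHue', 'r1_sliders')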