Example #1
    def __init__(self, threshold=0.5, fps=None):
        if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
            raise RuntimeError("Error initializing device. Try to reset it.")

        self.p = depthai.create_pipeline(
            config={
                "streams": ["metaout", "previewout"]
                if not isinstance(fps, int) else [{
                    'name': 'previewout',
                    "max_fps": fps
                }, {
                    'name': 'metaout',
                    "max_fps": fps
                }],
                "ai": {
                    "blob_file":
                    str(Path('./model/model.blob').resolve().absolute()),
                    "blob_file_config":
                    str(Path('./model/config.json').resolve().absolute())
                }
            })

        if self.p is None:
            raise RuntimeError("Error creating a pipeline!")

        self.entries_prev = []
        self.threshold = threshold
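
A hedged usage sketch for the constructor above: only __init__ is shown, so the wrapper class name (DetectionApp below) and the polling loop are assumptions based on the other examples on this page.

app = DetectionApp(threshold=0.6, fps=30)  # hypothetical class name
while True:
    # poll the pipeline created in __init__
    nnet_packets, data_packets = app.p.get_available_nnet_and_data_packets()
    for nnet_packet in nnet_packets:
        for e in nnet_packet.entries():
            # {id == -1} or {confidence == 0} marks the end of valid entries
            if e[0]['id'] == -1.0 or e[0]['confidence'] == 0.0:
                break
            if e[0]['confidence'] >= app.threshold:
                app.entries_prev.append(e)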
Example #2
def create_pipeline(config):
    log.info("Creating DepthAI pipeline...")
    if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
        raise RuntimeError("Error initializing device. Try to reset it.")
    pipeline = depthai.create_pipeline(config)
    if pipeline is None:
        raise RuntimeError("Pipeline was not created.")
    log.info("Pipeline created.")
    return pipeline
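
A minimal call sketch for the helper above, assuming the standard resource paths imported elsewhere on this page:

config = {
    'streams': ['metaout', 'previewout'],
    'ai': {
        'blob_file': consts.resource_paths.blob_fpath,
        'blob_file_config': consts.resource_paths.blob_config_fpath
    }
}
pipeline = create_pipeline(config)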
Example #3
    def create_pipeline(config):
        if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
            log.error('Failed to initialize device, raising a RuntimeError.')
            raise RuntimeError('Error initializing device. Try to reset it.')
        log.info('Creating DepthAI pipeline...')

        pipeline = depthai.create_pipeline(config)
        if pipeline is None:
            log.error('Failed to create pipeline, raising a RuntimeError.')
            raise RuntimeError('Pipeline was not created.')

        log.info('Pipeline created successfully.')
        return pipeline
Example #4
def setupLuxonis():
	if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
		print("Error Initializing device. Try to reset it.")
		exit(1)

	config = {
		'streams': ['metaout','previewout'],
		#[{
		#	'name': 'previewout', 
		#	'max_fps': 20.0
		#},
		#{
		#	'name': 'metaout',
		#	'max_fps': 20.0
		#}],
		'depth':
		{
			'calibration_file': consts.resource_paths.calib_fpath,
			# 'type': 'median',
			'padding_factor': 0.3
		},
		'ai': 
		{
			'blob_file': consts.resource_paths.blob_fpath,
			'blob_file_config': consts.resource_paths.blob_config_fpath,
			'calc_dist_to_bb': True
			#'blob_file': '/home/riley/depthai-python-extras/depthai-tutorials-practice/2-face-detection-retail/BadGuyFP16.blob',
			#'blob_file_config': '/home/riley/depthai-python-extras/depthai-tutorials-practice/2-face-detection-retail/BadGuy.json',
			#'calc_dist_to_bb': True
		},
		'board_config':
		{
			'swap_left_and_right_cameras': True, # True for 1097 (RPi Compute) and 1098OBC (USB w/onboard cameras)
			'left_fov_deg': 69.0, # Same on 1097 and 1098OBC
			'left_to_right_distance_cm': 7.5, # Distance between stereo cameras
			'left_to_rgb_distance_cm': 2.0 # Currently unused
		}
	}

	p = depthai.create_pipeline(config=config)

	if p is None:
		print("Error creating pipeline")
		exit(2)
	return config, p
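
setupLuxonis() returns both the config and the pipeline; a hedged consumption sketch:

config, p = setupLuxonis()
stream_names = [s if isinstance(s, str) else s['name'] for s in config['streams']]
nnet_packets, data_packets = p.get_available_nnet_and_data_packets()
for packet in data_packets:
    print(packet.stream_name)
del p  # deleting the pipeline stops the device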
Example #5
    def get_pipeline(self):
        # Possible to try and reboot?
        # The following doesn't work (have to manually hit switch on device)
        # depthai.reboot_device
        # time.sleep(1)
        if not depthai.init_device(cmd_file=self.cmd_file):
            raise RuntimeError("Unable to initialize device. Try to reset it")

        pipeline = depthai.create_pipeline(self.config)

        if pipeline is None:
            raise RuntimeError("Unable to create pipeline")

        try:
            yield pipeline
        finally:
            del pipeline
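
Because get_pipeline yields the pipeline and cleans up in a finally block, it is presumably meant as a generator-based context manager (e.g. decorated with @contextlib.contextmanager, which is not shown in the snippet). A hedged usage sketch:

# obj is a hypothetical instance of the class that defines get_pipeline
with obj.get_pipeline() as pipeline:
    nnet_packets, data_packets = pipeline.get_available_nnet_and_data_packets()
# on exit, the finally block deletes the pipeline, stopping the device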
Example #6
def camera_init(streams=None, configs=None):

    # output streams: left, right, previewout, metaout, disparity, depth_sipp, depth_color_h
    if streams is None:
        streams = ['left', 'right', 'previewout'] # make sure to always put 'left' first
 
    # camera configs
    if configs is None:
        configs = {
            'streams': streams,
            'depth': {
                'calibration_file': '', # we're going to do our own calibration since we're not using their AI
                'padding_factor': 0.3
            },
            'ai': { 
                'blob_file': blob_file,
                'calc_dist_to_bb': False                # I think this will decrease processing time
            },
            'board_config': {
                'swap_left_and_right_cameras': True,    # True for 1097 (RPi Compute) and 1098OBC (USB w/onboard cameras)
                'left_fov_deg': 69.0,                   # Same on 1097 and 1098OBC
                'left_to_right_distance_cm': 7.5,       # Distance between stereo cameras
            }
        }

    # camera init
    if not depthai.init_device(device_cmd_file):
        print("Error initializing device. Try to reset it.")
        exit(1)

    # create the pipeline, here is the first connection with the device
    pipeline = depthai.create_pipeline(config=configs)
    if pipeline is None:
        print('Pipeline is not created.')
        exit(2)

    return pipeline
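
A hedged call sketch for camera_init(); blob_file and device_cmd_file are module-level names in the original that are not shown in this excerpt:

pipeline = camera_init(streams=['left', 'right', 'previewout'])
_, data_packets = pipeline.get_available_nnet_and_data_packets()
for packet in data_packets:
    print(packet.stream_name)
del pipeline  # deleting the pipeline stops the device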
Example #7
    }
}

if args['config_overwrite'] is not None:
    config = utils.merge(args['config_overwrite'],config)
    print("Merged Pipeline config with overwrite",config)

if 'depth_sipp' in config['streams'] and ('depth_color_h' in config['streams'] or 'depth_mm_h' in config['streams']):
    print('ERROR: depth_sipp is mutually exclusive with depth_color_h and depth_mm_h')
    exit(2)
    # del config["streams"][config['streams'].index('depth_sipp')]

stream_names = [stream if isinstance(stream, str) else stream['name'] for stream in config['streams']]

# create the pipeline, here is the first connection with the device
p = depthai.create_pipeline(config=config)

if p is None:
    print('Pipeline is not created.')
    exit(2)


t_start = time()
time_start = time()
frame_count = {}
frame_count_prev = {}
for s in stream_names:
    frame_count[s] = 0
    frame_count_prev[s] = 0

entries_prev = []
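
The counters initialized above are typically refreshed once a second inside the main loop; a hedged sketch mirroring the pattern used in Example #9 below:

while True:
    _, data_packets = p.get_available_nnet_and_data_packets()
    for packet in data_packets:
        if packet.stream_name in frame_count:
            frame_count[packet.stream_name] += 1
    t_curr = time()
    if t_start + 1.0 < t_curr:  # once per second, publish and reset the counts
        t_start = t_curr
        for s in stream_names:
            frame_count_prev[s] = frame_count[s]
            frame_count[s] = 0
    if cv2.waitKey(1) == ord('q'):
        break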
Example #8
	print("Error Initializing device. Try to reset it.")
	exit(1)

# create pipeline for previewout stream
p = depthai.create_pipeline(config={
	'streams': [
		{
			'name': 'previewout',
			#'max_fps': 12.0
		}, # {'name': 'depth_sipp', "max_fps": 12.0},
	],
	'depth':
	{
		'calibration_file': consts.resource_paths.calib_fpath,
		# 'type': 'median',
		'padding_factor': 0.3
	},
	'ai':
	{
		'blob_file': consts.resource_paths.blob_fpath
	},
	'board_config':
	{
		'swap_left_and_right_cameras': True, # True for 1097 (RPi Compute) and 1098OBC (USB w/onboard cameras)
		'left_fov_deg': 69.0, # Same on 1097 and 1098OBC
		'left_to_right_distance_cm': 7.5, # Distance between stereo cameras
		'left_to_rgb_distance_cm': 2.0 # Currently unused
	}
})

if p is None:
	print("Error creating pipeline")
	exit(2)
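
A hedged sketch of consuming the previewout stream created above; the planar CHW frame must be merged to HWC for OpenCV, as in Example #9:

import cv2

while True:
    _, data_packets = p.get_available_nnet_and_data_packets()
    for packet in data_packets:
        if packet.stream_name == 'previewout':
            data = packet.getData()  # shape (3, H, W)
            frame = cv2.merge([data[0, :, :], data[1, :, :], data[2, :, :]])  # (H, W, 3)
            cv2.imshow('previewout', frame)
    if cv2.waitKey(1) == ord('q'):
        break
del p  # deleting the pipeline stops the device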
Example #9
    def activate(self, imshow_debug = False, timeout_time = 120.0, pitch_pid_modifier = None,
                 rotate_pid_modifier = None, communication_on = True, trackbars_on = False,
                 red = True):
        
        self.imshow_debug           = imshow_debug
        self.timeout_time           = timeout_time
        self.pitch_pid_modifier     = pitch_pid_modifier
        self.rotate_pid_modifier    = rotate_pid_modifier
        self.communication_on       = communication_on
        self.trackbars_on           = trackbars_on
        self.red                    = red
        
        '''
        These are the options that should be in the GUI (defaults are set both in this function and in the constructor):
            imshow_debug = True     print all debug output and display the images on the Pi
            timeout_time = float    time after activation at which it times out and turns off if nothing happens
            pitch_pid_modifier = 4-element list of the form [kp, ki, kd, setpoint]
            rotate_pid_modifier = 4-element list of the form [kp, ki, kd, setpoint]
            communication_on = True turn on i2c communication with the Arduino
            trackbars_on = True     turn on the trackbars and the images for adjusting color (automatically turns on imshow_debug as well)
            red = True              go after red targets -- False goes after green targets
        '''
        
        #if args['config_overwrite']:
        #    args['config_overwrite'] = json.loads(args['config_overwrite'])
        
        #print("Using Arguments=",args)
        
        #imshow_debug = False
        #if args['imshow_debug']:
        #    imshow_debug = True
        #
        #timeout_time = args['timeout_time']
        #if timeout_time is not None:
        #    timeout_time = float(timeout_time)
        #else:
        #    timeout_time = 150.0
        #
        #pitch_pid_modifier  = args['pitch_pid_modify']
        #    
        #rotate_pid_modifier = args['rotate_pid_modify']
        #
        #if args['i2c_off']:
        #    communication_on = False
        #else:
        #    communication_on = True
        self.activated = True
        
        if self.trackbars_on:
            self.imshow_debug = True

        
        cmd_file = consts.resource_paths.device_cmd_fpath
        #if args['dev_debug']:
        #    cmd_file = ''
        #    print('depthai will not load cmd file into device.')
        
        labels = []
        with open(consts.resource_paths.blob_labels_fpath) as fp:
            labels = fp.readlines()
            labels = [i.strip() for i in labels]
        
        
        
        #print('depthai.__version__ == %s' % depthai.__version__)
        #print('depthai.__dev_version__ == %s' % depthai.__dev_version__)
        
        
        
        if not depthai.init_device(cmd_file):
            print("Error initializing device. Try to reset it.")
            exit(1)
        
        
        #print('Available streams: ' + str(depthai.get_available_streams()))
        
        # Do not modify the default values in the config Dict below directly. Instead, use the `-co` argument when running this script.
        config = {
            # Possible streams:
            # ['left', 'right','previewout', 'metaout', 'depth_sipp', 'disparity', 'depth_color_h']
            # If "left" is used, it must be in the first position.
            # To test depth use:
            #'streams': [{'name': 'depth_sipp', "max_fps": 12.0}, {'name': 'previewout', "max_fps": 12.0}, ],
            #'streams': [{'name': 'previewout', "max_fps": 3.0}, {'name': 'depth_mm_h', "max_fps": 3.0}],
            'streams': [{'name': 'previewout', "max_fps": 2.0}, {'name': 'metaout', "max_fps": 2.0}],
            #'streams': ['metaout', 'previewout'],
            'depth':
            {
                'calibration_file': consts.resource_paths.calib_fpath,
                # 'type': 'median',
                'padding_factor': 0.3
            },
            'ai':
            {
                'blob_file': consts.resource_paths.blob_fpath,
                'blob_file_config': consts.resource_paths.blob_config_fpath,
                'calc_dist_to_bb': True
            },
            'board_config':
            {
                'swap_left_and_right_cameras': True, # True for 1097 (RPi Compute) and 1098OBC (USB w/onboard cameras)
                'left_fov_deg': 69.0, # Same on 1097 and 1098OBC
                #'left_to_right_distance_cm': 9.0, # Distance between stereo cameras
                'left_to_right_distance_cm': 7.5, # Distance between 1098OBC cameras
                'left_to_rgb_distance_cm': 2.0 # Currently unused
            }
        }
        
        #if args['config_overwrite'] is not None:
        #    config = utils.merge(args['config_overwrite'],config)
        #    print("Merged Pipeline config with overwrite",config)
        
        if 'depth_sipp' in config['streams'] and ('depth_color_h' in config['streams'] or 'depth_mm_h' in config['streams']):
            print('ERROR: depth_sipp is mutually exclusive with depth_color_h and depth_mm_h')
            exit(2)
            # del config["streams"][config['streams'].index('depth_sipp')]
        
        stream_names = [stream if isinstance(stream, str) else stream['name'] for stream in config['streams']]
        
        # create the pipeline, here is the first connection with the device
        p = depthai.create_pipeline(config=config)
        
        if p is None:
            print('Pipeline is not created.')
            exit(2)
        
        
        t_start = time()
        time_start = time()
        frame_count = {}
        frame_count_prev = {}
        for s in stream_names:
            frame_count[s] = 0
            frame_count_prev[s] = 0
        
        entries_prev = []
        
        
        ##################    I2C COMMUNICATION SETUP    ####################
        if self.communication_on:
            bus = smbus.SMBus(1)
        
        # I2C address of Arduino Slave
        slave_address = 0x08
        # I think this is the register we're trying to read out of/into?
        i2c_cmd = 0x01
        
        
        
        
        ################## ADDED FOR COLOR DETECTION CWM ####################
        if self.imshow_debug:
            cv2.namedWindow('g_image')
            cv2.namedWindow('r_image')
        
        if self.trackbars_on:
            cv2.namedWindow('r1_sliders')
            cv2.namedWindow('r2_sliders')
            cv2.namedWindow('g_sliders')
            
        # white blank image
        blank_image = 255 * np.ones(shape=[10, 256, 3], dtype=np.uint8)
        thrs=50
        
        # set default slider values
        thresholdValue = 15
        r1LowHue    = 0
        r1LowSat    = 145
        r1LowVal    = 145
        r1UpHue     = 10
        r1UpSat     = 255
        r1UpVal     = 255
        r2LowHue    = 170
        r2LowSat    = 160
        r2LowVal    = 135
        r2UpHue     = 179
        r2UpSat     = 255
        r2UpVal     = 255
        gLowHue     = 30
        gLowSat     = 50
        gLowVal     = 60
        # gLowSat = 120
        # gLowVal = 70
        gUpHue      = 85
        gUpSat      = 245
        gUpVal      = 255
        
        
        #cv2.createTrackbar('Hue', 'image', 80, 179, nothing)
        #cv2.createTrackbar('Sat', 'image', 127, 255, nothing)
        #cv2.createTrackbar('Val', 'image', 222, 255, nothing)
        
        if self.trackbars_on:
            
            cv2.createTrackbar('filterThresh', 'r1_sliders', thresholdValue, 100, nothing)
            cv2.createTrackbar('r1LowHue', 'r1_sliders', r1LowHue, 179, nothing)
            cv2.createTrackbar('r1LowSat', 'r1_sliders', r1LowSat, 255, nothing)
            cv2.createTrackbar('r1LowVal', 'r1_sliders', r1LowVal, 255, nothing)
            cv2.createTrackbar('r1UpHue', 'r1_sliders', r1UpHue, 179, nothing)
            cv2.createTrackbar('r1UpSat', 'r1_sliders', r1UpSat, 255, nothing)
            cv2.createTrackbar('r1UpVal', 'r1_sliders', r1UpVal, 255, nothing)
            cv2.createTrackbar('r2LowHue', 'r2_sliders', r2LowHue, 179, nothing)
            cv2.createTrackbar('r2LowSat', 'r2_sliders', r2LowSat, 255, nothing)
            cv2.createTrackbar('r2LowVal', 'r2_sliders', r2LowVal, 255, nothing)
            cv2.createTrackbar('r2UpHue', 'r2_sliders', r2UpHue, 179, nothing)
            cv2.createTrackbar('r2UpSat', 'r2_sliders', r2UpSat, 255, nothing)
            cv2.createTrackbar('r2UpVal', 'r2_sliders', r2UpVal, 255, nothing)
            cv2.createTrackbar('gLowHue', 'g_sliders', gLowHue, 179, nothing)
            cv2.createTrackbar('gLowSat', 'g_sliders', gLowSat, 255, nothing)
            cv2.createTrackbar('gLowVal', 'g_sliders', gLowVal, 255, nothing)
            cv2.createTrackbar('gUpHue', 'g_sliders', gUpHue, 179, nothing)
            cv2.createTrackbar('gUpSat', 'g_sliders', gUpSat, 255, nothing)
            cv2.createTrackbar('gUpVal', 'g_sliders', gUpVal, 255, nothing)
        
        
        ## red ball mask areas
        #red_mask_1 = cv2.inRange(im_hsv, (0, 120, 70), (10, 255, 255))
        #red_mask_2 = cv2.inRange(im_hsv, (170, 120, 70), (179, 255, 255)) 
        
        #lower_red1 = np.array([0, 120, 100])
        #upper_red1 = np.array([10, 255, 255])
        #lower_red2 = np.array([170, 120, 100])
        #upper_red2 = np.array([179, 255, 255])
        
        #green mask area centered around 
        # (80, 127, 222)
        #green_mask = cv2.inRange(im_hsv, (55, 120, 70), (105, 255, 255)) 
        #    lower_green = np.array([40, 10, 200])
        #    upper_green = np.array([120, 245, 255])
        
        #lower_green = np.array([55, 120, 70])
        #upper_green = np.array([105, 255, 255])
        
        
        # sets how much to blur
        filt = 39
        exitNow = 0
        pause = 0
        ####################################################################
        
        
        
        
        ####################################################################
        #Setting up the targeting program:
        
        
        
        p_kp = 0.1
        p_ki = 0.0
        p_kd = 0.01
        yref = 150.0
        
        r_kp = 0.1
        r_ki = 0.0
        r_kd = 0.01
        xref = 150.0
        
        bad_guy_center = None
        
        if self.pitch_pid_modifier is not None:
            ppid_args_length = len(self.pitch_pid_modifier)
            if ppid_args_length == 4:
                try:
                    p_kp                  = float(self.pitch_pid_modifier[0])
                    p_ki                  = float(self.pitch_pid_modifier[1])
                    p_kd                  = float(self.pitch_pid_modifier[2])
                    yref                  = float(self.pitch_pid_modifier[3])
                except (ValueError, TypeError):
                    print("PITCH ARGUMENT ERROR -- INCORRECT PITCH PID ARGUMENTS SYNTAX -- RETURNING TO DEFAULT VALUES")
                    yref = 150.0
                    p_kp = 0.1
                    p_ki = 0.0
                    p_kd = 0.01        
            else:
                print("ERROR -- INCORRECT LENGTH OF PITCH PID ARGUMENTS, LENGTH SHOULD BE 4, BUT LENGTH WAS: " + str(ppid_args_length))
        
        
        if self.rotate_pid_modifier is not None:
            rpid_args_length = len(self.rotate_pid_modifier)
            if rpid_args_length == 4:
                try:
                    r_kp                  = float(self.rotate_pid_modifier[0])
                    r_ki                  = float(self.rotate_pid_modifier[1])
                    r_kd                  = float(self.rotate_pid_modifier[2])
                    xref                  = float(self.rotate_pid_modifier[3])
                except (ValueError, TypeError):
                    print("ROTATE ARGUMENT ERROR -- INCORRECT ROTATE PID ARGUMENTS SYNTAX -- RETURNING TO DEFAULT VALUES")
                    xref = 150.0
                    r_kp = 0.1
                    r_ki = 0.0
                    r_kd = 0.01        
            else:
                print("ERROR -- INCORRECT LENGTH OF ROTATE PID ARGUMENTS, LENGTH SHOULD BE 4, BUT LENGTH WAS: " + str(rpid_args_length))
        
        list_of_pid_params = [p_kp, p_ki, p_kd, yref, r_kp, r_ki, r_kd, xref]
        if self.imshow_debug:
            print(list_of_pid_params)
        
        ####################################################################
        
        
        
        
        while True:
            # retrieve data from the device
            # data is stored in packets; nnet (Neural NETwork) packets have additional functions for NNet result interpretation
            nnet_packets, data_packets = p.get_available_nnet_and_data_packets()
        
            for i, nnet_packet in enumerate(nnet_packets):
                # the result of the MobileSSD has detection rectangles (here: entries), and we can iterate through them
                for i, e in enumerate(nnet_packet.entries()):
                    # for MobileSSD entries are sorted by confidence
                    # {id == -1} or {confidence == 0} is the stopper (special for OpenVINO models and MobileSSD architecture)
                    if e[0]['id'] == -1.0 or e[0]['confidence'] == 0.0:
                        break
        
                    if i == 0:
                        entries_prev.clear()
        
                    # save the entry for later use (the image packet may not arrive at the same time as the nnet packet)
                    entries_prev.append(e)
        
            for packet in data_packets:
                if packet.stream_name not in stream_names:
                    continue # skip streams that were automatically added
                elif packet.stream_name == 'previewout':
                    data = packet.getData()
                    # the format of the previewout image is CHW (Channel, Height, Width), but OpenCV needs HWC, so we
                    # change shape (3, 300, 300) -> (300, 300, 3)
                    data0 = data[0,:,:]
                    data1 = data[1,:,:]
                    data2 = data[2,:,:]
                    frame = cv2.merge([data0, data1, data2])
                    
                    
                    
                    
                    
                    
        
                    ####################### ADDED FOR COLOR DETECTION CWM #######################
                    
                    try:
        
                        imgInit = frame
                        
                        imgBGR = cv2.resize(imgInit,(300, 300),cv2.INTER_AREA)
                        r_imgBGR = imgBGR.copy()
                        g_imgBGR = imgBGR.copy()
                        img=cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV)
        
        
                        if self.trackbars_on:
                            thresholdValue = cv2.getTrackbarPos('filterThresh', 'r1_sliders')
                            r1LowHue    = cv2.getTrackbarPos('r1LowHue', 'r1_sliders')
                            r1LowSat    = cv2.getTrackbarPos('r1LowSat', 'r1_sliders')
                            r1LowVal    = cv2.getTrackbarPos('r1LowVal', 'r1_sliders')
                            r1UpHue     = cv2.getTrackbarPos('r1UpHue', 'r1_sliders')
                            r1UpSat     = cv2.getTrackbarPos('r1UpSat', 'r1_sliders')
                            r1UpVal     = cv2.getTrackbarPos('r1UpVal', 'r1_sliders')
                            r2LowHue    = cv2.getTrackbarPos('r2LowHue', 'r2_sliders')
                            r2LowSat    = cv2.getTrackbarPos('r2LowSat', 'r2_sliders')
                            r2LowVal    = cv2.getTrackbarPos('r2LowVal', 'r2_sliders')
                            r2UpHue     = cv2.getTrackbarPos('r2UpHue', 'r2_sliders')
                            r2UpSat     = cv2.getTrackbarPos('r2UpSat', 'r2_sliders')
                            r2UpVal     = cv2.getTrackbarPos('r2UpVal', 'r2_sliders')
                            gLowHue     = cv2.getTrackbarPos('gLowHue', 'g_sliders')
                            gLowSat     = cv2.getTrackbarPos('gLowSat', 'g_sliders')
                            gLowVal     = cv2.getTrackbarPos('gLowVal', 'g_sliders')
                            gUpHue      = cv2.getTrackbarPos('gUpHue', 'g_sliders')
                            gUpSat      = cv2.getTrackbarPos('gUpSat', 'g_sliders')
                            gUpVal      = cv2.getTrackbarPos('gUpVal', 'g_sliders')
                        
        
                        lower_red1 = np.array([r1LowHue, r1LowSat, r1LowVal])
                        upper_red1 = np.array([r1UpHue, r1UpSat, r1UpVal])
        
                        lower_red2 = np.array([r2LowHue, r2LowSat, r2LowVal])
                        upper_red2 = np.array([r2UpHue, r2UpSat, r2UpVal])
        
                        lower_green = np.array([gLowHue, gLowSat, gLowVal])
                        upper_green = np.array([gUpHue, gUpSat, gUpVal])
        
        
                        red1 = [[[upper_red1[0]-((upper_red1[0]-lower_red1[0])/2),upper_red1[1]-((upper_red1[1]-lower_red1[1])/2),upper_red1[2]-((upper_red1[2]-lower_red1[2])/2)]]]
                        red2 = [[[upper_red2[0]-((upper_red2[0]-lower_red2[0])/2),upper_red2[1]-((upper_red2[1]-lower_red2[1])/2),upper_red2[2]-((upper_red2[2]-lower_red2[2])/2)]]]
                        green = [[[upper_green[0]-((upper_green[0]-lower_green[0])/2),upper_green[1]-((upper_green[1]-lower_green[1])/2),upper_green[2]-((upper_green[2]-lower_green[2])/2)]]]
                        red1=np.array(red1, dtype="uint8")
                        red2=np.array(red2, dtype="uint8")
                        green=np.array(green, dtype="uint8")
                        redColor1 = cv2.cvtColor(red1, cv2.COLOR_HSV2BGR)##Tr
                        redColor2 = cv2.cvtColor(red2, cv2.COLOR_HSV2BGR)##Tr
                        greenColor = cv2.cvtColor(green, cv2.COLOR_HSV2BGR)##Tr
            
                        
                        gmask=np.uint8(cv2.inRange(img,lower_green,upper_green))
                        rmask1=np.uint8(cv2.inRange(img,lower_red1,upper_red1))
                        rmask2=np.uint8(cv2.inRange(img,lower_red2,upper_red2))
                        rmask = rmask1+rmask2
            
                        rvis = np.uint8(img.copy())
                        gvis = np.uint8(img.copy())
                        rvis[rmask==0]=(0,0,0)
                        gvis[gmask==0]=(0,0,0)
        
                        
                        rgray = rvis[:,:,2]
                        ggray = gvis[:,:,2]
        
            
                        rblurred = cv2.GaussianBlur(rgray, (filt, filt), 0)
                        gblurred = cv2.GaussianBlur(ggray, (filt, filt), 0)
        
                        rthresh = cv2.threshold(rblurred, thresholdValue, 255, cv2.THRESH_BINARY)[1]
                        gthresh = cv2.threshold(gblurred, thresholdValue, 255, cv2.THRESH_BINARY)[1]
                        
                        red_locations = np.where(rthresh == [255])
                        green_locations = np.where(gthresh == [255])
                        
                        x_red_avg = np.mean(red_locations[1])
                        y_red_avg = np.mean(red_locations[0])
                        
                        x_green_avg = np.mean(green_locations[1])
                        y_green_avg = np.mean(green_locations[0])
                        
        ###### current position that we want to drive to the reference by moving the turret!    ######
                        if self.red:
                            bad_guy_center = (x_red_avg, y_red_avg)
                        else:
                            bad_guy_center = (x_green_avg, y_green_avg)
        ######                                                                                  ######
                        

             
                        if self.imshow_debug:
                            ccr1=(int(redColor1[0][0][0]),int(redColor1[0][0][1]),int(redColor1[0][0][2]))
                            ccr2=(int(redColor2[0][0][0]),int(redColor2[0][0][1]),int(redColor2[0][0][2]))
                            ccg=(int(greenColor[0][0][0]),int(greenColor[0][0][1]),int(greenColor[0][0][2]))
                            
                            cv2.circle(r_imgBGR, (25, 25), 20, ccr1, -1)
                            cv2.circle(r_imgBGR, (70, 25), 20, ccr2, -1)
                            cv2.circle(g_imgBGR, (25, 25), 20, ccg, -1)
            
                        rvisBGR=cv2.cvtColor(rvis, cv2.COLOR_HSV2BGR)
                        gvisBGR=cv2.cvtColor(gvis, cv2.COLOR_HSV2BGR)
                        
                        rthresh = cv2.cvtColor(rthresh, cv2.COLOR_GRAY2BGR)
                        gthresh = cv2.cvtColor(gthresh, cv2.COLOR_GRAY2BGR)
                        
                        
                        if green_locations[0].size != 0:
                            green_left   = np.amin(green_locations[1])
                            green_right  = np.amax(green_locations[1])
                            green_top    = np.amin(green_locations[0])
                            green_bottom = np.amax(green_locations[0])
                            if self.imshow_debug:
                                gvisBGR = cv2.rectangle(gvisBGR, (green_left, green_top), (green_right, green_bottom), (0, 255, 0), 2)
                                gvisBGR = cv2.putText(gvisBGR, 'GOOD GUY', (green_left, green_top-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                                gvisBGR = cv2.putText(gvisBGR, "Center:", (green_left, green_bottom+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                                gvisBGR = cv2.putText(gvisBGR, "x = " + str(round(x_green_avg)), (green_left, green_bottom+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                                gvisBGR = cv2.putText(gvisBGR, "y = " + str(round(y_green_avg)), (green_left, green_bottom+45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
        
        
                            
                        if red_locations[0].size != 0:
                            red_left   = np.amin(red_locations[1])
                            red_right  = np.amax(red_locations[1])
                            red_top    = np.amin(red_locations[0])
                            red_bottom = np.amax(red_locations[0])
                            if self.imshow_debug:
                                rvisBGR = cv2.rectangle(rvisBGR, (red_left, red_top), (red_right, red_bottom), (0, 0, 255), 2)
                                rvisBGR = cv2.putText(rvisBGR, 'BAD GUY', (red_left, red_top-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                                rvisBGR = cv2.putText(rvisBGR, "Center:", (red_left, red_bottom+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                                rvisBGR = cv2.putText(rvisBGR, "x = " + str(round(x_red_avg)), (red_left, red_bottom+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                                rvisBGR = cv2.putText(rvisBGR, "y = " + str(round(y_red_avg)), (red_left, red_bottom+45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                            
                            
        
                        if self.imshow_debug:
                            cv2.imshow('r_image',np.hstack([r_imgBGR, rthresh, rvisBGR])) #np.hstack([original, vis]))#np.hstack([thresh, gray2]))
                            cv2.imshow('g_image',np.hstack([g_imgBGR, gthresh, gvisBGR])) #np.hstack([original, vis]))#np.hstack([thresh, gray2]))
                            
                        if self.trackbars_on:
                            cv2.imshow('g_sliders', blank_image)
                            cv2.imshow('r1_sliders', blank_image)
                            cv2.imshow('r2_sliders', blank_image)
                        
        #                    cv2.imshow('image',np.hstack([imgBGR, visBGR])) #np.hstack([original, vis]))#np.hstack([thresh, gray2]))
                        
                    except KeyboardInterrupt:
                        raise
                    except cv2.error as e:
                
                        print("Here it is \n",str(e), "\n")
                        if similar(str(e), " /home/pi/opencv-3.3.0/modules/imgproc/src/imgwarp.cpp:3483: error: (-215) ssize.width > 0 && ssize.height > 0 in function resize")>.8:
                            print("\n\n\n\n Your video appears to have ended\n\n\n")
                        break
                            
                    
                    
        ###########################################################################
                    
                    
        
                    img_h = frame.shape[0]
                    img_w = frame.shape[1]
                    
                    
                    confidences = []
                    #cwm: Takes highest confidence value and selects only that object
                    for e in entries_prev:
                        confidences.append(e[0]['confidence'])
                        
                    # print(confidences)
                    
                    new_entries_prev = entries_prev
                    
                    if confidences:
                        index_of_max_confidence = np.argmax(confidences)
                    
                        new_entries_prev = []
                        new_entries_prev.append(entries_prev[index_of_max_confidence])
                    
        
                    # iterate through pre-saved entries & draw rectangle & text on image:
                    for e in new_entries_prev:
                        # the lower confidence threshold - the more we get false positives
                        if e[0]['confidence'] > 0.5:
                            x1 = int(e[0]['left'] * img_w)
                            y1 = int(e[0]['top'] * img_h)
        
                            pt1 = x1, y1
                            pt2 = int(e[0]['right'] * img_w), int(e[0]['bottom'] * img_h)
        
                            if self.imshow_debug:
                                cv2.rectangle(frame, pt1, pt2, (0, 0, 255))
        
                            # Handles case where TensorEntry object label = 7552.
                            if e[0]['label'] >= len(labels):
                                print("Label index=",e[0]['label'], "is out of range. Not applying text to rectangle.")
                            else:
                                pt_t1 = x1, y1 + 20
                                if self.imshow_debug:
                                    cv2.putText(frame, labels[int(e[0]['label'])], pt_t1, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                                    print('\n')
                                    print(labels[int(e[0]['label'])])
        
                                pt_t2 = x1, y1 + 40
                                if self.imshow_debug:
                                    cv2.putText(frame, '{:.2f}'.format(100*e[0]['confidence']) + ' %', pt_t2, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
                                    print('{:.2f}'.format(100*e[0]['confidence']) + ' %')
                                if config['ai']['calc_dist_to_bb']:
                                    pt_t3 = x1, y1 + 60
                                    pt_t4 = x1, y1 + 80
                                    pt_t5 = x1, y1 + 100
                                    
                                    if self.imshow_debug:
                                        cv2.putText(frame, 'x:' '{:7.3f}'.format(e[0]['distance_x']) + ' m', pt_t3, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
                                        cv2.putText(frame, 'y:' '{:7.3f}'.format(e[0]['distance_y']) + ' m', pt_t4, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
                                        cv2.putText(frame, 'z:' '{:7.3f}'.format(e[0]['distance_z']) + ' m', pt_t5, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
                                        print('x:' '{:7.3f}'.format(e[0]['distance_x']) + ' m')
                                        print('y:' '{:7.3f}'.format(e[0]['distance_y']) + ' m')
                                        print('z:' '{:7.3f}'.format(e[0]['distance_z']) + ' m')
                    
                    
                    if self.imshow_debug:
                        print('----------frame over ----------')
        
        
        
        
        
                    if self.imshow_debug:
                        cv2.putText(frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                        cv2.imshow('previewout', frame)
                elif packet.stream_name == 'left' or packet.stream_name == 'right' or packet.stream_name == 'disparity':
                    frame_bgr = packet.getData()
                    if self.imshow_debug:
                        cv2.putText(frame_bgr, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                        cv2.putText(frame_bgr, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                        cv2.imshow(packet.stream_name, frame_bgr)
                elif packet.stream_name.startswith('depth'):
                    frame = packet.getData()
                    #print(frame)
        
                    if self.imshow_debug:
                        if len(frame.shape) == 2:
                            if frame.dtype == np.uint8: # grayscale
                                cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
                                cv2.putText(frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
                                cv2.imshow(packet.stream_name, frame)
                            else: # uint16
                                #print("frame before integer division")
                                #print(frame)
                                frame = (65535 // frame).astype(np.uint8)
                                #print("frame after integer division")
                                #print(frame)
                                #colorize depth map, comment out code below to obtain grayscale
                                frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT)
                                # frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
                                cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
                                cv2.putText(frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
                                cv2.imshow(packet.stream_name, frame)
                        else: # bgr
                            cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
                            cv2.putText(frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
                            cv2.imshow(packet.stream_name, frame)
                
                frame_count[packet.stream_name] += 1
            
            
            
            
            # This is the end of the "while loop" -- pid and command sending will likely work here.    
        #################### PID AND COMMUNICATIONS #################################################
                    
            # compute new output from the PID according to the system's current value
        #            control = pid(v)
            if bad_guy_center is not None:
                bad_guy_x = bad_guy_center[0]
                bad_guy_y = bad_guy_center[1]
                
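                # Note: constructing fresh PID controllers every frame resets their
                # integral and derivative state, so effectively only the proportional
                # term acts here; reuse the controllers across iterations for full PID behavior.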
                pitch_controller    = simple_pid.PID(p_kp, p_ki, p_kd, setpoint=yref)
                pitch_command       = round(pitch_controller(bad_guy_y), 2)
                p_cmd               = "{:.2f}".format(pitch_command)
                
                rotate_controller   = simple_pid.PID(r_kp, r_ki, r_kd, setpoint=xref)
                rotate_command      = round(rotate_controller(bad_guy_x), 2)
                r_cmd               = "{:.2f}".format(rotate_command)
                
                # if commands go to within some range, send command to fire
                if abs(pitch_command) < 0.5 and abs(rotate_command) < 0.5:
                    p_cmd = "f"
                    r_cmd = "f"
                    
                ############## FIX COMMANDS TODO FOR SCOTT ################
                ##############   MAKE I2C WORK ENCODERS    ################
                    
                    
                
                
                self.pitch_value  = p_cmd
                self.rotate_value = r_cmd                
                
                
                if self.imshow_debug:
                    print("pitch command = " + p_cmd)
                    print("rotation command = " + r_cmd)
                
                
                # Then send commands to arduino over i2c wire or USB 
                if self.communication_on:
                    bytesToSend = ConvertStringToBytes(r_cmd + ',' + p_cmd)
                    print(bytesToSend)
                    bus.write_i2c_block_data(slave_address, i2c_cmd, bytesToSend)
            
            
            
            
        #########################################################################################    
            
            
            
            
            t_curr = time()
            if t_start + 1.0 < t_curr:
                t_start = t_curr
        
                for s in stream_names:
                    frame_count_prev[s] = frame_count[s]
                    frame_count[s] = 0
        
            if cv2.waitKey(1) == ord('q'):
                break
            
            if t_curr - time_start > self.timeout_time:
                print('########################################################')
                print('TIMED OUT')
                print('########################################################')
                break
        
        del p  # to stop the pipeline, the object must be deleted; otherwise the device keeps running. Required if you add code after the main loop, otherwise you can omit it.
        
        cv2.destroyAllWindows()
        
        print('py: DONE.')
        self.activated = False
        
Example #10
import depthai # access the camera and its data packets
import consts.resource_paths # load paths to depthai resources

# Load the device cmd file
if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
    print("Error initializing device. Try to reset it.")
    exit(1)

# Create the pipeline using the 'metaout' and 'previewout' streams, establishing the first connection to the device.
p = depthai.create_pipeline(
    {
        # metaout - contains neural net output
        # previewout - color video
        'streams': ['metaout','previewout'],
        'ai':
        {
            # The paths below are based on the tutorial steps.
            'blob_file': "/home/pi/open_model_zoo_downloads/Retail/object_detection/face/sqnet1.0modif-ssd/0004/dldt/FP16/face-detection-retail-0004.blob",
            'blob_file_config': "/home/pi/open_model_zoo_downloads/Retail/object_detection/face/sqnet1.0modif-ssd/0004/dldt/FP16/face-detection-retail-0004.json"
        }
    }
)

if p is None:
    print('Pipeline was not created.')
    exit(2)

# Maintains a list of detections that will be applied to the previewout stream.
# Only the most recent detections are applied.
entries_prev = []
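
The main loop of this tutorial is not included in the excerpt; a hedged sketch of its usual shape, following the pattern of the other examples on this page:

while True:
    nnet_packets, data_packets = p.get_available_nnet_and_data_packets()
    for nnet_packet in nnet_packets:
        entries_prev = []
        for e in nnet_packet.entries():
            if e[0]['id'] == -1.0 or e[0]['confidence'] == 0.0:
                break
            entries_prev.append(e)
    for packet in data_packets:
        if packet.stream_name == 'previewout':
            pass  # draw entries_prev onto the frame here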
Example #11
parser.add_argument('--labels',
                    help='File path of labels file.',
                    required=True)
parser.add_argument('--threshold', help='Confidence threshold.', default=0.3)
args = parser.parse_args()

if __name__ == "__main__":
    detector = Detector(args.labels, args.model, args.threshold)

    if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
        raise RuntimeError("Error initializing device. Try to reset it.")

    p = depthai.create_pipeline(
        config={
            "streams": ["metaout", "previewout"],
            "ai": {
                "blob_file": args.model,
                "blob_file_config": 'yolov2/YOLO_best_mAP_alt.json'
            }
        })

    if p is None:
        raise RuntimeError("Error initializing pipelne")
    recv = False
    while True:
        nnet_packets, data_packets = p.get_available_nnet_and_data_packets()

        for nnet_packet in nnet_packets:
            raw_detections = nnet_packet.get_tensor(0)
            raw_detections.dtype = np.float16
            raw_detections = np.squeeze(raw_detections)
            output_shape = [5, 6, 7, 7]
Example #12
def main():
    args = vars(parse_args())

    if args['config_overwrite']:
        args['config_overwrite'] = json.loads(args['config_overwrite'])

    print("Using Arguments=", args)

    if 'capture' in args['mode']:

        # Delete Dataset directory if asked
        if args['image_op'] == 'delete':
            shutil.rmtree('dataset/')

        # Creates dirs to save captured images
        try:
            for path in ["left", "right"]:
                Path("dataset/" + path).mkdir(parents=True, exist_ok=True)
        except OSError as e:
            print(
                "An error occurred trying to create image dataset directories:",
                e)
            exit(1)

        # Create Depth AI Pipeline to start video streaming
        cmd_file = consts.resource_paths.device_cmd_fpath

        # Possible to try and reboot?
        # The following doesn't work (have to manually hit switch on device)
        # depthai.reboot_device
        # time.sleep(1)
        if not depthai.init_device(cmd_file=cmd_file):
            print(
                "[ERROR] Unable to initialize device. Try to reset it. Exiting."
            )
            exit(1)

        config = {
            'streams': ['left', 'right'],
            'depth': {
                'calibration_file': consts.resource_paths.calib_fpath,
                # 'type': 'median',
                'padding_factor': 0.3
            },
            'ai': {
                'blob_file': consts.resource_paths.blob_fpath,
                'blob_file_config': consts.resource_paths.blob_config_fpath
            },
            'board_config': {
                'swap_left_and_right_cameras': True,
                'left_fov_deg': 69.0,
                'left_to_right_distance_cm': 9.0
            }
        }

        if args['config_overwrite'] is not None:
            config = utils.merge(args['config_overwrite'], config)
            print("Merged Pipeline config with overwrite", config)

        pipeline = depthai.create_pipeline(config)

        if pipeline is None:
            print("[ERROR] Unable to create pipeline. Exiting.")
            exit(2)

        num_of_polygons = 0
        polygons_coordinates = []

        image_per_polygon_counter = 0  # tracks how many images were captured for each polygon
        complete = False  # Indicates if images have been captured for all polygons

        polygon_index = args['polygons'][
            0]  # tracks which polygon is currently in use
        total_num_of_captured_images = 0  # total number of captured images

        capture_images = False  # tracks the state of the capture button (spacebar)
        captured_left_image = False  # whether an image from the left camera was captured
        captured_right_image = False  # whether an image from the right camera was captured

        run_capturing_images = True  # becomes False to stop the main loop once all polygon indexes have been used

        calculate_coordinates = False  # tracks whether the polygon coordinates have been calculated
        total_images = args['count'] * len(args['polygons'])

        # Chessboard detection termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                    0.001)
        check_frame = 30
        capture_failed = False
        leftcnt = 0
        rightcnt = 0
        leftcolor = (0, 0, 255)
        rightcolor = (0, 0, 255)

        while run_capturing_images:
            _, data_list = pipeline.get_available_nnet_and_data_packets()
            for packet in data_list:
                if packet.stream_name == 'left' or packet.stream_name == 'right':
                    if packet.stream_name == 'left':
                        leftcnt += 1
                    if packet.stream_name == 'right':
                        rightcnt += 1

                    frame = packet.getData()

                    if not calculate_coordinates:
                        height, width = frame.shape
                        polygons_coordinates = setPolygonCoordinates(
                            height, width)
                        # polygons_coordinates = select_polygon_coords(polygons_coordinates,args['polygons'])
                        num_of_polygons = len(args['polygons'])
                        print(
                            "Starting image capture. Press the [ESC] key to abort."
                        )
                        print(
                            "Will take %i total images, %i per each polygon." %
                            (total_images, args['count']))
                        calculate_coordinates = True

                    frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

                    if capture_images:
                        if packet.stream_name == 'left':
                            if find_chessboard(frame, small=False):
                                filename = image_filename(
                                    packet.stream_name, polygon_index,
                                    total_num_of_captured_images)
                                cv2.imwrite("dataset/left/" + str(filename),
                                            frame)
                                print("py: Saved image as: " + str(filename))
                                captured_left_image = True
                            else:
                                print(
                                    "py: could not find chessboard, try again")
                                capture_failed = True
                                capture_images, captured_left_image, captured_right_image = False, False, False

                        elif packet.stream_name == 'right':
                            if find_chessboard(frame, small=False):
                                filename = image_filename(
                                    packet.stream_name, polygon_index,
                                    total_num_of_captured_images)
                                cv2.imwrite("dataset/right/" + str(filename),
                                            frame)
                                print("py: Saved image as: " + str(filename))
                                captured_right_image = True
                            else:
                                print(
                                    "py: could not find chessboard, try again"
                                )
                                capture_failed = True
                                capture_images, captured_left_image, captured_right_image = False, False, False

                        if captured_right_image and captured_left_image:
                            capture_failed = False
                            capture_images = False
                            captured_left_image = False
                            captured_right_image = False
                            total_num_of_captured_images += 1
                            image_per_polygon_counter += 1

                            if image_per_polygon_counter == args['count']:
                                image_per_polygon_counter = 0
                                try:
                                    polygon_index = args['polygons'][
                                        args['polygons'].index(polygon_index) +
                                        1]
                                except IndexError:
                                    complete = True

                    if not complete:
                        if rightcnt % check_frame == 0:
                            # Find the chess board corners once a second
                            if find_chessboard(frame):
                                rightcolor = (0, 255, 0)
                            else:
                                rightcolor = (0, 0, 255)
                        if leftcnt % check_frame == 0:
                            if find_chessboard(frame):
                                leftcolor = (0, 255, 0)
                            else:
                                leftcolor = (0, 0, 255)

                        cv2.putText(
                            frame,
                            "Align cameras with callibration board and press spacebar to capture the image:",
                            (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                            (0, 255, 0))
                        cv2.putText(
                            frame, "Polygon Position: %i. " % (polygon_index) +
                            "Captured %i of %i images." %
                            (total_num_of_captured_images, total_images),
                            (0, 700), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                            (255, 0, 0))
                        if capture_failed:
                            cv2.putText(
                                frame,
                                "No chessboard detected, please try again ",
                                (0, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                (0, 0, 255))

                        if packet.stream_name == 'left':
                            cv2.polylines(
                                frame,
                                np.array([
                                    getPolygonCoordinates(
                                        polygon_index, polygons_coordinates)
                                ]), True, leftcolor, 4)
                        else:
                            cv2.polylines(
                                frame,
                                np.array([
                                    getPolygonCoordinates(
                                        polygon_index, polygons_coordinates)
                                ]), True, rightcolor, 4)

                        frame = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)
                        cv2.imshow(packet.stream_name, frame)

                    else:
                        # all polygons used, stop the loop
                        run_capturing_images = False

            key = cv2.waitKey(1)

            if key == ord(" "):
                capture_images = True

            elif key == ord("q"):
                print("py: Calibration has been interrupted!")
                exit(0)

        del pipeline  # must delete the object manually, otherwise the HostDataPacket queue runs out of space ("Not enough free space to save {stream}")

        cv2.destroyWindow("left")
        cv2.destroyWindow("right")

    else:
        print("Skipping capture.")

    if 'process' in args['mode']:
        print("Starting image processing")
        cal_data = StereoCalibration()
        try:
            cal_data.calibrate("dataset", args['square_size_cm'],
                               "./resources/depthai.calib")
        except AssertionError as e:
            print("[ERROR] " + str(e))
            exit(1)
    else:
        print("Skipping process.")

    print('py: DONE.')
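
main() is presumably invoked via the standard entry-point guard:

if __name__ == '__main__':
    main()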
Example #13
def main():
    nfeatures = 2000  # Number of keypoints to sample
    inlierThreshold = 0.001
    nms_dist = 4
    conf_thresh = 0.005
    nn_thresh = 0.7
    h = 100
    w = 100
    res = '{}x{}'.format(h, w)

    if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
        raise RuntimeError("Error initializing device. Try to reset it.")

    local_dir = os.getcwd()
    p = depthai.create_pipeline(
        config={
            "streams": ["previewout", "metaout"],
            "ai": {
                "blob_file":
                os.path.join(local_dir, "output", "superpoint_{}.blob".format(
                    res)),
                "blob_file_config":
                os.path.join(local_dir, "output", "superpoint_{}.json".format(
                    res)),
                "calc_dist_to_bb":
                False,
            },
            "app": {
                "sync_video_meta_stream": True
            },
        })

    if p is None:
        raise RuntimeError("Error initializing pipelne")

    sp = SuperPointWrapper(h=h,
                           w=w,
                           nms_dist=nms_dist,
                           conf_thresh=conf_thresh,
                           nn_thresh=nn_thresh)
    kp_desc_list = []
    while True:
        nnet_packets, data_packets = p.get_available_nnet_and_data_packets()
        print('nnet_packets: ', len(nnet_packets))
        packets_len = len(nnet_packets) + len(data_packets)
        print('packets_len: ', packets_len)

        kp_desc_list.extend(sp.decode_packets(nnet_packets))

        for nnet_packet, packet in zip(nnet_packets, data_packets):
            if packet.stream_name == 'previewout':
                data = packet.getData()
                data0 = data[0, :, :]
                frame = cv2.cvtColor(data0, cv2.COLOR_GRAY2BGR)

                if len(kp_desc_list) > 0:
                    kps, _ = kp_desc_list.pop(0)
                    for k in kps:
                        print(k[:2])
                        cv2.circle(frame, (int(k[0]), int(k[1])), 3,
                                   (0, 0, 255), -1)

                frame = cv2.resize(frame, (240, 240))

                cv2.imshow('previewout', frame)

        if cv2.waitKey(1) == ord('q'):
            break

    del p
    depthai.deinit_device()
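
The loop above only draws keypoints; the descriptors accumulated in kp_desc_list are left unused. The nn_thresh value passed to SuperPointWrapper is conventionally a maximum descriptor distance for matching keypoints across frames. A self-contained sketch of such a match, assuming L2-normalized (N, 256) SuperPoint descriptors (this helper is not part of the snippet):

import numpy as np

def match_descriptors(desc_a, desc_b, nn_thresh=0.7):
    # For unit-length rows, squared L2 distance = 2 - 2 * (a . b).
    dist = np.sqrt(np.maximum(2.0 - 2.0 * desc_a @ desc_b.T, 0.0))
    nearest = dist.argmin(axis=1)
    # Keep only pairs whose nearest-neighbour distance clears the threshold.
    return [(i, j) for i, j in enumerate(nearest) if dist[i, j] < nn_thresh]
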
Example #14
import numpy as np  # numpy - manipulate the packet data returned by depthai
import cv2  # opencv - display the video stream
import depthai  # access the camera and its data packets
import consts.resource_paths  # load paths to depthai resources

if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
    raise RuntimeError("Error initializing device. Try to reset it.")

# Create the pipeline using the 'previewout' stream, establishing the first connection to the device.
pipeline = depthai.create_pipeline(config={
    'streams': ['previewout', 'metaout'],
    'ai': {
        # The paths below are based on the tutorial steps.
        "blob_file": "/root/open_model_zoo_downloads/intel/face-detection-retail-0004/FP16/face-detection-retail-0004.bin",
        "blob_file_config": "/root/open_model_zoo_downloads/intel/face-detection-retail-0004/FP16/face-detection-retail-0004.json",
    }
})

if pipeline is None:
    raise RuntimeError('Pipeline creation failed!')

entries_prev = []

while True:
    # Retrieve data packets from the device.
    # A data packet contains the video frame data.
    nnet_packets, data_packets = pipeline.get_available_nnet_and_data_packets()

    for _, nnet_packet in enumerate(nnet_packets):
        entries_prev = []
        for _, e in enumerate(nnet_packet.entries()):
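            # -- Hedged reconstruction: the source page truncates this example here. --
            # In the depthai face-detection tutorials the loop typically continues as
            # below; the entry fields ('id', 'confidence') come from the blob config
            # used by those tutorials and are an assumption for this snippet.
            if e[0]['id'] == -1.0 or e[0]['confidence'] == 0.0:
                break
            if e[0]['confidence'] > 0.5:
                entries_prev.append(e[0])
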
Example #15
    def setup(self):

        global pitch_pid_modifier
        global rotate_pid_modifier
        global t_start
        global time_start
        global frame_count
        global frame_count_prev
        global bus
        global slave_address
        global i2c_cmd

        # `global` statements cannot include assignments, so initialize here.
        t_start = time()
        time_start = time()
        frame_count = {}
        frame_count_prev = {}

        self.arg_setup()

        # imshow_debug = False
        if self.args['imshow_debug']:
            self.imshow_debug = True

        self.timeout_time = self.tmout()

        pitch_pid_modifier = self.args['pitch_pid_modify']
        rotate_pid_modifier = self.args['rotate_pid_modify']

        if self.args['i2c_off']:
            self.communication_on = False
        else:
            self.communication_on = True

        trackbars_on = False
        if self.args['create_trackbars']:
            self.trackbars_on = True
            trackbars_on = True
            self.imshow_debug = True

        # cmd_file = consts.resource_paths.device_cmd_fpath
        if self.args['dev_debug']:
            self.cmd_file = ''
            print('depthai will not load cmd file into device.')

        with open(consts.resource_paths.blob_labels_fpath) as fp:
            self.labels = fp.readlines()
            self.labels = [i.strip() for i in self.labels]

        print('depthai.__version__ == %s' % depthai.__version__)
        print('depthai.__dev_version__ == %s' % depthai.__dev_version__)

                
        if not depthai.init_device(self.cmd_file):
            print("Error initializing device. Try to reset it.")
            exit(1)


        print('Available streams: ' + str(depthai.get_available_streams()))


        config = self.config
        if self.args['config_overwrite'] is not None:
            config = utils.merge(self.args['config_overwrite'], config)
            print("Merged pipeline config with overwrite:", config)

        if 'depth_sipp' in config['streams'] and ('depth_color_h' in config['streams'] or 'depth_mm_h' in config['streams']):
            print('ERROR: depth_sipp is mutually exclusive with depth_color_h and depth_mm_h')
            exit(2)
            # del config["streams"][config['streams'].index('depth_sipp')]

        self.stream_names = [stream if isinstance(stream, str) else stream['name'] for stream in config['streams']]

        # create the pipeline, here is the first connection with the device
        self.p = depthai.create_pipeline(config=config)

        if self.p is None:
            print('Pipeline is not created.')
            exit(2)


        # t_start = time()
        # time_start = time()
        # frame_count = {}
        # frame_count_prev = {}
        for s in self.stream_names:
            frame_count[s] = 0
            frame_count_prev[s] = 0

        self.entries_prev = []


        ##################    I2C COMMUNICATION SETUP    ####################
        if self.communication_on:
            self.bus = smbus.SMBus(1)

        ################## ADDED FOR COLOR DETECTION CWM ####################
        if self.imshow_debug:
            cv2.namedWindow('g_image')
            cv2.namedWindow('r_image')

        # if self.trackbars_on:
        #     cv2.namedWindow('r1_sliders')
        #     cv2.namedWindow('r2_sliders')
        #     cv2.namedWindow('g_sliders')
        
        if trackbars_on:
            cv2.namedWindow('r1_sliders')
            cv2.namedWindow('r2_sliders')
            cv2.namedWindow('g_sliders')
    
        # white blank image
        self.blank_image = 255 * np.ones(shape=[10, 256, 3], dtype=np.uint8)
        self.thrs = 50

        #cv2.createTrackbar('Hue', 'image', 80, 179, nothing)
        #cv2.createTrackbar('Sat', 'image', 127, 255, nothing)
        #cv2.createTrackbar('Val', 'image', 222, 255, nothing)

        if trackbars_on:
            
            cv2.createTrackbar('filterThresh', 'r1_sliders', self.thresholdValue, 100, nothing)
            cv2.createTrackbar('r1LowHue', 'r1_sliders', self.r1LowHue, 179, nothing)
            cv2.createTrackbar('r1LowSat', 'r1_sliders', self.r1LowSat, 255, nothing)
            cv2.createTrackbar('r1LowVal', 'r1_sliders', self.r1LowVal, 255, nothing)
            cv2.createTrackbar('r1UpHue', 'r1_sliders', self.r1UpHue, 179, nothing)
            cv2.createTrackbar('r1UpSat', 'r1_sliders', self.r1UpSat, 255, nothing)
            cv2.createTrackbar('r1UpVal', 'r1_sliders', self.r1UpVal, 255, nothing)
            cv2.createTrackbar('r2LowHue', 'r2_sliders', self.r2LowHue, 179, nothing)
            cv2.createTrackbar('r2LowSat', 'r2_sliders', self.r2LowSat, 255, nothing)
            cv2.createTrackbar('r2LowVal', 'r2_sliders', self.r2LowVal, 255, nothing)
            cv2.createTrackbar('r2UpHue', 'r2_sliders', self.r2UpHue, 179, nothing)
            cv2.createTrackbar('r2UpSat', 'r2_sliders', self.r2UpSat, 255, nothing)
            cv2.createTrackbar('r2UpVal', 'r2_sliders', self.r2UpVal, 255, nothing)
            cv2.createTrackbar('gLowHue', 'g_sliders', self.gLowHue, 179, nothing)
            cv2.createTrackbar('gLowSat', 'g_sliders', self.gLowSat, 255, nothing)
            cv2.createTrackbar('gLowVal', 'g_sliders', self.gLowVal, 255, nothing)
            cv2.createTrackbar('gUpHue', 'g_sliders', self.gUpHue, 179, nothing)
            cv2.createTrackbar('gUpSat', 'g_sliders', self.gUpSat, 255, nothing)
            cv2.createTrackbar('gUpVal', 'g_sliders', self.gUpVal, 255, nothing)
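            # (Context) The trackbars above register a no-op callback; in the frame
            # loop their current positions are read back with cv2.getTrackbarPos and
            # fed to cv2.inRange, e.g. for the first red range:
            #   lo = (cv2.getTrackbarPos('r1LowHue', 'r1_sliders'),
            #         cv2.getTrackbarPos('r1LowSat', 'r1_sliders'),
            #         cv2.getTrackbarPos('r1LowVal', 'r1_sliders'))
            #   hi = (cv2.getTrackbarPos('r1UpHue', 'r1_sliders'),
            #         cv2.getTrackbarPos('r1UpSat', 'r1_sliders'),
            #         cv2.getTrackbarPos('r1UpVal', 'r1_sliders'))
            #   red_mask_1 = cv2.inRange(im_hsv, lo, hi)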


        ## red ball mask areas
        #red_mask_1 = cv2.inRange(im_hsv, (0, 120, 70), (10, 255, 255))
        #red_mask_2 = cv2.inRange(im_hsv, (170, 120, 70), (179, 255, 255)) 

        #lower_red1 = np.array([0, 120, 100])
        #upper_red1 = np.array([10, 255, 255])
        #lower_red2 = np.array([170, 120, 100])
        #upper_red2 = np.array([179, 255, 255])

        #green mask area centered around 
        # (80, 127, 222)
        #green_mask = cv2.inRange(im_hsv, (55, 120, 70), (105, 255, 255)) 
        #    lower_green = np.array([40, 10, 200])
        #    upper_green = np.array([120, 245, 255])

        #lower_green = np.array([55, 120, 70])
        #upper_green = np.array([105, 255, 255])


        # sets how much to blur
        self.filt = 39
        self.exitNow = 0
        self.pause = 0
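
The setup opens smbus.SMBus(1), but the sending side is outside this snippet. For context, a minimal sketch of pushing a command to the slave over I2C with the same smbus API (the address, command byte, and payload below are placeholders, not this project's values):

import smbus

bus = smbus.SMBus(1)   # I2C bus 1, the standard header bus on a Raspberry Pi
slave_address = 0x08   # placeholder device address
i2c_cmd = 0x01         # placeholder command byte

def send_command(payload):
    # Sends the command byte followed by up to 32 payload bytes.
    bus.write_i2c_block_data(slave_address, i2c_cmd, payload)

send_command([90, 0])  # e.g. a pitch/rotate value pair
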
Example #16
import numpy as np  # numpy - manipulate the packet data returned by depthai
import cv2  # opencv - display the video stream
import depthai  # access the camera and its data packets
import consts.resource_paths  # load paths to depthai resources

# Load the device cmd file
if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
    print("Error initializing device. Try to reset it.")
    exit(1)

# Create the pipeline using the 'metaout' and 'previewout' streams, establishing the first connection to the device.
p = depthai.create_pipeline({
    # metaout - contains neural net output
    # previewout - color video
    'streams': ['metaout', 'previewout'],
    'ai': {
        # The paths below are based on the tutorial steps.
        'blob_file': "face-detection-retail-0004.blob",
        'blob_file_config': "face-detection-retail-0004.json"
    }
})

if p is None:
    print('Pipeline was not created.')
    exit(2)

# Maintains a list of detections that will be applied to the previewout stream.
# Only the most recent detections are applied.
entries_prev = []

while True:
Example #17
import numpy as np  # numpy - manipulate the packet data returned by depthai
import cv2  # opencv - display the video stream
import depthai  # access the camera and its data packets
import consts.resource_paths  # load paths to depthai resources

if not depthai.init_device(consts.resource_paths.device_cmd_fpath):
    raise RuntimeError("Error initializing device. Try to reset it.")

# Create the pipeline using the 'previewout' stream, establishing the first connection to the device.
pipeline = depthai.create_pipeline(
    config={
        'streams': ['previewout', 'metaout'],
        'ai': {
            "blob_file": consts.resource_paths.blob_fpath,
            "blob_file_config": consts.resource_paths.blob_config_fpath,
        }
    })

if pipeline is None:
    raise RuntimeError('Pipeline creation failed!')

entries_prev = []

while True:
    # Retrieve data packets from the device.
    # A data packet contains the video frame data.
    nnet_packets, data_packets = pipeline.get_available_nnet_and_data_packets()

    for _, nnet_packet in enumerate(nnet_packets):
        entries_prev = []
        for _, e in enumerate(nnet_packet.entries()):