Example #1
    def _create_depth(self, pipeline, using_nn):
        '''!
            Creates the depth node.

            Uses the appropriate settings for the current configs and the existing pipeline.

            Parameters:
                @param pipeline: The pipeline to add the node to
                @param using_nn: Whether the pipeline is using a neural network or not
        '''

        if self.depth:

            depth_node = pipeline.createStereoDepth()
            depth_node.setLeftRightCheck(True)       # better occlusion handling
            depth_node.setConfidenceThreshold(200)   # discard low-confidence disparities
            
            if (not using_nn) and self._configs["depth_close"]:  # see close -> extended disparity
                depth_node.setExtendedDisparity(True)
                depth_node.setSubpixel(False)
            elif (not using_nn) and self._configs["depth_far"]:  # see far -> subpixel disparity
                depth_node.setSubpixel(True)
                depth_node.setExtendedDisparity(False)
            else:
                # Align depth to the RGB camera so it can be matched to color frames
                depth_node.setDepthAlign(dai.CameraBoardSocket.RGB)
                #depth_node.setDepthAlign(dai.StereoDepthProperties.DepthAlign.CENTER)
                depth_node.setSubpixel(False)
                depth_node.setExtendedDisparity(False)

            self.nodes[OAK_Stage.DEPTH] = depth_node

            if not self._configs["depth_host"]:  # spatial locations computed on-device
                spatialLocationCalculator = pipeline.createSpatialLocationCalculator()

                # Default ROI in the center of the frame; it can be
                # reconfigured at runtime through the config input
                topLeft = dai.Point2f(0.4, 0.4)
                bottomRight = dai.Point2f(0.6, 0.6)
                config = dai.SpatialLocationCalculatorConfigData()
                config.depthThresholds.lowerThreshold = 100    # mm
                config.depthThresholds.upperThreshold = 10000  # mm
                config.roi = dai.Rect(topLeft, bottomRight)
                spatialLocationCalculator.setWaitForConfigInput(False)
                spatialLocationCalculator.initialConfig.addROI(config)

                # The calculator consumes depth frames, not raw disparity
                depth_node.depth.link(spatialLocationCalculator.inputDepth)

                self.nodes["spartial_location_calculator"] = spatialLocationCalculator
Example #2
# Reconstructed preamble: the original fragment starts mid-example, so the
# node and stream definitions below are assumed context, following DepthAI's
# spatial_location_calculator example.
import depthai as dai

pipeline = dai.Pipeline()

monoLeft = pipeline.createMonoCamera()
monoRight = pipeline.createMonoCamera()
stereo = pipeline.createStereoDepth()
spatialLocationCalculator = pipeline.createSpatialLocationCalculator()

xoutDepth = pipeline.createXLinkOut()
xoutSpatialData = pipeline.createXLinkOut()
xinSpatialCalcConfig = pipeline.createXLinkIn()

xoutDepth.setStreamName("depth")
xoutSpatialData.setStreamName("spatialData")
xinSpatialCalcConfig.setStreamName("spatialCalcConfig")

monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)

spatialLocationCalculator.passthroughDepth.link(xoutDepth.input)
stereo.depth.link(spatialLocationCalculator.inputDepth)

topLeft = dai.Point2f(0.4, 0.4)
bottomRight = dai.Point2f(0.6, 0.6)

# topLeft2 = dai.Point2f(0.6, 0.4)
# bottomRight2 = dai.Point2f(0.8, 0.6)
# topLeft = dai.Point2f(0.1, 0.1)
# bottomRight = dai.Point2f(0.9, 0.9)

spatialLocationCalculator.setWaitForConfigInput(False)
config = dai.SpatialLocationCalculatorConfigData()
config.depthThresholds.lowerThreshold = 100    # mm
config.depthThresholds.upperThreshold = 10000  # mm
config.roi = dai.Rect(topLeft, bottomRight)
spatialLocationCalculator.initialConfig.addROI(config)
spatialLocationCalculator.out.link(xoutSpatialData.input)
xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)

# Pipeline is defined, now we can connect to the device
with dai.Device(pipeline) as device:
    # The pipeline starts automatically when the device is opened with it

    # Output queue will be used to get the depth frames from the outputs defined above
    depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
    spatialCalcQueue = device.getOutputQueue(name="spatialData",
                                             maxSize=4,
                                             blocking=False)
    spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig")
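
    # A typical consumption loop (a sketch following DepthAI's
    # spatial_location_calculator example; display and exit handling omitted):
    while True:
        inDepth = depthQueue.get()       # blocks until a new depth frame arrives
        depthFrame = inDepth.getFrame()  # uint16 depth map in millimeters

        # Spatial data computed on-device for every configured ROI
        spatialData = spatialCalcQueue.get().getSpatialLocations()
        for depthData in spatialData:
            coords = depthData.spatialCoordinates
            print(f"X: {coords.x} mm  Y: {coords.y} mm  Z: {coords.z} mm")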
Example #3
    def get3DPosition(self, point_x, point_y, size):
        '''!
            Computes the 3D position of a point in the image.

            Parameters:
                @param point_x: x coordinate of the point
                @param point_y: y coordinate of the point
                @param size: size of the image the coordinates refer to

            @return 3D position in meters as a numpy array, or a vector of
                    np.inf if no depth information is available.
        '''

        try:
            self._depth_frame
        except AttributeError:
            # No depth frame has been received yet
            return np.array([np.inf, np.inf, np.inf], dtype=np.float64)

        # Rescale the point from source-image coordinates to depth-frame coordinates
        point_x *= self._depth_frame.shape[0]/size[0]
        point_y *= self._depth_frame.shape[1]/size[1]

        # Homogeneous pixel coordinates of the point
        x_pixel = np.array([point_x, point_y, 1.0], dtype=np.float64)

        point_x = int(point_x)
        point_y = int(point_y)

        x_space = np.zeros(3, dtype=np.float64)

        color_calib_size = self.camera_calibration_size[OAK_Stage.COLOR]
        color_intrinsic = self.camera_intrinsics[OAK_Stage.COLOR]

        # Rescale the intrinsics from the calibration resolution to the
        # resolution of the current depth frame
        scale = [self._depth_frame.shape[0]/color_calib_size[0], self._depth_frame.shape[1]/color_calib_size[1]]

        K = color_intrinsic.copy()
        K[0] *= scale[0]
        K[1] *= scale[1]

        k_inv = np.linalg.inv(K)
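        # Back-projection: a pixel (u, v) at depth z maps to camera space via
        # x = K^-1 @ (z * [u, v, 1]); z is estimated below from a small ROI of
        # depth samples around the point.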

        n_point = self._configs["depth_roi_size"]

        # ROI of about n_point x n_point pixels centered on the point,
        # clamped to the depth frame bounds
        x_min = max(point_x - (n_point//2) - 1, 0)
        x_max = min(point_x + (n_point//2), self._depth_frame.shape[0])

        y_min = max(point_y - (n_point//2) - 1, 0)
        y_max = min(point_y + (n_point//2), self._depth_frame.shape[1])

        if not self._configs["depth_host"]:
            # Depth is resolved on-device: send the ROI to the
            # SpatialLocationCalculator and read back the result
            topLeft = dai.Point2f(x_min, y_min)
            bottomRight = dai.Point2f(x_max, y_max)

            config = dai.SpatialLocationCalculatorConfigData()
            config.depthThresholds.lowerThreshold = 100    # mm
            config.depthThresholds.upperThreshold = 10000  # mm
            config.roi = dai.Rect(topLeft, bottomRight).normalize(width=self._depth_frame.shape[1], height=self._depth_frame.shape[0])

            cfg = dai.SpatialLocationCalculatorConfig()
            cfg.addROI(config)

            self._oak_input_queue["spatialConfig"].send(cfg)

            spatialdata = self._spatial_queue.get()

            x_space = np.zeros(3, dtype=np.float64)

            x_space[0] = spatialdata[0].spatialCoordinates.x
            x_space[1] = spatialdata[0].spatialCoordinates.y
            x_space[2] = spatialdata[0].spatialCoordinates.z

            # Coordinates arrive in millimeters; convert to meters
            return x_space / 1000.0
        
        # Host-side depth: gather valid depth samples from the ROI
        samples = self._depth_frame[x_min:x_max, y_min:y_max].flatten()
        samples = samples[samples > 100]  # drops invalid (0) and very close readings

        if samples.shape[0] == 0:
            return np.array([np.inf, np.inf, np.inf], dtype=np.float64)

        z = 0

        if self._configs["depth_point_mode"] == "median":
            z = np.median(samples)
        elif self._configs["depth_point_mode"] == "mean":
            z = np.mean(samples)
        elif self._configs["depth_point_mode"] == "min":
            # Mean of the nearest 10% of samples
            samples = np.sort(samples)
            z = np.mean(samples[:int(samples.shape[0]*0.1)])
        elif self._configs["depth_point_mode"] == "max":
            # Median of the farthest 10% of samples
            samples = np.sort(samples)
            z = np.median(samples[int(samples.shape[0]*0.9):])
        else:
            # Default mode: RANSAC-like search for the depth with the densest
            # support in a histogram of the samples
            hist, edge = np.histogram(samples, bins=1000, range=(0.0, 5000.0), density=True)

            # Use bin centers instead of bin edges
            edge = np.array([(edge[i] + edge[i+1]) / 2 for i in range(edge.shape[0] - 1)])

            best_z = 0
            best_z_inliers = 0

            for i in range(16):
                # Candidate depth: median first, then mean, then random samples
                index = int(np.random.rand(1)[0] * samples.shape[0])
                z_test = samples[index]

                if i == 0:
                    z_test = np.median(samples)
                if i == 1:
                    z_test = np.mean(samples)

                # Window of +/- 100 mm around the candidate
                z_min = z_test - 100.0
                z_max = z_test + 100.0

                mask = np.logical_and(edge > z_min, edge < z_max)
                values = edge[mask]

                weights = hist[mask]

                if np.sum(weights) == 0:
                    continue

                #z_test = np.average(values, weights=weights)
                n_inlier = values.shape[0]  # bins that fall inside the window

                if n_inlier > best_z_inliers:
                    best_z_inliers = n_inlier
                    best_z = z_test

            z = best_z

        if z == 0:
            return np.array([np.inf, np.inf, np.inf], dtype=np.float64)

        # Back-project the pixel at the estimated depth; convert mm -> m
        x_space = k_inv @ (z * x_pixel)

        return x_space / 1000.0
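
A hypothetical call, assuming an instance (stage) of the class above with a populated depth frame and calibration:

import numpy as np

pos = stage.get3DPosition(320, 200, size=(640, 400))  # center of a 640x400 image
if not np.isinf(pos).any():
    print(f"3D position (m): x={pos[0]:.3f} y={pos[1]:.3f} z={pos[2]:.3f}")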