def testWhenSearchinfForObstaclesThenCorrectPositionIsReturned(self):
    """Check that ObstacleFinder locates each obstacle within
    POSITION_ERROR_IN_CM of the reference positions recorded for every
    RGB/depth test image pair.
    """
    imagePairs = zip(sorted(self.rgbImages), sorted(self.depthImages))
    for index, (rgbFile, depthFile) in enumerate(imagePairs):
        # Reference (x, z) values for this image pair, sorted so they
        # line up pairwise with the sorted detections below.
        # NOTE(review): distanceValues appears to be a module-level
        # fixture defined elsewhere in this file — confirm.
        expectedPositions = sorted(distanceValues[index])
        rgb = cv2.imread(rgbFile)
        depth = numpy.load(depthFile)
        convertor = KinectPointConvertor(rgb, depth, self.transformMatrix)
        finder = ObstacleFinder(rgb, convertor)
        detectedPositions = sorted(finder.findPositions())
        for (expectedX, expectedZ), (detectedX, detectedZ) in zip(expectedPositions, detectedPositions):
            # Each expected coordinate must fall inside the tolerance
            # band centred on the detected coordinate.
            self.assertTrue(detectedX - self.POSITION_ERROR_IN_CM <= expectedX <= detectedX + self.POSITION_ERROR_IN_CM)
            self.assertTrue(detectedZ - self.POSITION_ERROR_IN_CM <= expectedZ <= detectedZ + self.POSITION_ERROR_IN_CM)
def getObstaclesPositions(self): print 'getObstaclesPositions' for _ in range(self.NUMBER_OF_RETRIES): try: rgbImage, depthImage = self.imageTaker.getImages() pointConvertor = KinectPointConvertor(rgbImage, depthImage, self.transformMatrix) obstacleFinder = ObstacleFinder(rgbImage, pointConvertor) positions = obstacleFinder.findPositions() GUI.obstacles.put(positions) if len(positions) != 2: continue else: break except (NoContourFoundException, InvalidDistanceException) as exception: print "getObstaclesPositions: ", exception else: return [] return positions
def printObstaclePositions(table, rgbImage, depthImage): pointConvertor = KinectPointConvertor(rgbImage, depthImage, tablesTransformMatrix[table - 1]) obstacleFinder = ObstacleFinder(rgbImage, pointConvertor) print obstacleFinder.findPositions()