    def run_ntimes_oneblob(self):
        """
        Run the BlobFinder on a short sequence of single-blob frames
        """
        self.background = background()
        self.mask = mask(self.background)
        blob = randomblob(self.background)[0]
        self.bfinder = BlobFinder(self.mask)
        # init background
        for _ in range(10):
            blob = randomblob(self.background)[0]
            self.bfinder.run(blob)

        self.blobs_center = list()
        self.blobs_found = list()
        for frame_i in range(10):
            blob, blob_center, _, _ = \
                randomblob(self.background)
            self.bfinder.run(blob)
            self.blobs_center.append(blob_center)
            self.blobs_found.append(self.bfinder.filtered_contours)

        self.blob = blob
Example #2
# Create image for display
scale = 0.4
refresh_time = 0.5
blob_brightness = [20, 30, 150, 254]
max_nb_images = 1
# Bfinder parameters
args = dict()

# loop over the frames of the video
t_start = time.time()
# mask
background = btgen.background()
mask = btgen.mask(background)
# Create a bfinder tracker
bfinder = BlobFinder(mask)
bfinder.erode_iter = 2                # erosion iterations applied to the binary image
bfinder.dilate_iter = 2               # dilation iterations applied to the binary image
bfinder.area_lim = [0, 10000]         # accepted contour area range
bfinder.roundness_lim = [0, 1]        # accepted contour roundness range
bfinder.background_init = 60          # number of frames used to initialise the background
bfinder.threshold = 10                # segmentation threshold
bfinder.gaussian_blur = 1             # Gaussian blur applied before thresholding
bfinder.skip_filter_contours = False  # keep the area/roundness contour filtering
# Parameters for cropping
xmargin = [0, 0]
ymargin = [0, 0]

# Build up an expected histogram
ninitframes = 50
bin_edges = np.arange(1, 255)
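
# The example stops before the frame loop its comments announce. A minimal
# sketch of that loop follows, reusing bfinder.run and bfinder.masked_image as
# they appear elsewhere on this page; btgen.randomblob, the histogram
# bookkeeping and the display calls are illustrative assumptions, not the
# original code.

# Initialise the background model on ninitframes synthetic frames
for _ in range(ninitframes):
    frame = btgen.randomblob(background)[0]
    bfinder.run(frame)

# Accumulate a pixel-intensity histogram over a few tracked frames
hist_total = np.zeros(len(bin_edges) - 1)
for _ in range(max_nb_images):
    frame = btgen.randomblob(background)[0]
    bfinder.run(frame)
    counts, _ = np.histogram(bfinder.masked_image, bins=bin_edges)
    hist_total += counts

    # Scaled-down display, refreshed every refresh_time seconds
    cv2.imshow('bfinder', cv2.resize(frame, None, fx=scale, fy=scale))
    cv2.waitKey(int(refresh_time * 1000))

print('elapsed: {:.2f} s'.format(time.time() - t_start))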
Example #3
args = parser_tracker().parse_args()
template_frame = args.images
state = 'runforward'
config_yaml = args.config
scale = args.scale
# Parameters for cropping
xmargin = [0, 0]
ymargin = [0, 0]

if template_frame is None:
    # mask
    background = btgen.background()
    mask = btgen.mask(background)
    blob_brightness = [20, 30, 150, 254]
    bfinder = BlobFinder(mask)
else:
    filelist = sorted(glob.glob(template_frame))
    bfinder = BlobFinder(None, fgbg=cv2.createBackgroundSubtractorKNN())

# Create a bfinder tracker
config = load_config(config_yaml)
update_config_bfinder(bfinder, config['bfinder'])
bfinder.skip_filter_contours = False

# init background
print('INIT Background')
if template_frame is not None:
    maxframe = len(filelist)
    bfinder.background_learning_rate = -1
    for frame_i in range(bfinder.background_init, 0, -1):
        ...  # background-initialisation loop body not included in this snippet

Example #4
        time.sleep(0.25)
    # Load images from the file list if no video is given
    elif args.get("video", None) is None:
        argcam = args["folder"]
    # otherwise, we are reading from a video file
    else:
        argcam = args["video"]

    if args["tra_file"] is None:
        if args["folder"] is None:
            raise NameError('target trajectory file is not present')
        else:
            folder_path = os.path.dirname(args["folder"])
            args["tra_file"] = os.path.join(folder_path, 'trajectory.tra')
    display = False
    bbee = BlobFinder(mask, None)
    bbee.erode_iter = 2
    bbee.dilate_iter = 3
    bbee.threshold = 128
    bbee.gaussian_blur = 3
    bbee.skip_filter_contours = True

    # Parameters for the markers:
    max_area = 40
    params = [80, 300]
    gaussian_blur = 5
    
    # Create image for display
    scale = 0.4
    refresh_time = 0.5
    nframe_bg = 1000
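
    # Example #4 is cut off here. A minimal sketch of applying the configured
    # finder to a single frame; the capture call and the filtered_contours
    # attribute (taken from the other examples on this page) are assumptions
    # for this snippet:
    camera = cv2.VideoCapture(argcam)
    grabbed, frame = camera.read()
    if grabbed:
        bbee.run(frame)
        # With skip_filter_contours = True the finder presumably keeps every contour
        print('{} candidate blobs found'.format(len(bbee.filtered_contours)))
    camera.release()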
Example #5
        time.sleep(0.25)
    # Load images from the file list if no video is given
    elif args.get("video", None) is None:
        argcam = args["folder"]
    # otherwise, we are reading from a video file
    else:
        argcam = args["video"]

    if args["tra_file"] is None:
        if args["folder"] is None:
            raise NameError('target trajectory file is not present')
        else:
            folder_path = os.path.dirname(args["folder"])
            args["tra_file"] = os.path.join(folder_path, 'trajectory_v2.tra')

    bmarker = BlobFinder(mask, None)
    bmarker.erode_iter = 1
    bmarker.dilate_iter = 1
    bmarker.area_lim = [0, 2000]
    bmarker.roundness_lim = [0, 1]
    bmarker.threshold = args["threshold"]
    bmarker.gaussian_blur = 1
    bmarker.skip_filter_contours = True

    # Create image for display
    scale = 0.4
    refresh_time = 0.5
    nframe_bg = 100

    camera = cv2.VideoCapture(argcam)
    background = get_background(camera, nframe_bg)
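
    # The snippet ends once the background has been estimated. A minimal sketch
    # of the per-frame loop it sets up, assuming the standard OpenCV capture API
    # and the filtered_contours / center attributes used elsewhere on this page
    # (writing to the trajectory file is omitted):
    while True:
        grabbed, frame = camera.read()
        if not grabbed:
            break
        bmarker.run(frame)
        for contour in bmarker.filtered_contours:
            # Mark each detected blob centre on the frame
            cx, cy = int(contour.center[0]), int(contour.center[1])
            cv2.circle(frame, (cx, cy), 3, (0, 0, 255), -1)
        cv2.imshow('bmarker', cv2.resize(frame, None, fx=scale, fy=scale))
        if cv2.waitKey(int(refresh_time * 1000)) & 0xFF == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()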
Example #6
class TestBlobFinder(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestBlobFinder, self).__init__(*args, **kwargs)
        self.run_ntimes_oneblob()

    def run_ntimes_oneblob(self):
        """
        Run the BlobFinder on a short sequence of single-blob frames
        """
        self.background = background()
        self.mask = mask(self.background)
        blob = randomblob(self.background)[0]
        self.bfinder = BlobFinder(self.mask)
        # init background
        for _ in range(10):
            blob = randomblob(self.background)[0]
            self.bfinder.run(blob)

        self.blobs_center = list()
        self.blobs_found = list()
        for frame_i in range(10):
            blob, blob_center, _, _ = \
                randomblob(self.background)
            self.bfinder.run(blob)
            self.blobs_center.append(blob_center)
            self.blobs_found.append(self.bfinder.filtered_contours)

        self.blob = blob

    def test_setget_image(self):
        myim = self.bfinder.original_image
        self.assertTrue(np.allclose(myim, self.blob))

    def test_masked(self):
        myim = self.bfinder.masked_image
        testimage = cv2.bitwise_and(self.blob, self.blob, mask=self.mask)
        self.assertTrue(np.allclose(myim, testimage))

    def test_segmented(self):
        myim = self.bfinder.segmented_image
        self.assertTrue(np.allclose(np.unique(myim), [0, 255]))

    def test_oneblob_numbers(self):
        condition = True
        for frame_i in range(len(self.blobs_center)):
            contours = self.blobs_found[frame_i]
            if len(contours) > 1:
                print('Too many blobs detected')
                condition = False
                continue
            elif len(contours) < 1:
                print('Too few blobs detected')
                condition = False
                continue
        self.assertTrue(condition)

    def test_oneblob_positions(self):
        condition = True
        for frame_i in range(len(self.blobs_center)):
            contour = self.blobs_found[frame_i][0]
            px_error = np.sqrt(
                np.sum((contour.center - self.blobs_center[frame_i])**2))
            if px_error > 1:
                print('Pixel error too large : {}'.format(px_error))
                condition = False
        self.assertTrue(condition)
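
# To run this example as a test module, the standard unittest entry point applies:
if __name__ == '__main__':
    unittest.main()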