def transformFrame(img, twoRange, strict_ranges, loose_ranges):
    img = img.toHSV()
    res = cv2.inRange(img.getNumpyCv2(), np.array(strict_ranges[0]), np.array(strict_ranges[1]))
    simg = scv.Image(res.transpose(1, 0))

    if twoRange:
        blobs = simg.findBlobs()
        if blobs:
            x = int(blobs[0].minRectX() - 20)
            if x < 0:
                x = 0
            y = int(blobs[0].minRectY() - 20)
            if y < 0:
                y = 0
            w = int(blobs[0].minRectWidth() + 40)
            h = int(blobs[0].minRectHeight() + 40)
            bb = (x, y, w, h)
            cropped = img.crop(bb)
            loose_res = cv2.inRange(cropped.getNumpyCv2(), np.array(loose_ranges[0]), np.array(loose_ranges[1]))

            # paste the loose-range mask back into the full-frame mask
            res[y:y + cropped.height, x:x + cropped.width] = loose_res
            simg = scv.Image(res.transpose(1, 0))

    simg = simg.morphClose()
    simg = simg.morphOpen()
    return simg
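
A minimal usage sketch for transformFrame, modelled on the camera/display loops later in this listing. The strict and loose HSV ranges below are placeholder values, not taken from the original code:

import cv2
import numpy as np
import SimpleCV as scv

strict_ranges = ([30, 120, 120], [45, 255, 255])   # placeholder strict HSV range
loose_ranges = ([25, 60, 60], [50, 255, 255])      # placeholder loose HSV range

video = scv.Camera()
display = scv.Display()

while display.isNotDone():
    frame = video.getImage()
    # transformFrame converts to HSV internally and returns the cleaned-up mask
    mask = transformFrame(frame, True, strict_ranges, loose_ranges)
    mask.save(display)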
Example #2
	def update(self, dt):
		self.img = GameInstance().cam.getImage().flipHorizontal()
		GameInstance().lower = np.array((float(self.val[0]), float(self.val[1]), float(self.val[2])))
		GameInstance().upper = np.array((float(self.val[3]), float(self.val[4]), float(self.val[5])))
		hsv = self.img.toHSV()
		mask = cv2.inRange(hsv.getNumpy(), GameInstance().lower, GameInstance().upper)
		img2 = Image(source=mask)
		img2 = img2.erode(1)
		img2 = img2.dilate(2)
		if self.cvActiveBloops.active:
			blops = img2.findBlobs()
			if blops:
				self.cvBlob.texture = getKivyTexture(blops[-1].getMaskedImage())
		self.cvFilter.texture = getKivyTexture(img2)
		self.cvImage.texture = getKivyTexture(self.img)
Example #4
    def _initData(self):
        """
        Initialize the cluster centers and initial values of the pixel-wise
        cluster assignment and distance values.
        """
        self.clusters = -1 * np.ones(self.img.shape[:2])
        self.distances = self.FLT_MAX * np.ones(self.img.shape[:2])

        centers = []
        for i in xrange(self.step, self.width - self.step/2, self.step):
            for j in xrange(self.step, self.height - self.step/2, self.step):
                nc = self._findLocalMinimum(center=(i, j))
                color = self.labimg[nc[1], nc[0]]
                center = [color[0], color[1], color[2], nc[0], nc[1]]
                centers.append(center)
        self.center_counts = np.zeros(len(centers))
        self.centers = np.array(centers)
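
The _findLocalMinimum helper referenced above is not part of this listing. In standard SLIC seeding it moves each grid seed to the lowest-gradient pixel in a small window around the proposed position; a sketch under that assumption (not the original implementation) could look like this:

    def _findLocalMinimum(self, center):
        # Hypothetical sketch: pick the pixel with the smallest local colour
        # gradient inside a 3x3 window around the proposed centre, as in
        # standard SLIC seeding. Assumes self.labimg is the CIELAB image
        # indexed as [y, x], matching its use in _initData above.
        min_grad = self.FLT_MAX
        loc_min = center
        for i in xrange(center[0] - 1, center[0] + 2):
            for j in xrange(center[1] - 1, center[1] + 2):
                c1 = self.labimg[j + 1, i]
                c2 = self.labimg[j, i + 1]
                c3 = self.labimg[j, i]
                grad = abs(int(c1[0]) - int(c3[0])) + abs(int(c2[0]) - int(c3[0]))
                if grad < min_grad:
                    min_grad = grad
                    loc_min = (i, j)
        return loc_min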
Example #5
    def predictY(self):
        """
        **SUMMARY**

        Returns a numpy array of the predicted y (vertical) coordinate of each feature.

        **RETURNS**

        A numpy array.

        **EXAMPLE**

        >>> while True:
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> print ts.predictY()

        """
        return np.array([f.predict_pt[1] for f in self])
Example #6
    def correctedCoordinates(self):
        """
        **SUMMARY**

        Returns a numpy array of the corrected coordinates of each feature.

        **RETURNS**

        A numpy array.

        **EXAMPLE**

        >>> while True:
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> print ts.correctedCoordinates()

        """
        return np.array([f.state_pt for f in self])
Example #9
    def pixleVelocityRealTime(self):
        """
        **SUMMARY**

        Returns the pixel velocity of the tracked object for each feature, in pixels per frame.

        **PARAMETERS**

        No Parameters required.

        **RETURNS**

        * *numpy array* - array of pixel velocity tuples.

        >>> while True:
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> print ts.pixleVelocityRealTime()
        """
        return np.array([f.rt_vel for f in self])
Example #11
    def areaRatio(self):
        """
        **SUMMARY**

        Returns a numpy array of the areaRatio of each feature,
        where areaRatio is the ratio of the size of the current bounding box
        to the size of the initial bounding box.

        **RETURNS**

        A numpy array.

        **EXAMPLE**

        >>> while True:
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> print ts.areaRatio()

        """
        return np.array([f.areaRatio for f in self])
Example #13
from trash.vision.scribbles import Trackbars


tbars = Trackbars()
ranges = tbars.getFromUser()

video = scv.Camera()
display = scv.Display()

while display.isNotDone():
    img = video.getImage()
    img = img.toHSV()


    res = cv2.inRange(img.getNumpyCv2(), np.array(ranges[0]), np.array(ranges[1]))

    #simg = scv.Image(thres, cv2image=False)
    simg = scv.Image(res.transpose(1,0))

    #simg = simg.morphOpen()
    simg = simg.morphClose()
    simg = simg.morphOpen()

    # find the biggest blob in the mask; it is most likely the ball, and use it as a new mask
    # alternatively, use a two-filter approach: a "strict" filter that only has to find a piece of the ball,
    # and a looser one from which we keep only what is adjacent to the pixels already found;
    # this should grow the detected fragment of the ball into the whole ball (transformFrame above implements this idea)

    blobs = simg.findBlobs()
    if blobs:
Example #14
    class __GameInstance:
        lower = np.array((float(0), float(0), float(0)))
        upper = np.array((float(0), float(0), float(0)))

        def __init__(self):
            self.cam = Camera()
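
The outer GameInstance wrapper is not shown in this listing, yet the update() methods above call GameInstance() repeatedly and share its cam, lower and upper state. It presumably follows a singleton pattern along these lines; the wrapper body below is an assumption, not the original code:

from SimpleCV import Camera
import numpy as np

class GameInstance(object):
    # Hypothetical wrapper: every GameInstance() call returns the same
    # __GameInstance object, so cam, lower and upper are shared globally.
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = cls.__GameInstance()
        return cls._instance

    class __GameInstance:
        lower = np.array((0.0, 0.0, 0.0))
        upper = np.array((0.0, 0.0, 0.0))

        def __init__(self):
            self.cam = Camera()

With a wrapper of this shape, GameInstance().cam and the lower/upper arrays assigned in the update() snippets always refer to one shared object.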
Example #15
ranges = [
    ([34, 25, 51], [38, 255, 255]),
    ([72, 0, 137], [85, 23, 182]),
    ([45, 3, 110], [67, 31, 162]),
    ([21, 23, 0], [49, 237, 67])
]

display = scv.Display()

while display.isNotDone():
    img = video.getImage()
    img = img.toHSV()


    thres = []
    for rng in ranges:
        thres.append(cv2.inRange(img.getNumpyCv2(), np.array(rng[0]), np.array(rng[1])))

    res = reduce(lambda x, y: cv2.add(x, y), thres)

    #simg = scv.Image(thres, cv2image=False)
    simg = scv.Image(res.transpose(1,0))

    #simg = simg.morphOpen()
    simg = simg.morphClose()
    simg = simg.morphOpen()

    #blobs = simg.findBlobs()
    #if blobs:
    #    circles = blobs.filter([b.isCircle(0.4) for b in blobs])
    #    if circles:
    #    #    for c in circles:
Example #16
display = scv.Display()

history = []

tracker = Tracker()

while display.isNotDone():
    img = video.getImage()
    img = img.toHSV()

    # apply the strict range and find the biggest blob,
    # then apply the loose range near the biggest blob

    res = cv2.inRange(img.getNumpyCv2(), np.array(strict_ranges[0]), np.array(strict_ranges[1]))

    simg = scv.Image(res.transpose(1, 0))

    blobs = simg.findBlobs()
    if blobs:
        x = int(blobs[0].minRectX() - 20)
        if x < 0:
            x = 0
        y = int(blobs[0].minRectY() - 20)
        if y < 0:
            y = 0
        w = int(blobs[0].minRectWidth() + 40)
        h = int(blobs[0].minRectHeight() + 40)
        bb = (x, y, w, h)
        # print x,y,w,h
Example #17
def camshiftTracker(img, bb, ts, **kwargs):
    """
    **DESCRIPTION**

    (Dev Zone)

    Tracking the object surrounded by the bounding box in the given
    image using CAMshift method.

    Warning: use this only if you know what you are doing. It is better
    to have a look at Image.track().

    **PARAMETERS**

    * *img* - Image - Image to be tracked.
    * *bb*  - tuple - Bounding Box tuple (x, y, w, h)
    * *ts*  - TrackSet - SimpleCV.Features.TrackSet.

    Optional PARAMETERS:

    lower      - Lower HSV value for inRange thresholding
                 tuple of (H, S, V)

    upper      - Upper HSV value for inRange thresholding
                 tuple of (H, S, V)

    mask       - Mask to calculate Histogram. It's better 
                 if you don't provide one.

    num_frames - number of frames to be backtracked.

    **RETURNS**

    SimpleCV.Features.Tracking.CAMShift

    **HOW TO USE**

    >>> cam = Camera()
    >>> ts = []
    >>> img = cam.getImage()
    >>> bb = (100, 100, 300, 300) # get BB from somewhere
    >>> ts = camshiftTracker(img, bb, ts, lower=(40, 120, 120), upper=(80, 200, 200), num_frames=30)
    >>> while (some_condition_here):
        ... img = cam.getImage()
        ... bb = ts[-1].bb
        ... ts = camshiftTracker(img, bb, ts, lower=(40, 120, 120), upper=(80, 200, 200), num_frames=30)
        ... ts[-1].drawBB()
        ... img.show()

    This is quite confusing; it is better to use the
    Image.track() method.

    READ MORE:

    CAMShift Tracker:
    Uses the meanshift-based CAMShift thresholding technique. Blobs and objects with
    a single tone are tracked very efficiently. CAMShift should be preferred if you
    are trying to track faces, as it is optimized for face tracking.
    """

    lower = np.array((0., 60., 32.))
    upper = np.array((180., 255., 255.))
    mask = None
    num_frames = 40

    if not isinstance(bb, tuple):
        bb = tuple(bb)
    bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))

    for key in kwargs:
        if key == 'lower':
            lower = np.array(tuple(kwargs[key]))
        elif key == 'upper':
            upper = np.array(tuple(kwargs[key]))
        elif key == 'mask':
            mask = kwargs[key]
            mask = mask.getNumpyCv2()
        elif key == 'num_frames':
            num_frames = kwargs[key]

    hsv = cv2.cvtColor(img.getNumpyCv2(), cv2.cv.CV_BGR2HSV)
    if mask is None:
        mask = cv2.inRange(hsv, lower, upper)

    x0, y0, w, h = bb
    x1 = x0 + w - 1
    y1 = y0 + h - 1
    hsv_roi = hsv[y0:y1, x0:x1]
    mask_roi = mask[y0:y1, x0:x1]

    hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    hist_flat = hist.reshape(-1)
    imgs = [hsv]
    if len(ts) > num_frames and num_frames > 1:
        for feat in ts[-num_frames:]:
            imgs.append(feat.image.toHSV().getNumpyCv2())
    elif len(ts) < num_frames and num_frames > 1:
        for feat in ts:
            imgs.append(feat.image.toHSV().getNumpyCv2())

    prob = cv2.calcBackProject(imgs, [0], hist_flat, [0, 180], 1)
    prob &= mask
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    new_ellipse, track_window = cv2.CamShift(prob, bb, term_crit)
    if track_window[2] == 0 or track_window[3] == 0:
        track_window = bb
    track = CAMShiftTrack(img, track_window, new_ellipse)

    return track
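
For reference, the same back-projection plus cv2.CamShift idea can be exercised without SimpleCV. The sketch below is a minimal stand-alone loop; the initial bounding box, HSV range and key handling are placeholder choices, not taken from the tracker above:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ok, frame = cap.read()                       # assumes a camera is available
x, y, w, h = 100, 100, 200, 200              # placeholder initial bounding box
track_window = (x, y, w, h)

# Build a hue histogram of the initial region, masked to "colourful enough" pixels.
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
hist = cv2.calcHist([hsv[y:y + h, x:x + w]], [0], mask[y:y + h, x:x + w], [16], [0, 180])
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
hist = hist.reshape(-1)

term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Back-project the hue histogram and let CamShift follow the peak.
    prob = cv2.calcBackProject([hsv], [0], hist, [0, 180], 1)
    rot_rect, track_window = cv2.CamShift(prob, track_window, term_crit)
    x, y, w, h = track_window
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('camshift', frame)
    if cv2.waitKey(30) & 0xFF == 27:         # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()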