Example #1
0
def crossesMask(blob, mask):
    """Return True if the mask clips the blob: some of the blob's pixels
    survive the mask, but not all of them."""
    # a slightly eroded mask could be used instead:
    #mask = cv2.erode(np.uint8(mask), None)
    mask = np.bool8(mask)

    blobmask = util.region2mask(blob)
    # pixel count of the full, unmasked blob region
    nFull = cv2.countNonZero(np.uint8(blobmask))

    # pixel count after intersecting the blob with the mask
    #imshow('blob mask', np.uint8(blobmask)*255)
    nMasked = cv2.countNonZero(np.uint8(blobmask & mask))

    # "crosses" iff the mask removed some, but not all, of the blob
    return 0 < nMasked < nFull
Example #2
0
def getHandmaskHSV(im, dep, tablemodel, mask):
    """Segment hand pixels as a bool mask: depth-foreground regions on the
    table that are loosely skin-colored and contain at least one
    high-confidence skin-color pixel."""
    DEBUG = False

    vidArea = dep.shape[0] * dep.shape[1]

    # low- and high-confidence skin-color masks from the color image
    colormaskLow, colormaskHigh = getColorMasks(im)
    if DEBUG:
        imshow('colormaskLow', np.uint8(colormaskLow) * 255)
        imshow('colormaskHigh', np.uint8(colormaskHigh) * 255)

    # depth foreground: pixels clearly above the table surface model
    depthmask = (np.int32(tablemodel) - np.int32(dep) > TABLE_NOISE_THRESH)

    # keep only depth regions that cross the table mask
    depthmask2 = filterRegions(depthmask, mask)

    # candidate regions: above the table AND loosely skin-colored AND on-table
    regions = traceContours(depthmask2 & colormaskLow & mask)

    # drop tiny contours, undersized blobs and negative-depth regions
    minSize = MIN_HANDSIZE_FRAC * vidArea
    regions = [
        b for b in regions
        if len(b) > 10
        and util.blobsize(b, ignoreLessThan=minSize) >= minSize
        and not util.isNegative(b, dep)
    ]

    # accept a region only if it overlaps the high-confidence color mask
    handmask = np.zeros(dep.shape, 'bool')
    for b in regions:
        regionmask = util.region2mask(b)
        overlap = regionmask & colormaskHigh
        if cv2.countNonZero(np.uint8(overlap)) > 0:
            handmask = handmask | regionmask

    return handmask
Example #3
0
def isContainedInB1(blob, potContainer):
    """Return True if the blob lies mostly inside potContainer.

    The container must be on the table and qualify as a receiving
    container; at least half a minimum object size must overlap it and
    at most half a minimum object size may stick out."""
    minObjsize = config.videoHeight * config.videoWidth * MIN_OBJSIZE_FRAC

    # only consider on-table receiving containers
    if potContainer.offtable or not isReceivingContainer(potContainer):
        return False

    blobmask = util.region2mask(blob)
    nBlob = cv2.countNonZero(np.uint8(blobmask))
    inside = blobmask & potContainer.fullMask(
        shape=(config.videoHeight, config.videoWidth))
    nInside = cv2.countNonZero(np.uint8(inside))

    # enough overlap, and not too much of the blob outside the container
    return (nInside > minObjsize * .5
            and nBlob - nInside <= minObjsize * .5)
Example #4
0
def isContainedInB(blob, potContainer):
    """Return True if the blob lies mostly inside potContainer.

    Looser variant: the container only needs to be on the table; a
    quarter of a minimum object size must overlap it and at most three
    quarters may stick out."""
    minObjsize = config.videoHeight * config.videoWidth * MIN_OBJSIZE_FRAC

    # off-table containers cannot contain anything
    if potContainer.offtable:
        return False

    blobmask = util.region2mask(blob)
    nBlob = cv2.countNonZero(np.uint8(blobmask))
    inside = blobmask & potContainer.fullMask(
        shape=(config.videoHeight, config.videoWidth))
    nInside = cv2.countNonZero(np.uint8(inside))

    # enough overlap, and not too much of the blob outside the container
    return (nInside > minObjsize * .25
            and nBlob - nInside <= minObjsize * .75)
Example #5
0
def refineForegroundZeroDepths1(fgmask, objects, tmp1, tmp2):
    """Return a copy of fgmask with blobs erased when, apart from a
    sub-object-sized remainder, they lie inside some object's
    zero-depth area.  tmp1 and tmp2 are accepted but unused."""
    refined = fgmask.copy()
    minObjsize = config.videoWidth * config.videoHeight * MIN_OBJSIZE_FRAC

    for blob in imageprocessing.traceContoursCV2(fgmask):
        blobmask = util.region2mask(blob)
        nBlob = cv2.countNonZero(np.uint8(blobmask))

        for obj in objects:
            # per-object mask of pixels with no depth reading
            zeroMask = obj.fullNoDepth(fgmask.shape)
            nInside = cv2.countNonZero(np.uint8(blobmask & zeroMask))

            # what remains of the blob outside the zero-depth area is
            # too small to be a real object -> drop the whole blob
            if (nBlob - nInside) < minObjsize:
                refined &= ~blobmask

    return refined
Example #6
0
def makeObjects(img, dep, tablemodel, mask, tstep=1):
    """Segment the table foreground and return one TableObject per
    surviving region.

    img        -- color image
    dep        -- depth image
    tablemodel -- per-pixel depth model of the empty table
    mask       -- table region mask
    tstep      -- timestep passed through to TableObject
    """
    vidArea = dep.shape[0] * dep.shape[1]
    minSize = MIN_OBJSIZE_FRAC * vidArea  # hoisted: used twice per blob

    fgMask = imageprocessing.subtractTable(img, dep, tablemodel, mask)
    #     imshow('fgmask', fgMask)

    regions = imageprocessing.traceContoursCV2(fgMask)
    # keep regions with a real contour, enough area and valid (non-negative)
    # depth; list comprehension replaces the py2 filter+lambda and is also
    # correct under py3 (filter would return a lazy iterator there)
    regions = [
        b for b in regions
        if len(b) > 2
        and util.blobsize(b, ignoreLessThan=minSize) >= minSize
        and not util.isNegative(b, dep)
    ]

    # one TableObject per region (the original kept an unused counter `i`)
    return [
        TableObject(img, dep, util.region2mask(r), tstep) for r in regions
    ]
Example #7
0
def findHands(im, dep, tablemodel, plines, mask, truncate=False):
    """Detect hands above the table; return a (possibly empty) list of Hand.

    Bails out early when the coarse hand mask is empty.  When truncate is
    set, each detected region is clipped with truncateHand before the
    Hand objects are built.
    """
    # coarse over-approximation of hand pixels; empty -> no hands at all
    mask0 = superHandMask(dep, tablemodel, mask)
    nMask0 = cv2.countNonZero(np.uint8(mask0))
    if nMask0 <= 0:
        return []

    handmask = getHandmask(im, dep, tablemodel, mask)

    regions = traceContours(handmask)
    res = []

    if truncate:
        # clip each region via truncateHand -- presumably `plines` carries
        # the cut lines; TODO confirm against truncateHand's definition
        regions2 = [
            truncateHand(regions[i], plines, np.zeros(im.shape, 'uint8'), id=i)
            for i in range(len(regions))
        ]
        regions = regions2

    for b, i in zip(regions, range(len(regions))):
        blobMask = util.region2mask(b)
        #imshow('sub hand', np.uint8(blobMask)*255)
        dep2 = dep.copy()
        # zero depth outside the blob: AND with 0xffff inside, 0 outside
        dep2 = dep2 & (np.uint16(blobMask) * (0xffff))

        # (pi, pj): centroid of the blob in image coordinates
        inds_i, inds_j = blobMask.nonzero()
        pi = np.mean(inds_i)
        pj = np.mean(inds_j)
        # pz: mean depth over the blob (mask used as averaging weights)
        pz = np.average(dep2, weights=np.uint8(blobMask))

        h = Hand(pi, pj, pz, dep, blobMask)
        res.append(h)

    # if len(res) == 2:
    #imshow("handtracking", np.uint8(handmask)*255)

    return res
Example #8
0
def createNew(img, dep, hands, state, flatTable, tablemodel, mask, timestep):
    """Detect newly appeared table objects.

    Returns (tobjs, reObjs): tobjs are brand-new TableObject instances,
    reObjs are previously known off-table objects that re-entered and were
    re-initialized in place.  `hands` is accepted but not read here.
    """
    vidArea = dep.shape[0] * dep.shape[1]

    # foreground = pixels above the table model, minus known zero-depth areas
    fgMask = imageprocessing.subtractTableSimple(img, dep, tablemodel, mask)
    #imshow('fgmask orig', np.uint8(fgMask)*255)
    fgMask = refineForegroundZeroDepths(fgMask, state.objects, dep, timestep)
    #imshow('fgmask no zerodepth', np.uint8(fgMask)*255)

    # remove hand pixels so hands are not detected as objects
    handMask = handtracking.superHandMask(dep, flatTable, mask)
    fgMask = fgMask & ~handMask
    #imshow('fgmask no hand', np.uint8(fgMask)*255)

    # remove blobs already contained in known objects
    fgMask = refineForegroundContained(fgMask, state.objects)
    #imshow('fgmask wo contained', np.uint8(fgMask)*255)

    #fgMask2 = np.uint8(fgMask)*255 + np.uint8(handMask)*85
    #imshow('fgmask w hand', fgMask2)

    #    imshow('depth, masked', dep*np.uint8(fgMask)*10)
    #    tmp = imageprocessing.subtractTableSimple(img, tablemodel, flatTable, mask)
    #    imshow('depth, masked without old foreground', tablemodel*np.uint8(~tmp)*10)
    regions = imageprocessing.traceContoursCV2(np.uint8(fgMask))
    #    for b in regions:
    #         print len(b) > 2, util.blobsize(b) >= MIN_OBJSIZE_FRAC*vidArea, not util.isNegative(b, dep)

    # MEAN SIZE FILTERING: drop undersized regions
    #print MIN_OBJSIZE_FRAC*vidArea
    regions = filter(
        lambda b: len(b) > 2 and util.blobsize(
            b, ignoreLessThan=MIN_OBJSIZE_FRAC * vidArea) >= MIN_OBJSIZE_FRAC *
        vidArea, regions)
    #imshow('obj. candidate regions1', np.uint8(util.regions2mask(regions))*255)

    # drop regions whose depth reads as negative
    regions = filter(lambda b: not util.isNegative(b, dep), regions)
    #    print 'blob sizes', [util.blobsize(b) for b in regions]
    #imshow('obj. candidate regions2', np.uint8(util.regions2mask(regions))*255)

    # istc demo hack
    # BIG ASSUMPTION: that there is only one new thing created
    # BA 2: that it is the last disappeared thing.
    if len(regions) > 1:
        print 'UH OH! More than one new region', regions

    # OBJECT RE-ENTERING EVENT DETECTION
    # BREAKS: 1. take off object, another hand appeared, then replace the object => new object.
    # incorporate better LOGIC when handtracking work
    # Simple logic => time two hands separately then use the lastHandEnterTime from earliest one.
    reObjs = []
    for obj in state.objects:
        # if len(regions) == 1:
        #     print obj.name
        #     print state.lastHandEnterTime
        #     print obj.offtableTime

        # ICRA2012 hack - objects are never considered removed here
        #if obj.offtable and state.lastHandEnterTime < obj.offtableTime and len(regions) > 0:
        if obj.offtable and len(regions) > 0:
            # the first off-table object claims the (assumed single) region
            reObjs.append(obj)
            region = regions[0]
            obj.initImages(img, dep, util.region2mask(region))
            obj.offtable = False
            regions = []

    # any region left unclaimed becomes a brand-new TableObject
    tobjs = []
    i = 0
    for r in regions:
        objmask = util.region2mask(r)
        t = TableObject(img, dep, objmask, timestep)
        tobjs.append(t)
        i += 1

    #tobjs = [no for no in tobjs if all([not isContainedIn(no, oo) for oo in state.objects])]

    return tobjs, reObjs
Example #9
0
    def initImages(self, img, dep, mask):
        """Initialize this object's image-derived state from a blob mask.

        Sets: self.rect (x, y, w, h), self.rotRect (minAreaRect), and the
        mask/depth/color crops, blob contour and noDepth mask.  When the
        color image is larger than 10x10 it also computes a point cloud,
        centroid, peak and a world-frame bounding box (printed, not stored).
        """
        # axis-aligned bounding rect from the mask's nonzero pixels
        inds_i, inds_j = mask.nonzero()
        rect_y = inds_i.min()
        rect_x = inds_j.min()
        wid = inds_j.max() - inds_j.min()
        ht = inds_i.max() - inds_i.min()
        rect_y1 = inds_i.max()
        rect_x1 = inds_j.max()
        # rect is in (x, y, wid, ht) format
        self.rect = (rect_x, rect_y, wid, ht)
        #img2 = img.copy()
        #cv2.rectangle(img2, (self.rect[0], self.rect[1]), (self.rect[0]+self.rect[2], self.rect[1]+self.rect[3]), (255,0,0))
        #imshow('location', img2)
        #waitKey()

        # Save rotated rectangle information (outer contour only)
        contours, trash = cv2.findContours(np.uint8(mask), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
        cnt = contours[0]
        rect = cv2.minAreaRect(cnt)
        self.rotRect = rect

        # Save PointCloud information
        # NOTE(review): the 10x10 check presumably skips a dummy/placeholder
        # image -- confirm against the callers
        if not (img.shape[0] == 10 and img.shape[1] == 10):
            # Tcw: camera-to-world transform loaded from disk
            Tcw = np.genfromtxt("params/Tcw.txt")
            cnt2 = []
            for p in cnt:
                cnt2.append((p[0][0], p[0][1]))
            rotRectMask = util.region2mask(cnt2)
            cloud = toPointCloud(img, dep *
                                 rotRectMask)  # depth channel work as a mask
            newCloud = transformCloud(cloud, Tcw)
            # compute centroid and peak (highest-z point) of the cloud
            centroid = np.mean(newCloud[:, 0:3], 0)[0:3]
            peak = newCloud[newCloud[:, 2].argmax(), 0:3]

            # attempt to coordinate-transform the rotated bounding box
            # (cv2.cv.BoxPoints is the legacy OpenCV 2.x API)
            box = cv2.cv.BoxPoints(rect)
            pts = np.int16(box)

            # average depth over the axis-aligned crop, used as the box's z
            tempDep = dep[rect_y:rect_y1, rect_x:rect_x1]
            avg = np.mean(tempDep)
            #print np.mean(tempDep)

            # project the 4 box corners to 3D, then into world frame
            # (240, 320 look like image-center parameters -- TODO confirm
            # against toPointXYZ's definition)
            pts2 = [toPointXYZ(pt[0], pt[1], avg, 240, 320) for pt in pts]
            pts2 = np.hstack((np.array(pts2), np.ones((4, 1))))
            #print pts
            #print pts2
            boundingbox = np.dot(Tcw, np.array(pts2).T).T
            print boundingbox

            # orientation of the box's first edge in the world XY plane
            x = boundingbox[1, 0] - boundingbox[0, 0]
            y = boundingbox[1, 1] - boundingbox[0, 1]
            h = np.sqrt(x * x + y * y)
            print np.sin(1.0 * y / h)
            print centroid
            print np.sin(1.0 * y / h) - 3.14 + 1.57, centroid[0], centroid[
                1], 0.071  # take 1.57 out for perpendicular grasp

            # fig = plt.figure()
            # axis = fig.add_subplot(111, projection='3d')
            # scatterPointCloud(axis,newCloud,1)
            # axis.scatter(centroid[0],centroid[1],centroid[2],c='r')
            # axis.scatter(peak[0],peak[1],peak[2],c='r')

            # axis.scatter(boundingbox[0,0],boundingbox[0,1],centroid[2],c='r')
            # axis.scatter(boundingbox[1,0],boundingbox[1,1],centroid[2],c='r')
            # axis.scatter(boundingbox[2,0],boundingbox[2,1],centroid[2],c='r')
            # axis.scatter(boundingbox[3,0],boundingbox[3,1],centroid[2],c='r')
            # axis.scatter(boundingbox[0,0],boundingbox[0,1],0,c='r')
            # axis.scatter(boundingbox[1,0],boundingbox[1,1],0,c='r')
            # axis.scatter(boundingbox[2,0],boundingbox[2,1],0,c='r')
            # axis.scatter(boundingbox[3,0],boundingbox[3,1],0,c='r')
            # plt.show()

        DEBUG = True
        if DEBUG:  # Display the rotated bounding box over the color image
            im = img.copy()
            box = cv2.cv.BoxPoints(rect)
            pts = np.int16(box)
            for j in range(len(pts)):
                cv2.line(im, tuple(pts[j]), tuple(pts[(j + 1) % 4]),
                         (0, 0, 255), 2)
            imshow("boundingbox", im)

        # crop mask/depth/color to the axis-aligned bounding rect
        self.mask = mask[rect_y:rect_y1, rect_x:rect_x1]
        self.depth = dep[rect_y:rect_y1, rect_x:rect_x1]
        self.color = img[rect_y:rect_y1, rect_x:rect_x1, :]
        self.blob = None
        blobs = util.traceContoursCV2(mask)
        if len(blobs) > 0:
            self.blob = blobs[0]

        #imshow('dep'+self.name, self.depth*15)
        #imshow('col'+self.name, self.color)

        # pixels inside the mask that have no valid depth reading
        self.noDepth = (self.depth <= 0) * self.mask