def getHandmaskHSV(im, dep, tablemodel, mask):
    """Return a boolean hand mask for frame `im` using HSV color + depth cues.

    Pipeline: (1) low/high-confidence skin-color masks, (2) above-table depth
    foreground, (3) keep depth regions that cross `mask`, (4) trace candidate
    blobs from depth AND low color AND mask, (5) size/negativity filtering,
    (6) accept a blob only if it contains at least one high-confidence color
    pixel.

    im         -- color image
    dep        -- depth image, same height/width as im
    tablemodel -- per-pixel table depth model (same shape as dep)
    mask       -- boolean region-of-interest mask
    Returns a boolean array of dep.shape marking hand pixels.
    """
    DEBUG = False
    vidArea = dep.shape[0] * dep.shape[1]
    # get colormasks: low = permissive skin match, high = strict skin match
    colormaskLow, colormaskHigh = getColorMasks(im)
    if DEBUG:
        imshow('colormaskLow', np.uint8(colormaskLow) * 255)
        imshow('colormaskHigh', np.uint8(colormaskHigh) * 255)
    #colormaskLow = np.bool8(np.ones(dep.shape))
    #colormaskHigh = np.bool8(np.ones(dep.shape))
    # get depth foreground seg: pixels closer than the table model by more
    # than the noise threshold count as above-table foreground
    depthmask = (np.int32(tablemodel) - np.int32(dep) > TABLE_NOISE_THRESH)
    #imshow('above table', np.uint8(depthmask)*255)
    # table masking
    #depthmask = depthmask & mask
    #imshow('above table', np.uint8(depthmask)*255)
    # filter depthmask regions for ones that cross the mask (hands enter
    # from outside the table region)
    depthmask2 = filterRegions(depthmask, mask)
    # get candidate regions from depth and handmaskLow
    regions = traceContours(depthmask2 & colormaskLow & mask)
    #regions = traceContours(depthmask2)
    # do a standard filtering for size and negative regions
    # NOTE: len(b) > 10 here (other filters in this file use > 2)
    regions2 = [
        b for b in regions
        if len(b) > 10 and util.blobsize(
            b, ignoreLessThan=MIN_HANDSIZE_FRAC * vidArea)
        >= MIN_HANDSIZE_FRAC * vidArea and not util.isNegative(b, dep)
    ]
    regions = regions2
    #imshow('hand region', np.uint8(util.regions2mask(regions))*255)
    # filter regions by presence of high match pixels: a candidate blob is a
    # hand only if at least one strict-color pixel falls inside it
    handmask = np.zeros(dep.shape, 'bool')
    for b in regions:
        regionmask = util.region2mask(b)
        #imshow('region', np.uint8(regionmask)*255)
        tmp = regionmask & colormaskHigh
        if cv2.countNonZero(np.uint8(tmp)) > 0:
            #if len(tmp.nonzero()) > 0:
            #print 'TRUE'
            handmask = handmask | regionmask
    #imshow('handmask', np.uint8(handmask)*255)
    return handmask
def refineForegroundContained(fgmask, oldobjects):
    """Drop foreground blobs that sit inside a receiving-container object.

    fgmask     -- boolean foreground mask
    oldobjects -- existing tracked objects; those satisfying
                  isReceivingContainer() act as containers
    Returns fgmask with every sufficiently large contained blob cleared.
    """
    # minimum allowed object size (fraction of the video frame area)
    minObjsize = config.videoHeight * config.videoWidth * MIN_OBJSIZE_FRAC
    blobs = imageprocessing.traceContoursCV2(fgmask)
    receivers = [o for o in oldobjects if isReceivingContainer(o)]
    contained = []
    for receiver in receivers:
        for blob in blobs:
            bigEnough = (len(blob) > 2 and
                         util.blobsize(blob, ignoreLessThan=minObjsize)
                         >= minObjsize)
            if bigEnough and isContainedInB(blob, receiver):
                contained.append(blob)
    # clear the contained blobs out of the foreground
    containedMask = util.regions2mask(contained)
    return fgmask & ~containedMask
def filterRegions(regionsMask, mask):
    """Keep only regions of regionsMask that are big enough and cross mask.

    regionsMask -- boolean candidate-region mask
    mask        -- boolean reference mask a surviving region must intersect
    Returns a boolean mask covering the surviving regions (polygon-filled).
    """
    vidArea = mask.shape[0] * mask.shape[1]
    minSize = MIN_HANDSIZE_FRAC * vidArea
    blobs = traceContours(regionsMask)
    # discard degenerate contours and blobs below the size threshold
    blobs = [b for b in blobs
             if len(b) > 2
             and util.blobsize(b, ignoreLessThan=minSize) >= minSize]
    # keep only blobs that actually intersect the reference mask
    blobs = [b for b in blobs if crossesMask(b, mask)]
    # rasterize survivors back into a mask
    filled = np.zeros(regionsMask.shape, 'uint8')
    cv2.fillPoly(filled,
                 [np.array(blob, dtype="int32") for blob in blobs], 1)
    return np.bool8(filled)
def cropImage(img, dep, mask):
    """Crop img/dep/mask to a padded bounding box around a detected blob.

    The mask is smoothed (2x dilate then 2x erode, i.e. a closing pass),
    contours are traced, blobs smaller than 600 are discarded, and the
    bounding rect of the selected blob plus 30px padding (clamped to the
    image bounds) is used for cropping.

    Returns (croppedImg, croppedDep, croppedMask*255), or (None, None, None)
    when no blob survives the size filter.
    """
    bw = mask.copy()
    # morphological closing-like pass to fill small holes in the mask
    bw = cv2.dilate(bw, None)
    bw = cv2.dilate(bw, None)
    bw = cv2.erode(bw, None)
    bw = cv2.erode(bw, None)
    blobs = traceContoursCV2(np.uint8(bw))
    blobs = [b for b in blobs if len(b) > 2]
    blobs = [b for b in blobs if util.blobsize(b, ignoreLessThan=600) > 600]
    if not blobs:
        return None, None, None
    # NOTE(review): ascending sort + blobs[0] selects the SMALLEST surviving
    # blob; if the largest was intended, sort with reverse=True. Behavior
    # kept as-is pending confirmation.
    blobs.sort(key=util.blobsize)
    blob = blobs[0]
    roi = util.boundingRect(blob)  # presumably (x, y, w, h) — matches use below
    padding = 30
    # clamp the padded box to the image bounds
    x0 = max(0, roi[0] - padding)
    x1 = min(img.shape[1] - 1, roi[0] + roi[2] + padding)
    y0 = max(0, roi[1] - padding)
    y1 = min(img.shape[0] - 1, roi[1] + roi[3] + padding)
    cropped = img[y0:y1, x0:x1]
    depCropped = dep[y0:y1, x0:x1]
    maskCropped = bw[y0:y1, x0:x1] * 255
    return cropped, depCropped, maskCropped
def makeObjects(img, dep, tablemodel, mask, tstep=1):
    """Create a TableObject for every valid foreground region.

    img, dep   -- color and depth frames
    tablemodel -- per-pixel table depth model
    mask       -- region-of-interest mask
    tstep      -- timestep tag passed to each TableObject (default 1)
    Returns the list of new TableObjects.
    """
    vidArea = dep.shape[0] * dep.shape[1]
    minSize = MIN_OBJSIZE_FRAC * vidArea
    fgMask = imageprocessing.subtractTable(img, dep, tablemodel, mask)
    blobs = imageprocessing.traceContoursCV2(fgMask)
    # keep non-degenerate, large-enough, non-negative regions
    keep = [b for b in blobs
            if len(b) > 2
            and util.blobsize(b, ignoreLessThan=minSize) >= minSize
            and not util.isNegative(b, dep)]
    return [TableObject(img, dep, util.region2mask(b), tstep) for b in keep]
def createNew(img, dep, hands, state, flatTable, tablemodel, mask, timestep):
    """Detect new (or re-entering) objects on the table for this frame.

    Builds a foreground mask (table-subtracted, zero-depth-refined, hand
    pixels removed, contained blobs removed), extracts candidate regions,
    then either re-associates the region with an off-table object
    (re-entry) or creates new TableObjects.

    hands -- unused in the current implementation
    Returns (tobjs, reObjs): newly created TableObjects and the list of
    previously known objects that re-entered.
    """
    vidArea = dep.shape[0] * dep.shape[1]
    fgMask = imageprocessing.subtractTableSimple(img, dep, tablemodel, mask)
    #imshow('fgmask orig', np.uint8(fgMask)*255)
    fgMask = refineForegroundZeroDepths(fgMask, state.objects, dep, timestep)
    #imshow('fgmask no zerodepth', np.uint8(fgMask)*255)
    # remove hand pixels so hands are not mistaken for new objects
    handMask = handtracking.superHandMask(dep, flatTable, mask)
    fgMask = fgMask & ~handMask
    #imshow('fgmask no hand', np.uint8(fgMask)*255)
    fgMask = refineForegroundContained(fgMask, state.objects)
    #imshow('fgmask wo contained', np.uint8(fgMask)*255)
    #fgMask2 = np.uint8(fgMask)*255 + np.uint8(handMask)*85
    #imshow('fgmask w hand', fgMask2)
    # imshow('depth, masked', dep*np.uint8(fgMask)*10)
    # tmp = imageprocessing.subtractTableSimple(img, tablemodel, flatTable, mask)
    # imshow('depth, masked without old foreground', tablemodel*np.uint8(~tmp)*10)
    regions = imageprocessing.traceContoursCV2(np.uint8(fgMask))
    # for b in regions:
    #     print len(b) > 2, util.blobsize(b) >= MIN_OBJSIZE_FRAC*vidArea, not util.isNegative(b, dep)
    # MEAN SIZE FILTERING
    #print MIN_OBJSIZE_FRAC*vidArea
    regions = filter(
        lambda b: len(b) > 2 and util.blobsize(
            b, ignoreLessThan=MIN_OBJSIZE_FRAC * vidArea) >=
        MIN_OBJSIZE_FRAC * vidArea, regions)
    #imshow('obj. candidate regions1', np.uint8(util.regions2mask(regions))*255)
    regions = filter(lambda b: not util.isNegative(b, dep), regions)
    # print 'blob sizes', [util.blobsize(b) for b in regions]
    #imshow('obj. candidate regions2', np.uint8(util.regions2mask(regions))*255)
    # istc demo hack
    # BIG ASSUMPTION: that there is only one new thing created
    # BA 2: that it is the last disappeared thing.
    if len(regions) > 1:
        print 'UH OH! More than one new region', regions
    # OBJECT RE-ENTERING EVENT DETECTION
    # BREAKS: 1. take off object, another hand appeared, then replace the
    # object => new object.
    # incorporate better LOGIC when handtracking work
    # Simple logic => time two hands separetly then use the
    # lastHandEnterTime from earliest one.
    reObjs = []
    for obj in state.objects:
        # if len(regions) == 1:
        #     print obj.name
        #     print state.lastHandEnterTime
        #     print obj.offtableTime
        # ICRA2012 heck - OBEJCT will naver be removed!!!
        #if obj.offtable and state.lastHandEnterTime < obj.offtableTime and len(regions) > 0:
        if obj.offtable and len(regions) > 0:
            # re-entry: reuse the existing object for the (single) region
            # and consume it so no new TableObject is created below
            reObjs.append(obj)
            region = regions[0]
            obj.initImages(img, dep, util.region2mask(region))
            obj.offtable = False
            regions = []
    # any regions left over become brand-new objects
    tobjs = []
    i = 0
    for r in regions:
        objmask = util.region2mask(r)
        t = TableObject(img, dep, objmask, timestep)
        tobjs.append(t)
        i += 1
    #tobjs = [no for no in tobjs if all([not isContainedIn(no, oo) for oo in state.objects])]
    return tobjs, reObjs
def segmentAndMask(img0, imgDepth0, staticMap0, mask0, thresh=10): debug = True staticMap = np.array(staticMap0) imgDepth = np.array(imgDepth0) img = np.array(img0) mask = np.array(mask0) #imshow('indepth', imgDepth*10) staticMap = np.int16(staticMap) imgDepth = np.int16(imgDepth) noDepthMask = imgDepth == 0 #imshow('no depth mask', np.uint8(noDepthMask)*255) diff = (imgDepth - staticMap) adiff = np.abs(diff) tableMask = adiff < thresh if debug: #imshow('adiff', adiff*15) print imgDepth.max(), staticMap.max() #imshow('table mask', np.uint8(tableMask)*255) # numpy setting is really slow maskout = (tableMask) | (mask <= 0) maskin = (tableMask <= 0) & (mask) cvMask = util.array2cv(np.uint8(maskout) * 255) cvImg = util.array2cv(img) cv.Set(cvImg, (0, 0, 0), cvMask) img = util.cv2array(cvImg) #img[tableMask] = 0 #img[mask <= 0] = 0 cvDep = util.array2cv(imgDepth) depthBw = imgDepth > 0 depthBw = depthBw | noDepthMask depthBw = depthBw & maskin depthBw = np.uint8(depthBw) * 255 cv.Set(cvDep, 0, cvMask) imgDepth = util.cv2array(cvDep) imgDepth = np.uint16(imgDepth) # Find contours. Only keep the large ones to reduce noise. 
param = 2 for d in range(param): depthBw = cv2.dilate(depthBw, None) for d in range(param): depthBw = cv2.erode(depthBw, None) #imshow('depthbw', depthBw) blobs = [] blobs = traceContoursCV2(depthBw) blobs = [b for b in blobs if len(b) > 2] blobs = [b for b in blobs if util.blobsize(b, ignoreLessThan=150) > 150] foregroundMask = np.zeros(imgDepth.shape, 'uint8') mat = util.array2cv(foregroundMask) cv.FillPoly(mat, blobs, 255) foregroundMask = util.cv2array(mat) #imshow('foreground', foregroundMask) bgMask = util.array2cv(np.uint8(foregroundMask < 0) * 255) cv.Set(cvImg, (0, 0, 0), bgMask) cv.Set(cvDep, 0, bgMask) img = util.cv2array(cvImg) imgDepth = util.cv2array(cvDep) imgDepth = np.uint16(imgDepth) if debug: #imshow('seg img', .5*np.float32(img0) + .5*np.float32(img0[foregroundMask])) img1 = img0.copy() img1[foregroundMask <= 0] = .5 * img1[foregroundMask <= 0] #imshow('seg img', img1) #imshow('seg dep', imgDepth*10) #imshow('smoothed foreground mask', foregroundMask) return img, imgDepth, foregroundMask