def main():
    ## flags
    isLoadDepthData = False

    ## sliding window
    winSize = 5
    winStep = 3

    ## load depth data and save to file
    if isLoadDepthData:
        loadDepthData()

    ## load dataset
    trainActionList = loadTrainDataset()

    ## process for training
    for trainAction in trainActionList:
        ifile = open(trainAction.depthSequenceFile, 'r')
        depthSequence = pickle.load(ifile)
        ifile.close()

        xoyImgList = []
        xozImgList = []
        yozImgList = []

        # get xoy, xoz and yoz images from depth data
        for depthData in depthSequence:
            # # depth image visualization
            # # color map image
            # grayImg = mat2gray(depthData)
            # colorMapImg = applyColorMap(grayImg, cv2.COLORMAP_JET)
            # cv2.imshow('', colorMapImg)
            # cv2.waitKey()

            # # point cloud visualization
            # points = getWorldCoordinates(depthData)
            # visualizePointCloud(points)

            xoyImg = mat2gray(depthData)
            xozImg, yozImg = getProjectionImages(depthData)

            xoyImgList.append(xoyImg)
            xozImgList.append(xozImg)
            yozImgList.append(yozImg)

        trainAction.xoyImgList = xoyImgList
        trainAction.xozImgList = xozImgList
        trainAction.yozImgList = yozImgList

        # windowed DMHI
        xoyDmhiList = calWinDepthMHIList(trainAction.xoyImgList, winSize, winStep)
        xozDmhiList = calWinDepthMHIList(trainAction.xozImgList, winSize, winStep)
        yozDmhiList = calWinDepthMHIList(trainAction.yozImgList, winSize, winStep)

        # post processing, crop and resize
        postXoyDmhiList = cropAndResizeImageList(xoyDmhiList)
        postXozDmhiList = cropAndResizeImageList(xozDmhiList)
        postYozDmhiList = cropAndResizeImageList(yozDmhiList)
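
# NOTE: calWinDepthMHIList is called in main() but is not defined in this
# section. The sketch below is only an assumption of its behavior: slide a
# window of `winSize` frames with step `winStep` over the image list and
# compute one DMHI per window via calDepthMHI (defined further down).
# Treat it as a placeholder, not the project's actual implementation.
def calWinDepthMHIList(imgList, winSize, winStep):
    dmhiList = []
    for start in xrange(0, max(len(imgList) - winSize + 1, 1), winStep):
        window = imgList[start:start + winSize]
        if len(window) < 2:
            break  # need at least two frames to compute a frame difference
        # isCrop=False: main() crops/resizes afterwards via cropAndResizeImageList
        dmhiList.append(calDepthMHI(window, isCrop=False))
    return dmhiList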
def getDepthProjection(points, isCrop=True):
    """
    FUNC: get projections by counting the number of 3D points
    PARAM:
        points: point clouds
        isCrop: crop the ROI or not
    RETURN:
        resultProjImg: the projection image (range: 0-255)
    """
    tmp_points = points.copy()
    min_xw = np.min(tmp_points[:, 0])
    min_yw = np.min(tmp_points[:, 1])
    tmp_points[:, 0] -= min_xw
    tmp_points[:, 1] -= min_yw

    # width = math.ceil(np.max(tmp_points[:,0])) + 1
    # height = math.ceil(np.max(tmp_points[:,1])) + 1
    width = 800
    height = 800

    n_points, _ = tmp_points.shape
    projData = np.zeros((height, width))
    for n in xrange(n_points):
        [xw, yw, zw] = tmp_points[n, :]
        if zw != 0:
            rowIdx = int(math.floor(yw))
            colIdx = int(math.floor(xw))
            projData[rowIdx, colIdx] += 1

    projImg = mat2gray(projData)  # scale to 0-255

    # post processing
    # closing (Dilation followed by Erosion)
    kernel = np.ones((3, 3), np.uint8)
    projImg1 = cv2.morphologyEx(projImg, cv2.MORPH_CLOSE, kernel)
    projImg2 = cv2.equalizeHist(projImg1)

    if isCrop:
        boxRegion = findBoxRegion(projImg2)
        top, bottom, left, right = boxRegion
        resultProjImg = projImg2[top:bottom, left:right]
    else:
        resultProjImg = projImg2.copy()

    return resultProjImg
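
# NOTE: mat2gray is used throughout this file but is not defined in this
# section. A minimal sketch is given below, assuming it mirrors MATLAB's
# mat2gray but returns a 0-255 uint8 image, so that cv2.equalizeHist and
# cv2.applyColorMap accept its output directly. This is an assumption,
# not the project's actual implementation.
def mat2gray(mat):
    mat = mat.astype(np.float64)
    minVal = np.min(mat)
    maxVal = np.max(mat)
    if maxVal - minVal == 0:
        return np.zeros(mat.shape, dtype=np.uint8)
    scaled = (mat - minVal) / (maxVal - minVal)  # normalize to [0, 1]
    return (scaled * 255).astype(np.uint8)       # rescale to 0-255 uint8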
def showDepthData(depthData, isColorMap=True):
    """
    FUNC: show depth data
    PARAM:
        depthData: raw depth data
        isColorMap: apply a JET color map (otherwise show grayscale)
    RETURN:
    """
    depthImage = mat2gray(depthData)
    if isColorMap:
        depthImage = cv2.applyColorMap(depthImage, cv2.COLORMAP_JET)
    cv2.imshow('', depthImage)
    cv2.waitKey()
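
# NOTE: findBoxRegion is used by getDepthProjection and calDepthMHI but is
# not defined in this section. The sketch below assumes it returns the
# (top, bottom, left, right) bounding box of the non-zero (foreground)
# pixels, which matches how its result is used for cropping. Treat it as
# a placeholder, not the project's actual implementation.
def findBoxRegion(img):
    rows = np.any(img > 0, axis=1)
    cols = np.any(img > 0, axis=0)
    if not rows.any():
        # no foreground at all: fall back to the full image
        height, width = img.shape
        return 0, height, 0, width
    top, bottom = np.where(rows)[0][[0, -1]]
    left, right = np.where(cols)[0][[0, -1]]
    return top, bottom + 1, left, right + 1  # +1 so slicing keeps the last row/col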
def calDepthMHI(frames, motion_thresh=10, stride=1, isCrop=True):
    """
    FUNC: Calculate DMHI from given sequence (a list of frames)
    PARAM:
        frames: a list of frames
        motion_thresh: threshold for detection of motion region
        stride: stride of calculation of difference between frames
        isCrop: crop ROI or not
    RETURN:
        dmhi: depth motion history image
    """
    duration = len(frames)
    firstFrm = frames[0]
    firstFrm = cv2.GaussianBlur(firstFrm, (3, 3), 0)  # Gaussian blur
    height, width = firstFrm.shape

    # a set of DMHIs
    D_MHIs = []
    D_MHIs.append(np.zeros((height, width)).astype(np.int32))

    for i in xrange(1, duration, stride):
        prevFrmIndex = i - stride
        if prevFrmIndex >= 0:
            # current image
            currFrm = frames[i]
            currFrm = cv2.GaussianBlur(currFrm, (3, 3), 0)  # Gaussian blur
            # previous image
            prevFrm = frames[prevFrmIndex]
            prevFrm = cv2.GaussianBlur(prevFrm, (3, 3), 0)  # Gaussian blur

            # frame difference
            motionImg = depthFrameDiff(currFrm, prevFrm, motion_thresh)

            # DMHI
            # if D == 1
            DMHI = motionImg.copy()
            DMHI[np.where(motionImg == 1)] = duration
            # otherwise
            tmp = np.maximum(0, D_MHIs[-1] - 1)
            idx = np.where(motionImg != 1)
            DMHI[idx] = tmp[idx]

            D_MHIs.append(DMHI)

            # save mhi image to file
            # dmhiImg = mat2gray(DMHI)
            # cv2.imwrite('mhi_%i.png' %i, dmhiImg)

    # get the result
    finalDMHI = D_MHIs[-1]

    # convert to image
    dmhi = mat2gray(finalDMHI)
    dmhi = cv2.GaussianBlur(dmhi, (3, 3), 0)  # Gaussian blur

    # crop or not
    if isCrop:
        boxRegion = findBoxRegion(dmhi)
        top, bottom, left, right = boxRegion
        resultDmhi = dmhi[top:bottom, left:right]
    else:
        resultDmhi = dmhi.copy()

    return resultDmhi
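
# NOTE: depthFrameDiff is called by calDepthMHI but not defined in this
# section. calDepthMHI tests its output against 1, so the sketch below
# assumes it returns a binary (0/1) motion mask: 1 where the absolute
# depth difference exceeds motion_thresh, 0 elsewhere. This is an
# assumption, not the project's actual implementation.
def depthFrameDiff(currFrm, prevFrm, motion_thresh):
    diff = np.abs(currFrm.astype(np.int32) - prevFrm.astype(np.int32))
    motionImg = np.zeros(diff.shape, dtype=np.int32)
    motionImg[diff > motion_thresh] = 1
    return motionImg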