def _visualizeOutput(self, netOutput):
    """Write per-instance debug images to ``<cwd>/out/``.

    For every instance in the batch, saves a horizontal strip of
    (aligned image ROI | ground-truth mask | thresholded prediction),
    named ``<visCount>_<j>_<iou>.jpg``.

    Parameters
    ----------
    netOutput : ndarray
        Network output in NCHW layout; channel 1 is taken as the
        foreground probability map (assumed — TODO confirm).

    Side effects: creates the output directory if missing and writes
    one JPEG per instance. Does NOT mutate ``netOutput``.
    """
    import os
    # Original used a hard-coded Windows separator ('\\out\\'); build the
    # path portably and make sure it exists — cv2.imwrite silently returns
    # False when the target directory is missing.
    outdir = os.path.join(os.getcwd(), 'out')
    os.makedirs(outdir, exist_ok=True)

    # NCHW -> NHWC so netOutput[idx] indexes a single (H, W, C) map.
    netOutput = netOutput.transpose(0, 2, 3, 1)
    # 4x upscale matrix used only for visualization.
    mVis = translib.stride_matrix(4)

    idx = 0
    for i, (img, masks) in enumerate(zip(self.batchimgs, self.batchmasks)):
        for j in range(len(masks)):
            # Channel 1 assumed to be foreground probability — TODO confirm.
            predmap = netOutput[idx][:, :, 1]
            # Threshold WITHOUT mutating the caller's array (the original
            # wrote 0/1 back into a view of netOutput in place).
            predmap = (predmap > 0.5).astype(predmap.dtype)
            predmap = cv2.cvtColor(predmap, cv2.COLOR_GRAY2BGR)
            predmap = cv2.warpAffine(predmap, mVis[0:2], (256, 256))

            # Compose the visualization upscale with the origin->output
            # alignment so image and GT mask land in the same 256x256 frame.
            matrix = mVis.dot(self.maskAlignMatrixs[i][j])
            imgRoi = cv2.warpAffine(img, matrix[0:2], (256, 256))
            mask = cv2.warpAffine(masks[j], matrix[0:2], (256, 256))
            mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

            # IoU between GT mask and prediction; guard the empty/empty
            # case, which previously produced a 0/0 nan.
            inter = np.logical_and(mask, predmap).sum()
            union = np.logical_or(mask, predmap).sum()
            iou = inter / union if union else 0.0

            vis = np.hstack((imgRoi, mask * 255, predmap * 255))
            fname = '%d_%d_%.2f.jpg' % (self.visCount, j, iou)
            cv2.imwrite(os.path.join(outdir, fname), np.uint8(vis))
            idx += 1
def _calcAlignMatrixs(self):
    """Compute per-instance affine matrices for affine-align.

    Populates (one ndarray of shape (num_kpts, 3, 3) per batch image):
      - ``self.featAlignMatrixs``: feature-space -> align-space matrices
        (m3), consumed by the affine-align op.
      - ``self.maskAlignMatrixs``: original-image -> output-space
        matrices (m4·m3·m2·m1), used to reverse the alignment.
      - ``self.skeletonFeats`` (only if ``self.cat_skeleton``): rendered
        skeleton feature maps of shape (num_kpts, 55, size_align,
        size_align).

    Matrix chain: size origin ->(m1)-> input ->(m2)-> feature
    ->(m3, optionally augmented)-> align ->(m4)-> output.
    """
    ## 1. transform kpts to feature coordinates.
    ## 2. featAlignMatrixs (size feature -> size align) used by affine-align
    ## 3. maskAlignMatrixs (size origin -> size output) used by Reverse affine-align
    ## matrix: size origin ->(m1)-> input ->(m2)-> feature ->(m3(mAug))-> align ->(m4)-> output
    size_input = self.size_input
    size_feat = self.size_feat
    size_align = self.size_align
    size_output = self.size_output
    # Pure-scale matrices for the input->feature and align->output hops.
    m2 = translib.stride_matrix(size_feat / size_input)
    m4 = translib.stride_matrix(size_output / size_align)

    self.featAlignMatrixs = [[] for _ in range(self.bz)]
    self.maskAlignMatrixs = [[] for _ in range(self.bz)]
    if self.cat_skeleton:
        self.skeletonFeats = [[] for _ in range(self.bz)]

    for i, (matrix, kpts, masks) in enumerate(
            zip(self.inputMatrixs, self.batchkpts, self.batchmasks)):
        m1 = matrix
        # transform gt_kpts to feature coordinates.
        kpts = translib.warpAffineKpts(kpts, m2.dot(m1))

        self.featAlignMatrixs[i] = np.zeros((len(kpts), 3, 3), dtype=np.float32)
        self.maskAlignMatrixs[i] = np.zeros((len(kpts), 3, 3), dtype=np.float32)
        if self.cat_skeleton:
            # 55 channels per skeleton map — presumably fixed by
            # genSkeletons' keypoint/limb encoding; verify there.
            self.skeletonFeats[i] = np.zeros(
                (len(kpts), 55, size_align, size_align), dtype=np.float32)

        for j, (kpt, mask) in enumerate(zip(kpts, masks)):
            # NOTE(review): tic() with no matching toc() in this block —
            # presumably stopped elsewhere; confirm.
            timers['2'].tic()
            ## best_align: {'category', 'template', 'matrix', 'score', 'history'}
            best_align = self.poseAlignOp.align(kpt, size_feat, size_feat,
                                                size_align, size_align,
                                                visualize=False,
                                                return_history=False)
            ## aug
            if self.training:
                # Random rotation/scale/translation jitter applied on top
                # of the best alignment, training only.
                mAug, _ = translib.get_aug_matrix(size_align, size_align,
                                                  size_align, size_align,
                                                  angle_range=(-30, 30),
                                                  scale_range=(0.8, 1.2),
                                                  trans_range=(-0.1, 0.1))
                m3 = mAug.dot(best_align['matrix'])
            else:
                m3 = best_align['matrix']

            self.featAlignMatrixs[i][j] = m3
            # Full chain: origin -> input -> feature -> align -> output.
            self.maskAlignMatrixs[i][j] = m4.dot(m3).dot(m2).dot(m1)

            if self.cat_skeleton:
                # size_align (sigma=3, threshold=1) for size_align=64
                self.skeletonFeats[i][j] = genSkeletons(
                    translib.warpAffineKpts([kpt], m3),
                    size_align, size_align,
                    stride=1, sigma=3, threshold=1,
                    visdiff=True).transpose(2, 0, 1)
def preprocess(batchimgs, batchkpts, batchmasks):
    """Prepare a batch for the network: warp images to 512x512 and
    compute the affine-align matrices for every keypoint set.

    Standalone counterpart of the ``_calcAlignMatrixs`` method: no
    augmentation branch, fixed sizes (input 512, feature 128, align 64,
    output 64) and skeleton features always on.

    Parameters
    ----------
    batchimgs : list of HxWx3 images (BGR assumed — channel flip below).
    batchkpts : list of per-image keypoint arrays.
    batchmasks : list of per-image masks (passed through unchanged).

    Returns
    -------
    list: [batchimgs, batchkpts, batchmasks, inputMatrixs, inputs,
    featAlignMatrixs, maskAlignMatrixs, skeletonFeats]
    """
    bz = len(batchimgs)
    # Identity-range "augmentation" = pure resize/letterbox matrix per image.
    inputMatrixs = [translib.get_aug_matrix(img.shape[1], img.shape[0],
                                            512, 512,
                                            angle_range=(-0., 0.),
                                            scale_range=(1., 1.),
                                            trans_range=(-0., 0.))[0] \
                    for img in batchimgs]
    inputs = [cv2.warpAffine(img, matrix[0:2], (512, 512)) \
              for img, matrix in zip(batchimgs, inputMatrixs)]
    if len(inputs) == 1:
        inputs = inputs[0][np.newaxis, ...]
    else:
        inputs = np.array(inputs)
    # Channel flip (BGR->RGB assumed — TODO confirm against the model),
    # then NHWC -> NCHW, then cast for the network.
    inputs = inputs[..., ::-1]
    inputs = inputs.transpose(0, 3, 1, 2)
    inputs = inputs.astype('float32')

    ## 1. transform kpts to feature coordinates.
    ## 2. featAlignMatrixs (size feature -> size align) used by affine-align
    ## 3. maskAlignMatrixs (size origin -> size output) used by Reverse affine-align
    ## matrix: size origin ->(m1)-> input ->(m2)-> feature ->(m3(mAug))-> align ->(m4)-> output
    size_input = 512
    size_feat = 128
    size_align = 64
    size_output = 64
    # NOTE(review): the final return references skeletonFeats, which is
    # only assigned when cat_skeleton is True — safe only while this
    # stays hard-coded True.
    cat_skeleton = True
    poseAlignOp = PoseAlign(
        template_file='/'.join(osp.abspath(__file__).split('/')[:-2]) +
        '/modeling/templates.json',
        visualize=False,
        factor=1.0)

    # Pure-scale matrices for the input->feature and align->output hops.
    m2 = translib.stride_matrix(size_feat / size_input)
    m4 = translib.stride_matrix(size_output / size_align)

    featAlignMatrixs = [[] for _ in range(bz)]
    maskAlignMatrixs = [[] for _ in range(bz)]
    if cat_skeleton:
        skeletonFeats = [[] for _ in range(bz)]

    for i, (matrix, kpts) in enumerate(zip(inputMatrixs, batchkpts)):
        m1 = matrix
        # transform gt_kpts to feature coordinates.
        kpts = translib.warpAffineKpts(kpts, m2.dot(m1))

        featAlignMatrixs[i] = np.zeros((len(kpts), 3, 3), dtype=np.float32)
        maskAlignMatrixs[i] = np.zeros((len(kpts), 3, 3), dtype=np.float32)
        if cat_skeleton:
            # 55 channels per skeleton map — presumably fixed by
            # genSkeletons' keypoint/limb encoding; verify there.
            skeletonFeats[i] = np.zeros(
                (len(kpts), 55, size_align, size_align), dtype=np.float32)

        for j, kpt in enumerate(kpts):
            ## best_align: {'category', 'template', 'matrix', 'score', 'history'}
            best_align = poseAlignOp.align(kpt, size_feat, size_feat,
                                           size_align, size_align,
                                           visualize=False,
                                           return_history=False)
            m3 = best_align['matrix']

            featAlignMatrixs[i][j] = m3
            # Full chain: origin -> input -> feature -> align -> output.
            maskAlignMatrixs[i][j] = m4.dot(m3).dot(m2).dot(m1)

            if cat_skeleton:
                # size_align (sigma=3, threshold=1) for size_align=64
                skeletonFeats[i][j] = genSkeletons(
                    translib.warpAffineKpts([kpt], m3),
                    size_align, size_align,
                    stride=1, sigma=3, threshold=1,
                    visdiff=True).transpose(2, 0, 1)

    return [
        batchimgs, batchkpts, batchmasks, inputMatrixs, inputs,
        featAlignMatrixs, maskAlignMatrixs, skeletonFeats
    ]