fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_errs.png') # save results poseNet.save("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix)) # poseNet.load("./eval/{}/net_{}.pkl".format(eval_prefix,eval_prefix)) #################################################### # TEST print("Testing ...") gt3D = [j.gt3Dorig[di.crop_joint_idx].reshape(1, 3) for j in testSeqs[0].data] jts = poseNet.computeOutput([test_data, test_data2, test_data4]) joints = [] for i in range(test_data.shape[0]): joints.append(jts[i].reshape(1, 3)*(testSeqs[0].config['cube'][2]/2.) + testSeqs[0].data[i].com) hpe = MSRAHandposeEvaluation(gt3D, joints) hpe.subfolder += '/'+eval_prefix+'/' print(("Mean error: {}mm, max error: {}mm".format(hpe.getMeanError(), hpe.getMaxError()))) # save results pickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix,os.path.split(__file__)[1],eval_prefix), "wb"), protocol=pickle.HIGHEST_PROTOCOL) print("Testing baseline") ################################# # BASELINE com = [j.com for j in testSeqs[0].data] hpe_com = MSRAHandposeEvaluation(gt3D, numpy.asarray(com).reshape((len(gt3D), 1, 3))) hpe_com.subfolder += '/'+eval_prefix+'/' print(("Mean error: {}mm".format(hpe_com.getMeanError())))
def show(self, frame, handpose):
    """
    Render a depth frame with the estimated hand skeleton overlaid.

    :param frame: 2D depth frame; pixels with value 0 or 32001 are treated
                  as invalid/no-depth sentinels
    :param handpose: 3D joint positions, shape (num_joints, 3); the joint
                     count (16/14/21) selects the dataset-specific skeleton
    :return: tuple (imgcopy, poseimg) — the annotated depth image and a
             side-view rendering of the pose rotated 90 deg about y
    :raises ValueError: if the joint count matches no known dataset
    """
    upsample = 1.
    if 'upsample' in self.sync['config']:
        upsample = self.sync['config']['upsample']

    # plot depth image with annotations
    imgcopy = frame.copy()
    # display hack to hide no-depth pixels: normalize only valid depths,
    # paint the sentinel pixels white
    msk = numpy.logical_and(32001 > imgcopy, imgcopy > 0)
    msk2 = numpy.logical_or(imgcopy == 0, imgcopy == 32001)
    # renamed from min/max to avoid shadowing the builtins
    d_min = imgcopy[msk].min()
    d_max = imgcopy[msk].max()
    imgcopy = (imgcopy - d_min) / (d_max - d_min) * 255.
    imgcopy[msk2] = 255.
    imgcopy = imgcopy.astype('uint8')
    imgcopy = cv2.cvtColor(imgcopy, cv2.COLOR_GRAY2BGR)

    if not numpy.allclose(upsample, 1):
        imgcopy = cv2.resize(imgcopy, dsize=None, fx=upsample, fy=upsample,
                             interpolation=cv2.INTER_LINEAR)

    # Pick the evaluation helper matching the dataset's joint count; only
    # its jointConnections/jointConnectionColors are used for drawing here,
    # so dummy zero arrays suffice as constructor arguments.
    if handpose.shape[0] == 16:
        hpe = ICVLHandposeEvaluation(numpy.zeros((3, 3)), numpy.zeros((3, 3)))
    elif handpose.shape[0] == 14:
        hpe = NYUHandposeEvaluation(numpy.zeros((3, 3)), numpy.zeros((3, 3)))
    elif handpose.shape[0] == 21:
        hpe = MSRAHandposeEvaluation(numpy.zeros((3, 3)), numpy.zeros((3, 3)))
    else:
        raise ValueError("Invalid number of joints {}".format(handpose.shape[0]))

    # Project 3D joints to image coordinates, then re-center and scale so
    # they line up with the (possibly upsampled) display image.
    jtI = self.importer.joints3DToImg(handpose)
    jtI[:, 0:2] -= numpy.asarray([frame.shape[0] // 2, frame.shape[1] // 2])
    jtI[:, 0:2] *= upsample
    jtI[:, 0:2] += numpy.asarray([imgcopy.shape[0] // 2, imgcopy.shape[1] // 2])
    # FIX: cv2 drawing functions require integer pixel coordinates; the
    # projected values are floats (upsample is a float scale factor).
    for i in range(handpose.shape[0]):
        cv2.circle(imgcopy, (int(jtI[i, 0]), int(jtI[i, 1])), 3, (255, 0, 0), -1)
    for i in range(len(hpe.jointConnections)):
        cv2.line(imgcopy,
                 (int(jtI[hpe.jointConnections[i][0], 0]),
                  int(jtI[hpe.jointConnections[i][0], 1])),
                 (int(jtI[hpe.jointConnections[i][1], 0]),
                  int(jtI[hpe.jointConnections[i][1], 1])),
                 255. * hpe.jointConnectionColors[i], 2)

    poseimg = numpy.zeros_like(imgcopy)
    # Rotate the 3D pose 90 deg about y around the crop joint and project to
    # 2D to produce a side view of the skeleton on a blank canvas.
    jtP = self.importer.joints3DToImg(
        rotatePoints3D(handpose, handpose[self.importer.crop_joint_idx], 0., 90., 0.))
    jtP[:, 0:2] -= numpy.asarray([frame.shape[0] // 2, frame.shape[1] // 2])
    jtP[:, 0:2] *= upsample
    jtP[:, 0:2] += numpy.asarray([imgcopy.shape[0] // 2, imgcopy.shape[1] // 2])
    for i in range(handpose.shape[0]):
        cv2.circle(poseimg, (int(jtP[i, 0]), int(jtP[i, 1])), 3, (255, 0, 0), -1)
    for i in range(len(hpe.jointConnections)):
        cv2.line(poseimg,
                 (int(jtP[hpe.jointConnections[i][0], 0]),
                  int(jtP[hpe.jointConnections[i][0], 1])),
                 (int(jtP[hpe.jointConnections[i][1], 0]),
                  int(jtP[hpe.jointConnections[i][1], 1])),
                 255. * hpe.jointConnectionColors[i], 2)

    return imgcopy, poseimg
# NOTE(review): this chunk begins mid-way through a conditional whose header
# lies outside the visible source; the branch structure of the first lines
# below is reconstructed from token order — confirm against the full file.
        start_idx = 0
    else:
        start_idx = int(start_idx)
else:
    print('Start frame index is {}'.format(start_idx))

# Fixed seed for reproducible random choices during annotation
rng = numpy.random.RandomState(23455)

# subset to label
subset_idxs = []
if person == 'P0':
    # Load the MSRA sequence for this person; cache disabled so the data
    # on disk is always re-read.
    di = MSRA15Importer('/home/boonyew/Documents/semi-auto-anno-master/semi-auto-anno/data/msra/', useCache=False)
    Seq2 = di.loadSequence(person, shuffle=False)
    hc = MSRAHandConstraints([Seq2.name])
    # Evaluation helper seeded with GT vs. GT — presumably used only for its
    # skeleton metadata/plotting here; verify against later usage.
    hpe = MSRAHandposeEvaluation([j.gt3Dorig for j in Seq2.data],
                                 [j.gt3Dorig for j in Seq2.data])
    # Blank out the original annotations and attach empty extra-data
    # (visibility + posebit) records, to be filled in during labeling.
    for idx, seq in enumerate(Seq2.data):
        ed = {'vis': [], 'pb': {'pb': [], 'pbp': []}}
        Seq2.data[idx] = seq._replace(gtorig=numpy.zeros_like(seq.gtorig),
                                      extraData=ed)

    # common subset for all
    # Hard-coded frame indices selected for manual annotation.
    subset_idxs = [16, 21, 26, 29, 45, 49, 52, 54, 58, 104, 108, 114, 138,
                   144, 148, 170, 175, 178, 210, 214, 217, 231, 237, 249,
                   252, 259, 264, 283, 287, 296, 307, 345, 370, 381, 384,
                   386, 405, 412, 423, 429, 436, 458, 465, 469, 490, 498,
                   505, 526, 530, 533, 537, 546, 553, 576, 607, 612, 624,
                   631, 657, 667, 669, 673, 685, 697, 704, 735, 742, 751,
                   765, 781, 784, 789, 793, 801, 805, 816, 820, 827, 830,
                   874, 886, 888, 893, 896, 899, 911, 923, 934, 962, 969,
                   983, 1023, 1027, 1029, 1034, 1046, 1054, 1057, 1070,
                   1075, 1085, 1093, 1098, 1110, 1114, 1134, 1138, 1146,
                   1173, 1181, 1184, 1188, 1191, 1194, 1208, 1213, 1221,
                   1224, 1228, 1241, 1248, 1251, 1255, 1262, 1267, 1274,
                   1286, 1295, 1308, 1312, 1335, 1341, 1349, 1353, 1383,
                   1386, 1389, 1410, 1414, 1422, 1432, 1449, 1452, 1455,
                   1465, 1473, 1477, 1489, 1504, 1523, 1532, 1542, 1550,
                   1552, 1571, 1580, 1586, 1591, 1609, 1613, 1617, 1628,
                   1632, 1644, 1653, 1656, 1688, 1694, 1695, 1698, 1709,
                   1713, 1725, 1745, 1752, 1756, 1762, 1772, 1778, 1795,
                   1812, 1814, 1817, 1830, 1833, 1848, 1853, 1858, 1864,
                   1869, 1873, 1887, 1892, 1897, 1904, 1927, 1930, 1934,
                   1937, 1965, 1973, 1978, 1991, 2017, 2028, 2033, 2048,
                   2055, 2058, 2067, 2074, 2094, 2131, 2137, 2146, 2150,
                   2166, 2170, 2177, 2185, 2191, 2196, 2203, 2208, 2213,
                   2222, 2255, 2269, 2273, 2288, 2291, 2298, 2305, 2325,
                   2331, 2334, 2339, 2343, 2347, 2351, 2372, 2380, 2390,
                   2394, 2416, 2428, 2434, 2462, 2468, 2484, 2497, 2504,
                   2509, 2511, 2515, 2529, 2543, 2566, 2572, 2584, 2590,
                   2609, 2617, 2627, 2631, 2644, 2651, 2654, 2661, 2685,
                   2687, 2693, 2702, 2737, 2749, 2754, 2763, 2775, 2778,
                   2790, 2792, 2808, 2813, 2816, 2820, 2829, 2835, 2852,
                   2856, 2872, 2891, 2898, 2905, 2911, 2942, 2945, 2949,
                   2952, 2989, 3011, 3015, 3031, 3034, 3037]
else:
    # Only person 'P0' is supported by this script so far.
    raise NotImplementedError("")

replace_off = 0
replace_file = None  # './params_tracking.npz'

# Output locations for the per-person joint annotation file
output_path = di.basepath
filename_joints = output_path+person+'/joint_'+username+'.txt'
# Evaluate the network's (embedding-based) predictions on the test set and
# dump the ground truth for later comparison.
print("Testing ...")
# Full ground-truth joint sets (all joints per frame, in mm)
gt3D = [j.gt3Dorig for j in testSeqs[0].data]
joints = []
jts_embed = poseNet.computeOutput(test_data)
# Backtransform from embedding
# jts = diaboloNet.computeOutputFromEmbedding(jts_embed)
# calculate pose from codes
# jts = pca.inverse_transform(jts_embed)
# Embedding backtransform is disabled here: the raw network output is
# already in normalized joint space.
jts = jts_embed
for i in range(test_data.shape[0]):
    # Denormalize: scale by half the crop cube's z-extent and translate by
    # the frame's center of mass.
    joints.append(jts[i].reshape((-1, 3)) * (testSeqs[0].config['cube'][2] / 2.)
                  + testSeqs[0].data[i].com)

joints = numpy.array(joints)

hpe = MSRAHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/' + eval_prefix + '/'
print("Train samples: {}, test samples: {}".format(
    train_data.shape[0], len(gt3D)))
print("Mean error: {}mm, max error: {}mm".format(
    hpe.getMeanError(), hpe.getMaxError()))
# Per-joint mean and max errors
print("{}".format(
    [hpe.getJointMeanError(j) for j in range(joints[0].shape[0])]))
print("{}".format(
    [hpe.getJointMaxError(j) for j in range(joints[0].shape[0])]))

# save results
# NOTE(review): uses Python-2 cPickle and a variable `icv` not defined in
# this chunk — presumably set earlier in the file; verify both.
cPickle.dump(gt3D,
             open("./eval/{}/gt_{}.pkl".format(eval_prefix, icv), "wb"),
             protocol=cPickle.HIGHEST_PROTOCOL)