fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_errs.png')

# save results
poseNet.save("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))
# poseNet.load("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))

####################################################
# TEST
print("Testing ...")
gt3D = [j.gt3Dorig[di.crop_joint_idx].reshape(1, 3) for j in testSeqs[0].data]
jts = poseNet.computeOutput([test_data, test_data2, test_data4])
joints = []
for i in xrange(test_data.shape[0]):
    joints.append(jts[i].reshape(1, 3)*(testSeqs[0].config['cube'][2]/2.) + testSeqs[0].data[i].com)

hpe = ICVLHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/'+eval_prefix+'/'
print("Mean error: {}mm, max error: {}mm".format(hpe.getMeanError(), hpe.getMaxError()))

# save results
cPickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix, os.path.split(__file__)[1], eval_prefix), "wb"),
             protocol=cPickle.HIGHEST_PROTOCOL)

print("Testing baseline")

#################################
# BASELINE
# Load the evaluation
data_baseline = di.loadBaseline('../data/ICVL/LRF_Results_seq_1.txt')

hpe_base = ICVLHandposeEvaluation(gt3D, numpy.asarray(data_baseline)[:, di.crop_joint_idx, :].reshape((len(gt3D), 1, 3)))
hpe_base.subfolder += '/'+eval_prefix+'/'
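# Hedged addition (not part of the original script): hpe_base exposes the same
# error metrics already used above for the network predictions, so the LRF
# baseline can be summarized the same way for a quick side-by-side comparison.
print("Baseline mean error: {}mm, max error: {}mm".format(hpe_base.getMeanError(), hpe_base.getMaxError()))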
###################################################################
# test
print("Testing ...")
gt3D = [j.gt3Dorig for j in testSeqs[0].data]
jts_embed = poseNet.computeOutput(test_data)
jts = jts_embed
joints = []
for i in range(test_data.shape[0]):
    joints.append(jts[i].reshape(gt3D[0].shape[0], 3) * (testSeqs[0].config['cube'][2] / 2.) + testSeqs[0].data[i].com)

joints = numpy.array(joints)

hpe = ICVLHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/' + eval_prefix + '/'
mean_error = hpe.getMeanError()
max_error = hpe.getMaxError()
print("Train samples: {}, test samples: {}".format(train_data.shape[0], len(gt3D)))
print("Mean error: {}mm, max error: {}mm".format(mean_error, max_error))
print("MD score: {}".format(hpe.getMDscore(80)))

print("{}".format([hpe.getJointMeanError(j) for j in range(joints[0].shape[0])]))
print("{}".format([hpe.getJointMaxError(j) for j in range(joints[0].shape[0])]))

# save results (the dump call was truncated in the excerpt; completed here with
# the same pattern used by the other test scripts in this section)
cPickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix, os.path.split(__file__)[1], eval_prefix), "wb"),
             protocol=cPickle.HIGHEST_PROTOCOL)
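# Minimal helper sketch (an assumption, not in the original script) that mirrors
# the denormalization in the loop above: the network outputs joints normalized
# to the crop cube, so predictions are scaled by half the cube size and shifted
# by the per-frame hand center of mass (com). "pred", "seq" and "idx" are
# placeholder names.
def embedToWorld(pred, seq, idx):
    """Map one normalized prediction back to world coordinates in mm."""
    cube_half = seq.config['cube'][2] / 2.
    return pred.reshape(-1, 3) * cube_half + seq.data[idx].com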
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_errs.png')

# save results
poseNet.save("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))
# poseNet.load("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))

####################################################
# TEST
print("Testing ...")
gt3D = [j.gt3Dorig[0].reshape(1, 3) for j in testSeqs[0].data]
jts = poseNet.computeOutput([test_data, test_data2, test_data4])
joints = []
for i in xrange(test_data.shape[0]):
    joints.append(jts[i].reshape(1, 3)*(testSeqs[0].config['cube'][2]/2.) + testSeqs[0].data[i].com)

hpe = ICVLHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/'+eval_prefix+'/'
mean_error = hpe.getMeanError()
max_error = hpe.getMaxError()
print("Mean error: {}mm, max error: {}mm".format(mean_error, max_error))

# save results
cPickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix, os.path.split(__file__)[1], eval_prefix), "wb"),
             protocol=cPickle.HIGHEST_PROTOCOL)

print("Testing baseline")

#################################
# BASELINE
# Load the evaluation
data_baseline = di.loadBaseline('../data/ICVL/LRF_Results_seq_1.txt')
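# Hedged sketch (not part of the original pipeline): the result file dumped
# above can be reloaded later, e.g. to re-plot errors without re-running the
# network, assuming the same eval_prefix and script file name. The imports may
# already be present at the top of the script.
import cPickle
import os

result_file = "./eval/{}/result_{}_{}.pkl".format(eval_prefix, os.path.split(__file__)[1], eval_prefix)
with open(result_file, "rb") as f:
    saved_joints = cPickle.load(f)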
def show(self, frame, handpose):
    """
    Show depth with overlaid joints
    :param frame: depth frame
    :param handpose: joint positions
    :return: image
    """
    upsample = 1.
    if 'upsample' in self.sync['config']:
        upsample = self.sync['config']['upsample']

    # plot depth image with annotations
    imgcopy = frame.copy()
    # display hack to hide nd depth
    msk = numpy.logical_and(32001 > imgcopy, imgcopy > 0)
    msk2 = numpy.logical_or(imgcopy == 0, imgcopy == 32001)
    min = imgcopy[msk].min()
    max = imgcopy[msk].max()
    imgcopy = (imgcopy - min) / (max - min) * 255.
    imgcopy[msk2] = 255.
    imgcopy = imgcopy.astype('uint8')
    imgcopy = cv2.cvtColor(imgcopy, cv2.COLOR_GRAY2BGR)

    if not numpy.allclose(upsample, 1):
        imgcopy = cv2.resize(imgcopy, dsize=None, fx=upsample, fy=upsample, interpolation=cv2.INTER_LINEAR)

    if handpose.shape[0] == 16:
        hpe = ICVLHandposeEvaluation(numpy.zeros((3, 3)), numpy.zeros((3, 3)))
    elif handpose.shape[0] == 14:
        hpe = NYUHandposeEvaluation(numpy.zeros((3, 3)), numpy.zeros((3, 3)))
    elif handpose.shape[0] == 21:
        hpe = MSRAHandposeEvaluation(numpy.zeros((3, 3)), numpy.zeros((3, 3)))
    else:
        raise ValueError("Invalid number of joints {}".format(handpose.shape[0]))

    jtI = self.importer.joints3DToImg(handpose)
    jtI[:, 0:2] -= numpy.asarray([frame.shape[0] // 2, frame.shape[1] // 2])
    jtI[:, 0:2] *= upsample
    jtI[:, 0:2] += numpy.asarray([imgcopy.shape[0] // 2, imgcopy.shape[1] // 2])
    for i in range(handpose.shape[0]):
        cv2.circle(imgcopy, (jtI[i, 0], jtI[i, 1]), 3, (255, 0, 0), -1)
    for i in range(len(hpe.jointConnections)):
        cv2.line(imgcopy, (jtI[hpe.jointConnections[i][0], 0], jtI[hpe.jointConnections[i][0], 1]),
                 (jtI[hpe.jointConnections[i][1], 0], jtI[hpe.jointConnections[i][1], 1]),
                 255. * hpe.jointConnectionColors[i], 2)
    # comI = self.importer.joint3DToImg(com3D)
    # comI[0:2] -= numpy.asarray([frame.shape[0]//2, frame.shape[1]//2])
    # comI[0:2] *= upsample
    # comI[0:2] += numpy.asarray([imgcopy.shape[0]//2, imgcopy.shape[1]//2])
    # cv2.circle(imgcopy, (comI[0], comI[1]), 3, (0, 255, 0), 1)

    poseimg = numpy.zeros_like(imgcopy)
    # rotate 3D pose and project to 2D
    jtP = self.importer.joints3DToImg(rotatePoints3D(handpose, handpose[self.importer.crop_joint_idx], 0., 90., 0.))
    jtP[:, 0:2] -= numpy.asarray([frame.shape[0] // 2, frame.shape[1] // 2])
    jtP[:, 0:2] *= upsample
    jtP[:, 0:2] += numpy.asarray([imgcopy.shape[0] // 2, imgcopy.shape[1] // 2])
    for i in range(handpose.shape[0]):
        cv2.circle(poseimg, (jtP[i, 0], jtP[i, 1]), 3, (255, 0, 0), -1)
    for i in range(len(hpe.jointConnections)):
        cv2.line(poseimg, (jtP[hpe.jointConnections[i][0], 0], jtP[hpe.jointConnections[i][0], 1]),
                 (jtP[hpe.jointConnections[i][1], 0], jtP[hpe.jointConnections[i][1], 1]),
                 255. * hpe.jointConnectionColors[i], 2)
    # comP = self.importer.joint3DToImg(rotatePoint3D(com3D, handpose[self.importer.crop_joint_idx], 0., 90., 0.))
    # comP[0:2] -= numpy.asarray([frame.shape[0]//2, frame.shape[1]//2])
    # comP[0:2] *= upsample
    # comP[0:2] += numpy.asarray([imgcopy.shape[0]//2, imgcopy.shape[1]//2])
    # cv2.circle(poseimg, (comP[0], comP[1]), 3, (0, 255, 0), 1)

    return imgcopy, poseimg
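# Illustrative usage sketch (assumptions: "pipeline" is an instance of this class,
# "depth" a raw depth frame and "pose" the estimated 3D joint positions in mm;
# none of these names appear in the original code). show() returns the annotated
# depth view and the rotated pose view with identical shapes, so they can be
# concatenated and displayed side by side.
import cv2
import numpy

img, poseimg = pipeline.show(depth, pose)
cv2.imshow('annotated depth / rotated pose', numpy.concatenate([img, poseimg], axis=1))
cv2.waitKey(1)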
assert len(model) == len(weight_num), 'length is not equal!'

for ind in xrange(len(model)):
    joints, file_name = predictJoints(model[ind], weight_num[ind])
    pred_joints.append(joints)
    eval_prefix.append('ICVL_' + model[ind] + '_' + weight_num[ind])
    if not os.path.exists('../eval/'+eval_prefix[ind]+'/'):
        os.makedirs('../eval/'+eval_prefix[ind]+'/')

    if DEBUG:
        print('joints.shape = {}'.format(joints.shape))
        print('joints[0] = {}'.format(joints[0]))
        print('type(joints[0]) = {}'.format(type(joints[0])))
        print('type(joints[0][0]) = {}'.format(type(joints[0][0])))

    hpe.append(ICVLHandposeEvaluation(gt3D, joints))
    hpe[ind].subfolder += eval_prefix[ind]+'/'
    mean_error = hpe[ind].getMeanError()
    max_error = hpe[ind].getMaxError()
    print("Test on {}_{}".format(model[ind], weight_num[ind]))
    print("Mean error: {}mm, max error: {}mm".format(mean_error, max_error))
    print("MD score: {}".format(hpe[ind].getMDscore(80)))
    print("{}".format([hpe[ind].getJointMeanError(j) for j in range(joints[0].shape[0])]))
    print("{}".format([hpe[ind].getJointMaxError(j) for j in range(joints[0].shape[0])]))

print("Testing baseline")

#################################
# BASELINE
# Load the evaluation
di = ICVLImporter('../dataset/ICVL/', cacheDir='../dataset/cache/')
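# Hedged continuation sketch (assumptions: the baseline result file path below
# is hypothetical, and gt3D covers the same test sequence as the baseline). The
# call pattern simply mirrors the baseline evaluation used by the other scripts
# in this section.
data_baseline = di.loadBaseline('../dataset/ICVL/LRF_Results_seq_1.txt')
hpe_base = ICVLHandposeEvaluation(gt3D, data_baseline)
hpe_base.subfolder += eval_prefix[0] + '/'
print("Baseline mean error: {}mm".format(hpe_base.getMeanError()))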