def run_eval(self, epoch):
    # Evaluate on the MPII validation split (paths relative to the script).
    val_data = MPIIDataGen(jsonfile='../../data/mpii/mpii_annotations.json',
                           imgpath='../../data/mpii/images',
                           inres=self.inres, outres=self.outres, is_train=False)
    total_success, total_fail = 0, 0
    threshold = 0.5
    count = 0
    batch_size = 8
    for _img, _gthmap, _meta in val_data.generator(batch_size, 8, sigma=2,
                                                   is_shuffle=False, with_meta=True):
        count += batch_size
        if count > val_data.get_dataset_size():
            break
        # Score only the last stack's heatmaps (the most refined prediction).
        out = self.model.predict(_img)
        suc, bad = cal_heatmap_acc(out[-1], _meta, threshold)
        total_success += suc
        total_fail += bad
    acc = total_success * 1.0 / (total_success + total_fail)
    print('Eval Accuracy ', acc, ' @ Epoch ', epoch)
    with open(os.path.join(self.get_folder_path(), 'val.txt'), 'a+') as xfile:
        xfile.write('Epoch ' + str(epoch) + ':' + str(acc) + '\n')
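# All of these snippets score predictions with cal_heatmap_acc, which is not
# shown here. Below is a minimal sketch of one plausible implementation,
# assuming a PCK-style metric: a joint counts as correct when the argmax of
# its predicted heatmap lands within `threshold` of the ground-truth joint,
# normalized by the heatmap diagonal. The 'tpts' meta key, the normalization,
# and the name cal_heatmap_acc_sketch are assumptions, not the repository's
# actual code.
import numpy as np

def cal_heatmap_acc_sketch(pred_heatmaps, metainfo, threshold):
    # pred_heatmaps: (batch, height, width, njoints), channels-last Keras output.
    good, bad = 0, 0
    _, height, width, njoints = pred_heatmaps.shape
    norm = np.sqrt(height * height + width * width)
    for i, meta in enumerate(metainfo):
        gt_joints = meta['tpts']  # assumed key: joints in heatmap coordinates
        for j in range(njoints):
            # Skip joints without a valid annotation.
            if gt_joints[j][0] <= 0 and gt_joints[j][1] <= 0:
                continue
            hm = pred_heatmaps[i, :, :, j]
            py, px = np.unravel_index(np.argmax(hm), hm.shape)  # (row, col)
            dist = np.linalg.norm(np.array([px, py]) - np.array(gt_joints[j][:2])) / norm
            if dist < threshold:
                good += 1
            else:
                bad += 1
    return good, bad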
def main_test():
    # Load a trained hourglass model (16 MPII joints, 8 stacks) and evaluate
    # it on the validation split at threshold 0.5.
    xnet = HourglassNet(16, 8, (256, 256), (64, 64))
    xnet.load_model("../../trained_models/hg_s8_b1_sigma1/net_arch.json",
                    "../../trained_models/hg_s8_b1_sigma1/weights_epoch22.h5")
    valdata = MPIIDataGen("../../data/mpii/mpii_annotations.json", "../../data/mpii/images",
                          inres=(256, 256), outres=(64, 64), is_train=False)
    total_good, total_fail = 0, 0
    threshold = 0.5
    print('val data size', valdata.get_dataset_size())
    count = 0
    batch_size = 8
    for _img, _gthmap, _meta in valdata.tt_generator(batch_size, 8, sigma=2,
                                                     is_shuffle=False, with_meta=True):
        count += batch_size
        # Periodic progress report every 100 batches.
        if count % (batch_size * 100) == 0:
            print(count, 'processed', total_good, total_fail)
        if count > valdata.get_dataset_size():
            break
        out = xnet.model.predict(_img)
        good, bad = cal_heatmap_acc(out[-1], _meta, threshold)
        total_good += good
        total_fail += bad
    print(total_good, total_fail, threshold, total_good * 1.0 / (total_good + total_fail))
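# These snippets assume imports along the following lines; the exact module
# paths are assumptions (they vary across stacked-hourglass repositories),
# not confirmed by the source:
import os
from hourglass import HourglassNet        # assumed location of the model class
from mpii_datagen import MPIIDataGen      # assumed location of the data generator
from eval_heatmap import cal_heatmap_acc  # assumed location of the metric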
def run_eval(self, epoch):
    # NYU hand-pose variant: 2 output stacks, sigma=3.
    # Alternative dataset roots kept for reference:
    # dataset_path = os.path.join('D:\\', 'nyu_croped')
    # dataset_path = '/home/tomas_bordac/nyu_croped'
    dataset_path = config_reader.load_path()
    valdata = NYUHandDataGen('joint_data.mat', dataset_path, inres=self.inres,
                             outres=self.outres, is_train=False, is_testtrain=False)
    total_suc, total_fail = 0, 0
    threshold = 0.5
    count = 0
    batch_size = 8
    for _img, _gthmap, _meta in valdata.generator(batch_size, 2, sigma=3,
                                                  is_shuffle=False, with_meta=True):
        count += batch_size
        if count > valdata.get_dataset_size():
            break
        out = self.model.predict(_img)
        suc, bad = cal_heatmap_acc(out[-1], _meta, threshold)
        total_suc += suc
        total_fail += bad
    acc = total_suc * 1.0 / (total_fail + total_suc)
    print('Eval Accuracy ', acc, '@ Epoch ', epoch)
    with open(os.path.join(self.get_folder_path(), 'val.txt'), 'a+') as xfile:
        xfile.write('Epoch ' + str(epoch) + ':' + str(acc) + '\n')
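# One way to trigger run_eval once per epoch during training is a Keras
# callback. A minimal sketch, assuming `trainer` is the object that carries
# run_eval; the callback name and wiring are illustrative, not taken from
# the repository:
from tensorflow.keras.callbacks import Callback

class EvalCallback(Callback):
    def __init__(self, trainer):
        super().__init__()
        self.trainer = trainer

    def on_epoch_end(self, epoch, logs=None):
        # Run validation and append the accuracy to val.txt after each epoch.
        self.trainer.run_eval(epoch)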
def run_eval(self, epoch):
    # MPII variant with machine-specific absolute paths.
    valdata = MPIIDataGen(
        "/home/mike/Documents/stacked_hourglass_tf2/data/mpii/mpii_annotations.json",
        "/home/mike/datasets/mpii_human_pose_v1/images",
        inres=self.inres, outres=self.outres, is_train=False)
    total_suc, total_fail = 0, 0
    threshold = 0.5
    count = 0
    batch_size = 8
    for _img, _gthmap, _meta in valdata.generator(batch_size, 8, sigma=2,
                                                  is_shuffle=False, with_meta=True):
        count += batch_size
        if count > valdata.get_dataset_size():
            break
        out = self.model.predict(_img)
        suc, bad = cal_heatmap_acc(out[-1], _meta, threshold)
        total_suc += suc
        total_fail += bad
    acc = total_suc * 1.0 / (total_fail + total_suc)
    print('Eval Accuracy ', acc, '@ Epoch ', epoch)
    with open(os.path.join(self.get_folder_path(), 'val.txt'), 'a+') as xfile:
        xfile.write('Epoch ' + str(epoch) + ':' + str(acc) + '\n')
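# The hard-coded absolute paths above tie this variant to one machine. A
# small sketch of reading them from environment variables with the relative
# defaults used elsewhere in these snippets (the variable names are
# illustrative, not from the repository):
import os

MPII_ANNOTATIONS = os.environ.get('MPII_ANNOTATIONS',
                                  '../../data/mpii/mpii_annotations.json')
MPII_IMAGES = os.environ.get('MPII_IMAGES', '../../data/mpii/images')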