def EvalOneEpoch(self, Loader):
    """Evaluate the model for one full pass over the test set.

    Runs every test batch through the network and accumulates
    sample-weighted running averages of the loss, per-point correct
    rate, and mean IoU. Progress is printed in place on one line and
    the loader is reset once the set is exhausted.

    Args:
        Loader: data loader exposing ``NextBatch_TestSet()``,
            ``ResetLoader_TestSet()`` and ``numParts``.

    Returns:
        Tuple ``(avg_loss, avg_correct_rate, avg_iou)`` averaged over
        all evaluated samples.
    """
    batch_cnt = 1
    samp_cnt = 0
    avg_loss = 0.
    avg_correct_rate = 0.
    avg_iou = 0.
    while True:
        ## get next batch
        SuccessFlag, data, seg, weak_seg_onehot, mb_size = Loader.NextBatch_TestSet()
        if not SuccessFlag:
            break
        ## Dummy mask: every point counts as valid during evaluation
        Mask_bin = np.ones(shape=[mb_size, data.shape[1]], dtype=np.float32)
        # BUGFIX: Is_Training_ph must be fed False at evaluation time so
        # batch-norm / dropout run in inference mode. The original fed
        # True here, while the sibling EvalOneEpoch_Full correctly feeds
        # False.
        # NOTE(review): Y_ph is fed integer labels here but a one-hot
        # encoding in EvalOneEpoch_Full — verify what the placeholder
        # actually expects.
        loss_mb, Z_prob_mb = \
            self.sess.run([self.loss, self.Z_prob],
                          feed_dict={self.X_ph: data,
                                     self.Y_ph: seg,
                                     self.Is_Training_ph: False,
                                     self.Mask_ph: Mask_bin})
        ## Calculate loss and correct rate
        pred_mb = np.argmax(Z_prob_mb, axis=-1)
        correct = np.mean(pred_mb == seg)
        m_iou = np.mean(Tool.IoU(pred_mb, seg, Loader.numParts))
        # Sample-weighted running averages. loss_mb is folded in without
        # scaling by mb_size (treated as a batch total) — presumably the
        # graph's loss is summed, not averaged; TODO confirm.
        avg_loss = (avg_loss * samp_cnt + loss_mb) / (samp_cnt + mb_size)
        avg_correct_rate = (avg_correct_rate * samp_cnt + correct * mb_size) / (samp_cnt + mb_size)
        avg_iou = (avg_iou * samp_cnt + m_iou * mb_size) / (samp_cnt + mb_size)
        samp_cnt += mb_size
        print(
            '\rBatch {:d} EvaluatedSamp {:d} Avg Loss {:.4f} Avg Correct Rate {:.3f}% Avg IoU {:.3f}%'
            .format(batch_cnt, samp_cnt, avg_loss, 100 * avg_correct_rate, 100 * avg_iou),
            end='')
        batch_cnt += 1
    Loader.ResetLoader_TestSet()
    return avg_loss, avg_correct_rate, avg_iou
def EvalOneEpoch_Full(self, Loader, Eval):
    """Evaluate one full pass over the test set with fixed-size siamese batches.

    A short trailing batch is padded up to ``Loader.batchsize`` by tiling
    its first sample; each sample is then duplicated consecutively
    (a, a, b, b, ...) to build the siamese network input. Metrics are
    accumulated as sample-weighted running averages over the real
    (unpadded) samples only.

    Args:
        Loader: data loader exposing ``NextBatch_TestSet()``,
            ``ResetLoader_TestSet()``, ``batchsize`` and ``numParts``.
        Eval: unused in this method; kept for interface compatibility.

    Returns:
        Tuple ``(avg_loss, avg_correct_rate, avg_iou)``.
    """
    batch_cnt = 1
    samp_cnt = 0
    avg_loss = 0.
    avg_correct_rate = 0.
    avg_iou = 0.
    while True:
        # Fetch the next test batch; stop once the loader is exhausted.
        SuccessFlag, data, seg, weak_seg_onehot, mb_size = Loader.NextBatch_TestSet()
        if not SuccessFlag:
            break
        # Pad short batches by repeating sample 0, then one-hot encode
        # labels (13 classes — presumably the dataset's part count;
        # TODO confirm against the loader).
        pad = Loader.batchsize - mb_size
        if pad > 0:
            data_feed = np.concatenate(
                [data, np.tile(data[np.newaxis, 0, ...], [pad, 1, 1])],
                axis=0)
            seg_feed = np.concatenate(
                [seg, np.tile(seg[np.newaxis, 0], [pad, 1])],
                axis=0)
            seg_Onehot_feed = Tool.OnehotEncode(seg_feed, 13)
        else:
            data_feed = data
            seg_Onehot_feed = Tool.OnehotEncode(seg, 13)
        Mask_bin_feed = np.ones(shape=[Loader.batchsize, data.shape[1]],
                                dtype=np.float32)
        # Duplicate each of the first BATCH_SIZE samples consecutively
        # for the siamese input: index pattern [0, 0, 1, 1, 2, 2, ...].
        rep_idx = np.repeat(np.arange(self.BATCH_SIZE), 2)
        data_feed_rep = data_feed[rep_idx]
        seg_Onehot_feed_rep = seg_Onehot_feed[rep_idx]
        Mask_bin_feed_rep = Mask_bin_feed[rep_idx]
        loss_mb, Z_prob_mb = self.sess.run(
            [self.loss, self.Z_prob],
            feed_dict={self.X_ph: data_feed_rep,
                       self.Y_ph: seg_Onehot_feed_rep,
                       self.Is_Training_ph: False,
                       self.Mask_ph: Mask_bin_feed_rep})
        # Keep one prediction per duplicated pair, real samples only.
        Z_prob_mb = Z_prob_mb[0:2 * mb_size:2, ...]
        # Per-batch metrics against the unpadded labels.
        pred_mb = np.argmax(Z_prob_mb, axis=-1)
        correct = np.mean(pred_mb == seg)
        m_iou = np.mean(Tool.IoU(pred_mb, seg, Loader.numParts))
        # Sample-weighted running averages (loss_mb folded in unscaled,
        # matching the companion evaluation routine).
        total = samp_cnt + mb_size
        avg_loss = (avg_loss * samp_cnt + loss_mb) / total
        avg_correct_rate = (avg_correct_rate * samp_cnt + correct * mb_size) / total
        avg_iou = (avg_iou * samp_cnt + m_iou * mb_size) / total
        samp_cnt = total
        print(
            '\rBatch {:d} EvaluatedSamp {:d} Avg Loss {:.4f} Avg Correct Rate {:.3f}% Avg IoU {:.3f}%'
            .format(batch_cnt, samp_cnt, avg_loss, 100 * avg_correct_rate, 100 * avg_iou),
            end='')
        batch_cnt += 1
    Loader.ResetLoader_TestSet()
    return avg_loss, avg_correct_rate, avg_iou