def storeTestResults(mode, total_correct, total_seen, loss_sum, pred_val):
    ''' This function stores the test data into separate files for later retrieval. '''
    curShape = getShapeName(desiredClassLabel)
    savePath = os.path.join(os.path.split(__file__)[0], "testdata", curShape)
    if not os.path.exists(savePath):
        os.makedirs(savePath)
    mean_loss = loss_sum / float(total_seen)
    accuracy = total_correct / float(total_seen)

    filePath = os.path.join(
        savePath,
        curShape + "_" + str(numTestRuns) + "_" + str(mode) + "_meanloss")
    print("STORING FILES TO: ", filePath)
    tdh.writeResult(filePath, mean_loss)

    filePath = os.path.join(
        savePath,
        curShape + "_" + str(numTestRuns) + "_" + str(mode) + "_accuracy")
    print("STORING FILES TO: ", filePath)
    tdh.writeResult(filePath, accuracy)

    filePath = os.path.join(
        savePath,
        curShape + "_" + str(numTestRuns) + "_" + str(mode) + "_prediction")
    print("STORING FILES TO: ", filePath)
    tdh.writeResult(filePath, pred_val)

    log_string('eval mean loss: %f' % mean_loss)
    log_string('eval accuracy: %f' % accuracy)
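
# Example (hypothetical numbers, not from a real run): a call such as
#   storeTestResults("+average", total_correct=18, total_seen=25,
#                    loss_sum=12.5, pred_val=4)
# would compute mean_loss = 12.5 / 25 = 0.5 and accuracy = 18 / 25 = 0.72 and
# write the mean loss, the accuracy and the predicted label into three files
# under testdata/<shape name>/ via tdh.writeResult.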
def storeAccuracyPerPointsRemoved(accuracy):
    ''' This function stores the accuracy after each round of point removal. '''
    curShape = getShapeName(desiredClassLabel)
    savePath = os.path.join(
        os.path.split(__file__)[0], "testdata", "saliency_maps_ppi")
    if not os.path.exists(savePath):
        os.makedirs(savePath)
    filePath = os.path.join(savePath, curShape + "_accuracy")
    tdh.writeResult(filePath, accuracy)
def storeAmountOfUsedTime(usedTime):
    ''' This function stores the total time used per object. '''
    curShape = getShapeName(desiredClassLabel)
    savePath = os.path.join(
        os.path.split(__file__)[0], "testdata", "p-grad-CAM_performance")
    if not os.path.exists(savePath):
        os.makedirs(savePath)
    filePath = os.path.join(savePath, curShape)
    print("STORING FILES TO: ", filePath)
    tdh.writeResult(filePath, usedTime)
def storeAmountOfPointsRemoved(numPointsRemoved):
    ''' This function stores the amount of points removed per iteration. '''
    curShape = getShapeName(desiredClassLabel)
    savePath = os.path.join(
        os.path.split(__file__)[0], "testdata", "p-grad-CAM_ppi")
    if not os.path.exists(savePath):
        os.makedirs(savePath)
    filePath = os.path.join(savePath, curShape + "_points_removed")
    print("STORING FILES TO: ", filePath)
    tdh.writeResult(filePath, numPointsRemoved)
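
# The helpers above delegate the actual file I/O to tdh.writeResult, whose
# implementation lives in the tdh module and is not shown in this file. As a
# rough sketch (an assumption about tdh, not its real code), such a helper
# could simply append one value per line to a text file:
#
#     def writeResult(filePath, value):
#         """Append a single result value to the given file."""
#         with open(filePath, "a") as resultFile:
#             resultFile.write(str(value) + "\n")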
def drop_and_store_results(self, pointclouds_pl, labels_pl, sess, poolingMode,
                           thresholdMode, numDeletePoints=None):
    # Some profiling
    import time
    start_time = time.time()
    cpr.startProfiling()

    pcTempResult = pointclouds_pl.copy()
    delCount = []
    vipPcPointsArr = []
    weightArray = []
    i = 0

    # Multiply the prediction with a one-hot vector so that only the
    # activation of the class of interest is considered.
    class_activation_vector = tf.multiply(
        self.pred, tf.one_hot(indices=desiredClassLabel, depth=40))

    while True:
        i += 1
        print("ITERATION: ", i)
        # Set up the feed dict for the current iteration.
        feed_dict = {
            self.pointclouds_pl: pcTempResult,
            self.labels_pl: labels_pl,
            self.is_training_pl: self.is_training
        }

        maxgradients = self.getGradient(sess, poolingMode,
                                        class_activation_vector, feed_dict)

        ops = {
            'pred': self.pred,
            'loss': self.classify_loss,
            'maxgradients': maxgradients
        }

        # ===================================================================
        # Evaluate over n batches now to get the accuracy for this iteration.
        # ===================================================================
        total_correct = 0
        total_seen = 0
        loss_sum = 0
        pcEvalTest = copy.deepcopy(pcTempResult)
        for _ in range(numTestRuns):
            pcEvalTest = provider.rotate_point_cloud_XYZ(pcEvalTest)
            feed_dict2 = {
                self.pointclouds_pl: pcEvalTest,
                self.labels_pl: labels_pl,
                self.is_training_pl: self.is_training
            }
            eval_prediction, eval_loss, heatGradient = sess.run(
                [ops['pred'], ops['loss'], ops['maxgradients']],
                feed_dict=feed_dict2)
            eval_prediction = np.argmax(eval_prediction, 1)
            correct = np.sum(eval_prediction == labels_pl)
            total_correct += correct
            total_seen += 1
            loss_sum += eval_loss * BATCH_SIZE

        print("GROUND TRUTH: ", getShapeName(desiredClassLabel))
        print("PREDICTION: ", getPrediction(eval_prediction))
        print("LOSS: ", eval_loss)
        print("ACCURACY: ", (total_correct / total_seen))
        accuracy = total_correct / float(total_seen)

        # Store data now if desired. Note that the number of *remaining*
        # points is what gets logged via storeAmountOfPointsRemoved.
        if storeResults:
            curRemainingPoints = NUM_POINT - sum(delCount)
            storeAmountOfPointsRemoved(curRemainingPoints)
            storeAccuracyPerPointsRemoved(accuracy)

        # Stop iterating once the prediction deviates from the ground truth
        # and the accuracy has dropped to 50% or below.
        if desiredClassLabel != eval_prediction and accuracy <= 0.5:
            print("GROUND TRUTH DEVIATED FROM PREDICTION AFTER %s ITERATIONS"
                  % i)
            break

        # Delete points according to the chosen threshold mode and keep the
        # removed (important) points and their weights for visualization.
        if thresholdMode == "+average" or thresholdMode == "+median" or thresholdMode == "+midrange":
            resultPCloudThresh, vipPointsArr, Count = gch.delete_above_threshold(
                heatGradient, pcTempResult, thresholdMode)
        if thresholdMode == "-average" or thresholdMode == "-median" or thresholdMode == "-midrange":
            resultPCloudThresh, vipPointsArr, Count = gch.delete_below_threshold(
                heatGradient, pcTempResult, thresholdMode)
        if thresholdMode == "nonzero":
            resultPCloudThresh, vipPointsArr, Count = gch.delete_all_nonzeros(
                heatGradient, pcTempResult)
        if thresholdMode == "zero":
            resultPCloudThresh, vipPointsArr, Count = gch.delete_all_zeros(
                heatGradient, pcTempResult)
        if thresholdMode == "+random" or thresholdMode == "-random":
            resultPCloudThresh, vipPointsArr = gch.delete_random_points(
                heatGradient, pcTempResult, numDeletePoints[i])
            Count = numDeletePoints[i]
        print("REMOVING %s POINTS." % Count)

        delCount.append(Count)
        vipPcPointsArr.extend(vipPointsArr[0])
        weightArray.extend(vipPointsArr[1])
        pcTempResult = copy.deepcopy(resultPCloudThresh)

    # Stop profiling and show the results.
    endTime = time.time() - start_time
    storeAmountOfUsedTime(endTime)
    cpr.stopProfiling(numResults=20)
    print("TIME NEEDED FOR ALGORITHM: ", endTime)

    totalRemoved = sum(delCount)
    print("TOTAL REMOVED POINTS: ", totalRemoved)
    print("TOTAL REMAINING POINTS: ", NUM_POINT - totalRemoved)
    # gch.draw_pointcloud(pcTempResult)  # -- Residual point cloud
    # gch.draw_NewHeatcloud(vipPcPointsArr, weightArray)  # -- Important points only
    vipPcPointsArr.extend(pcTempResult[0])
    gch.draw_NewHeatcloud(vipPcPointsArr, weightArray)  # -- All points combined
    return delCount
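
# Hypothetical usage (the wrapper name, pooling mode string and shapes are
# illustrative only, not taken from this file): with a model wrapper `net`, a
# point cloud batch `pc` of shape (1, NUM_POINT, 3) and its label `lbl`, the
# iterative deletion could be started with something like
#
#     delCount = net.drop_and_store_results(pc, lbl, sess,
#                                           poolingMode="max_pooling",
#                                           thresholdMode="+average")
#
# The "+" threshold modes remove points whose gradient lies above the chosen
# statistic per iteration, while the "-" modes remove those below it.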
def drop_points(self, pointclouds_pl, labels_pl, sess):
    # Some profiling
    import time
    start_time = time.time()
    cpr.startProfiling()

    pointclouds_pl_adv = pointclouds_pl.copy()
    heatmap = np.zeros((pointclouds_pl_adv.shape[1]), dtype=float)
    residualPCArr = []
    counter = 0
    i = 0

    while True:
        i += 1
        # ===================================================================
        # ADAPTIVE EVALUATION AND TERMINATION
        # ===================================================================
        ops = {'pred': self.pred, 'loss': self.classify_loss}
        total_correct = 0
        total_seen = 0
        loss_sum = 0
        pcEvalTest = pointclouds_pl_adv.copy()
        for _ in range(evalCycles):
            pcEvalTest = provider.rotate_point_cloud_XYZ(pcEvalTest)
            feed_dict2 = {
                self.pointclouds_pl: pcEvalTest,
                self.labels_pl: labels_pl,
                self.is_training_pl: self.is_training
            }
            eval_prediction, eval_loss = sess.run(
                [ops['pred'], ops['loss']], feed_dict=feed_dict2)
            eval_prediction = np.argmax(eval_prediction, 1)
            correct = np.sum(eval_prediction == labels_pl)
            total_correct += correct
            total_seen += 1
            loss_sum += eval_loss * BATCH_SIZE

        print("GROUND TRUTH: ", getShapeName(desiredClassLabel))
        print("PREDICTION: ", getPrediction(eval_prediction))
        print("LOSS: ", eval_loss)
        print("ACCURACY: ", (total_correct / total_seen))
        accuracy = total_correct / float(total_seen)

        # Store results now.
        if storeResults:
            totalRemainingPoints = NUM_POINT - counter
            storeAmountOfPointsRemoved(totalRemainingPoints)
            storeAccuracyPerPointsRemoved(accuracy)

        # Stop iterating once the prediction deviates from the ground truth
        # and the accuracy has dropped to 50% or below.
        if desiredClassLabel != eval_prediction and accuracy <= 0.5:
            print("GROUND TRUTH DEVIATED FROM PREDICTION AFTER %s ITERATIONS"
                  % i)
            break

        # ===================================================================
        # PERFORM COMPUTATION
        # ===================================================================
        grad = sess.run(self.grad,
                        feed_dict={
                            self.pointclouds_pl: pointclouds_pl_adv,
                            self.labels_pl: labels_pl,
                            self.is_training_pl: self.is_training
                        })

        # Express the gradient in spherical coordinates around the cloud
        # center and compute r * dL/dr.
        ## mean value
        # sphere_core = np.sum(pointclouds_pl_adv, axis=1, keepdims=True) / float(pointclouds_pl_adv.shape[1])
        ## median value
        sphere_core = np.median(pointclouds_pl_adv, axis=1, keepdims=True)

        sphere_r = np.sqrt(
            np.sum(np.square(pointclouds_pl_adv - sphere_core), axis=2))  # BxN
        sphere_axis = pointclouds_pl_adv - sphere_core  # BxNx3

        if FLAGS.drop_neg:
            sphere_map = np.multiply(
                np.sum(np.multiply(grad, sphere_axis), axis=2),
                np.power(sphere_r, FLAGS.power))
        else:
            sphere_map = -np.multiply(
                np.sum(np.multiply(grad, sphere_axis), axis=2),
                np.power(sphere_r, FLAGS.power))

        # Indices of the self.a highest-scoring points per cloud.
        drop_indice = np.argpartition(
            sphere_map, kth=sphere_map.shape[1] - self.a,
            axis=1)[:, -self.a:]

        tmp = np.zeros((pointclouds_pl_adv.shape[0],
                        pointclouds_pl_adv.shape[1] - self.a, 3),
                       dtype=float)
        for j in range(pointclouds_pl.shape[0]):
            for dropIndex in drop_indice[j]:
                residualPCArr.append(pointclouds_pl_adv[j][dropIndex])
                heatmap[counter] = sphere_map[0][dropIndex]
                counter += 1
            tmp[j] = np.delete(pointclouds_pl_adv[j], drop_indice[j],
                               axis=0)  # along N points to delete
        pointclouds_pl_adv = tmp.copy()

    residualPCArr.extend(pointclouds_pl_adv[0])

    # Stop profiling and show the results.
    endTime = time.time() - start_time
    storeAmountOfUsedTime(endTime)
    cpr.stopProfiling(numResults=20)

    print("POINTS DROPPED: ", counter)
    gch.draw_NewHeatcloud(residualPCArr, heatmap)
    gch.draw_pointcloud(pointclouds_pl_adv)
    return pointclouds_pl_adv
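
# The per-point saliency score used in drop_points can be reproduced in
# isolation. The sketch below is an illustration only (it is never called in
# this file) and reuses the module's existing numpy import as np: distance to
# the median center, dotted with the gradient and scaled by r**power.
def _sphere_saliency_sketch(pointcloud, grad, power=1, drop_neg=False):
    """Return BxN saliency scores, mirroring the computation in drop_points."""
    sphere_core = np.median(pointcloud, axis=1, keepdims=True)  # Bx1x3
    sphere_r = np.sqrt(
        np.sum(np.square(pointcloud - sphere_core), axis=2))    # BxN
    sphere_axis = pointcloud - sphere_core                      # BxNx3
    scores = np.multiply(
        np.sum(np.multiply(grad, sphere_axis), axis=2),
        np.power(sphere_r, power))
    # drop_points negates the score unless FLAGS.drop_neg is set.
    return scores if drop_neg else -scores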