def farthestSampling(file_names, NUM_POINT):
    """Load point-cloud batches into dictionaries keyed by batch index.

    Keeps the first NUM_POINT points of every object — presumably the files
    were pre-ordered by farthest-point sampling (TODO: confirm upstream).

    Args:
        file_names: sequence of data-file paths readable by utils.loadDataFile.
        NUM_POINT: number of points to retain per object.

    Returns:
        Tuple (inputData, inputLabel): dicts mapping batch index to the
        truncated point array and the squeezed integer label array.
    """
    inputData = {}
    inputLabel = {}
    for batch_idx, fname in enumerate(file_names):
        data, labels = utils.loadDataFile(fname)
        # Truncate each object to its first NUM_POINT points.
        inputData[batch_idx] = data[:, :NUM_POINT, :]
        inputLabel[batch_idx] = np.int_(np.squeeze(labels))
    return inputData, inputLabel
def uniformSampling(file_names, NUM_POINT):
    """Load point-cloud batches, uniformly subsampling each object's points.

    For every object, NUM_POINT distinct point indices are drawn uniformly
    at random (without replacement) and gathered into the output array.

    Args:
        file_names: sequence of data-file paths readable by utils.loadDataFile.
        NUM_POINT: number of points to sample per object.

    Returns:
        Tuple (inputData, inputLabel): dicts mapping batch index to the
        sampled (num_objects, NUM_POINT, 3) array and the squeezed integer
        label array.
    """
    inputData = {}
    inputLabel = {}
    for index, fname in enumerate(file_names):
        current_data, current_label = utils.loadDataFile(fname)
        current_label = np.int_(np.squeeze(current_label))
        output = np.zeros((len(current_data), NUM_POINT, 3))
        for i, object_xyz in enumerate(current_data):
            # Fix: sample from the object's actual point count instead of a
            # hard-coded 2048 — identical behavior for 2048-point clouds,
            # and correct for any other size.
            samples_index = np.random.choice(
                object_xyz.shape[0], NUM_POINT, replace=False)
            output[i] = object_xyz[samples_index]
        inputData[index] = output
        inputLabel[index] = current_label
    return inputData, inputLabel
def farthestSampling(file_names, NUM_POINT):
    """Load point-cloud data into dictionaries keyed by batch index.

    Each file becomes one batch; every object is truncated to its first
    NUM_POINT points (the data is described as prepared by farthest
    sampling, so the leading points are assumed representative).

    Args:
        file_names: sequence of data-file paths readable by utils.loadDataFile.
        NUM_POINT: number of points to retain per object.

    Returns:
        Tuple (inputData, inputLabel): dicts mapping batch index to the
        truncated point array and the squeezed integer label array.
    """
    inputData = {}
    inputLabel = {}
    # Idiom fix: iterate file_names directly instead of building an
    # np.arange index table and looping over range(len(...)).
    for index, fname in enumerate(file_names):
        current_data, current_label = utils.loadDataFile(fname)
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.int_(np.squeeze(current_label))
        inputData[index] = current_data
        inputLabel[index] = current_label
    return inputData, inputLabel
def uniformSampling(file_names, NUM_POINT):
    """Load point-cloud data into dictionaries, uniformly sampling points.

    Each file becomes one batch; for every object, NUM_POINT distinct point
    indices are drawn uniformly at random without replacement.

    Args:
        file_names: sequence of data-file paths readable by utils.loadDataFile.
        NUM_POINT: number of points to sample per object.

    Returns:
        Tuple (inputData, inputLabel): dicts mapping batch index to the
        sampled (num_objects, NUM_POINT, 3) array and the squeezed integer
        label array.
    """
    inputData = {}
    inputLabel = {}
    for index, fname in enumerate(file_names):
        current_data, current_label = utils.loadDataFile(fname)
        current_label = np.int_(np.squeeze(current_label))
        output = np.zeros((len(current_data), NUM_POINT, 3))
        for i, object_xyz in enumerate(current_data):
            # Randomly pick NUM_POINT point IDs from this object.
            # Fix: use the object's actual point count rather than the
            # hard-coded 2048 (identical for 2048-point clouds).
            samples_index = np.random.choice(
                object_xyz.shape[0], NUM_POINT, replace=False)
            output[i] = object_xyz[samples_index]
        inputData[index] = output
        inputLabel[index] = current_label
    return inputData, inputLabel
def uniformSampling(file_names, NUM_POINT):
    """Load point-cloud batches with uniform random point subsampling.

    Each file becomes one batch; every object contributes NUM_POINT points
    chosen uniformly at random (without replacement) out of 2048.

    Args:
        file_names: sequence of data-file paths readable by utils.loadDataFile.
        NUM_POINT: number of points to sample per object.

    Returns:
        Tuple (inputData, inputLabel): dicts mapping batch index to the
        sampled (num_objects, NUM_POINT, 3) array and the squeezed integer
        label array.
    """
    inputData, inputLabel = {}, {}
    for index in range(len(file_names)):
        current_data, current_label = utils.loadDataFile(file_names[index])
        labels = np.int_(np.squeeze(current_label))
        sampled = np.zeros((len(current_data), NUM_POINT, 3))
        for i in range(len(current_data)):
            # Draw NUM_POINT distinct indices from the 2048 points per object.
            chosen = np.random.choice(2048, NUM_POINT, replace=False)
            sampled[i] = current_data[i][chosen]
        inputData[index] = sampled
        inputLabel[index] = labels
    return inputData, inputLabel