def __init__(self, ply_dir, device, Normalize_=None, is_full_size=False, scale=None):
    # Load every .ply file in `ply_dir`, turn each point cloud into a numpy
    # representation and stack them into one float tensor on the target device.
    ply_numpy_list = []
    ply_numpy_list_label = []  # reserved for per-point labels; not used by this loader yet
    for fil in ply_dir:
        pc = ObjLoader.load_point_cloud(fil)
        # Half precision is sufficient for the raw coordinates and saves memory.
        pc.vertices = pc.vertices.astype('float16')
        # pc.colors = pc.colors.astype('float16')  # colors are currently not used
        if scale:
            # Rescale the coordinates (original grid dimension 1024) by `scale`.
            ply_numpy_list.append(
                NNdataProcess.transform_scaling(
                    pc.vertices, Original_dimension=1024, scale_factor=scale))
        else:
            ply_numpy_list.append(
                NNdataProcess.transform(pc.vertices, is_full_size=is_full_size))
    self.Tensor_ply_list = torch.from_numpy(np.array(ply_numpy_list)).type(
        torch.FloatTensor).to(device)
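# Usage sketch (hypothetical): `PlyRNNDataset` stands in for the enclosing class, whose
# real name is not shown in this section, and the glob pattern, device and scale value
# are placeholders only.
#
#   import glob
#   ply_files = sorted(glob.glob('longdress/ply/*.ply'))
#   dataset = PlyRNNDataset(ply_files, device=torch.device('cuda'),
#                           is_full_size=False, scale=0.5)
#   print(dataset.Tensor_ply_list.shape)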
@staticmethod
def tensor_to_ply(tensor, threshold, save_dir, scale_factor=None, Original_dimension=1024):
    # Thin wrapper around NNdataProcess.tensor_to_ply: writes `tensor` to `save_dir`
    # as a .ply file, passing `threshold` and the scaling parameters through unchanged.
    return NNdataProcess.tensor_to_ply(tensor, threshold, save_dir,
                                       scale_factor=scale_factor,
                                       Original_dimension=Original_dimension)
@staticmethod
def Create_Dataset():
    # Build the RNN training sequences from the raw Longdress frames and their
    # compressed counterparts, and save them to the configured training directory.
    raw_ply = config.pathToLongdress2Ply
    label_dir = config.pathToLongdress2Compressed_Ply
    save_dir = config.pathToLongdress2RNNTrain
    NNdataProcess.create_training_sequence_list_wise_for_RNN(raw_ply, label_dir, save_dir)
def get_training_data(self, testPart, data_dir):
    # Fetch the prepared training data from `data_dir`, reserving `testPart` of it for
    # testing; `is_return_dir=True` asks for directory references to be returned.
    return NNdataProcess.get_data(testPart=testPart, data_dir=data_dir, is_return_dir=True)
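# End-to-end sketch (hypothetical): `PlyRNNDataset` again stands in for the class above,
# `dataset` is the instance from the earlier sketch, the numeric values and output path
# are placeholders, and the exact structure returned by NNdataProcess.get_data is not
# shown in this section.
#
#   PlyRNNDataset.Create_Dataset()                    # write the RNN training sequences
#   data = dataset.get_training_data(testPart=0.2,
#                                    data_dir=config.pathToLongdress2RNNTrain)
#   # ...train the network on `data`, then export a predicted frame:
#   PlyRNNDataset.tensor_to_ply(prediction, threshold=0.5,
#                               save_dir='out/frame_0000.ply', scale_factor=0.5)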