def visu_caller(path, step_ind, relu_idx):

    ##########################
    # Initiate the environment
    ##########################

    # Choose which gpu to use
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

    # Disable warnings
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    ###########################
    # Load the model parameters
    ###########################

    # Load model parameters
    config = Config()
    config.load(path)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data.

    # No augmentation to avoid random inputs
    config.augment_scale_anisotropic = False
    config.augment_symmetries = [False, False, False]
    config.augment_rotation = 'none'
    config.augment_scale_min = 1.0
    config.augment_scale_max = 1.0
    config.augment_noise = 0.0
    config.augment_occlusion = 'none'
    config.augment_color = 1.0
    config.batch_num = 2
    config.in_radius = 5

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    if config.dataset.startswith('ModelNet40'):
        dataset = ModelNet40Dataset(config.input_threads)
    elif config.dataset == 'S3DIS':
        dataset = S3DISDataset(config.input_threads)
        on_val = True
    elif config.dataset == 'Scannet':
        dataset = ScannetDataset(config.input_threads, load_test=True)
    elif config.dataset.startswith('ShapeNetPart'):
        dataset = ShapeNetPartDataset(config.dataset.split('_')[1], config.input_threads)
    elif config.dataset == 'NPM3D':
        dataset = NPM3DDataset(config.input_threads, load_test=True)
    elif config.dataset == 'Semantic3D':
        dataset = Semantic3DDataset(config.input_threads)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Create subsampled clouds of the models
    dl0 = config.first_subsampling_dl
    dataset.load_subsampled_clouds(dl0)

    # Initiate ERF input pipeline (the only difference is that it is not random)
    dataset.init_ERF_input_pipeline(config)

    ##############
    # Define Model
    ##############

    print('Creating Model')
    print('**************\n')
    t1 = time.time()

    if config.dataset.startswith('ShapeNetPart'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('S3DIS'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('Scannet'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('NPM3D'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('ModelNet40'):
        model = KernelPointCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('Semantic3D'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Find all snapshots in the chosen training folder
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path) if f[-5:] == '.meta']

    # Find which snapshot to restore
    chosen_step = np.sort(snap_steps)[step_ind]
    chosen_snap = os.path.join(path, 'snapshots', 'snap-{:d}'.format(chosen_step))

    # Create a visualizer class
    visualizer = ModelVisualizer(model, restore_snap=chosen_snap)
    t2 = time.time()

    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')

    #####################
    # Start visualization
    #####################

    print('Start visualization')
    print('*******************\n')

    visualizer.show_effective_recep_field(model, dataset, relu_idx)
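# Hypothetical usage sketch (not part of the original script): the log folder name and the
# indices below are placeholder assumptions. The path is expected to point to a training log
# that contains a 'snapshots' sub-folder; step_ind=-1 selects the latest snapshot (via the
# sorted snapshot list above), and relu_idx picks which ReLU layer's effective receptive
# field to display.
if __name__ == '__main__':
    chosen_log = 'results/Log_XXXX-XX-XX_XX-XX-XX'  # placeholder log directory
    visu_caller(chosen_log, step_ind=-1, relu_idx=0)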
                             num_workers=config.input_threads,
                             pin_memory=True)

    # Calibrate samplers
    test_sampler.calibration(test_loader, verbose=True)

    print('\nModel Preparation')
    print('*****************')

    # Define network model
    t1 = time.time()
    if config.dataset_task == 'classification':
        net = KPCNN(config)
    elif config.dataset_task in ['cloud_segmentation', 'slam_segmentation']:
        net = KPFCNN(config, test_dataset.label_values, test_dataset.ignored_labels)
    else:
        raise ValueError('Unsupported dataset_task for deformation visu: ' + config.dataset_task)

    # Define a visualizer class
    visualizer = ModelVisualizer(net, config, chkp_path=chosen_chkp, on_gpu=False)
    print('Done in {:.1f}s\n'.format(time.time() - t1))

    print('\nStart visualization')
    print('*******************')

    # Visualize the deformable kernels on test batches
    visualizer.show_deformable_kernels(net, test_loader, config, deform_idx)
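# Note on the choices above (a reading of the code, not a statement from the authors):
# the sampler is calibrated on the test loader before any forward pass so that batch sizes
# match the configured limits, and the ModelVisualizer is created with on_gpu=False,
# presumably so the deformed kernel points can be inspected interactively without tying up
# GPU memory. The variable chosen_chkp is defined earlier in the truncated part of this
# script and is left as-is here.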
def visu_caller(path, step_ind, relu_idx, compute_activations):

    # Check if activations have already been computed
    if relu_idx is not None:
        visu_path = os.path.join('visu',
                                 'visu_' + path.split('/')[-1],
                                 'top_activations',
                                 'Relu{:02d}'.format(relu_idx))
        if not os.path.exists(visu_path):
            message = 'No activations found for Relu number {:d} of the model {:s}.'
            print(message.format(relu_idx, path.split('/')[-1]))
            compute_activations = True
        else:
            # Get the list of files
            feature_files = np.sort([f for f in os.listdir(visu_path) if f.endswith('.ply')])
            if len(feature_files) == 0:
                message = 'No activations found for Relu number {:d} of the model {:s}.'
                print(message.format(relu_idx, path.split('/')[-1]))
                compute_activations = True
    else:
        compute_activations = True

    if compute_activations:

        ##########################
        # Initiate the environment
        ##########################

        # Choose which gpu to use
        GPU_ID = '0'

        # Set GPU visible device
        os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

        # Disable warnings
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

        ###########################
        # Load the model parameters
        ###########################

        # Load model parameters
        config = Config()
        config.load(path)

        ##################################
        # Change model parameters for test
        ##################################

        # Change parameters for the test here. For example, you can stop augmenting the input data.
        # config.augment_noise = 0.0001
        # config.augment_symmetries = False

        config.batch_num = 3
        config.in_radius = 4
        config.validation_size = 200

        ##############
        # Prepare Data
        ##############

        print()
        print('Dataset Preparation')
        print('*******************')

        # Initiate dataset configuration
        if config.dataset.startswith('ModelNet40'):
            dataset = ModelNet40Dataset(config.input_threads)
        elif config.dataset == 'S3DIS':
            dataset = S3DISDataset(config.input_threads)
            on_val = True
        elif config.dataset == 'Scannet':
            dataset = ScannetDataset(config.input_threads, load_test=True)
        elif config.dataset.startswith('ShapeNetPart'):
            dataset = ShapeNetPartDataset(config.dataset.split('_')[1], config.input_threads)
        elif config.dataset == 'NPM3D':
            dataset = NPM3DDataset(config.input_threads, load_test=True)
        elif config.dataset == 'Semantic3D':
            dataset = Semantic3DDataset(config.input_threads)
        else:
            raise ValueError('Unsupported dataset : ' + config.dataset)

        # Create subsampled clouds of the models
        dl0 = config.first_subsampling_dl
        dataset.load_subsampled_clouds(dl0)

        # Initialize input pipelines
        if config.dataset == 'S3DIS':
            dataset.init_input_pipeline(config)
        else:
            dataset.init_test_input_pipeline(config)

        ##############
        # Define Model
        ##############

        print('Creating Model')
        print('**************\n')
        t1 = time.time()

        if config.dataset.startswith('ShapeNetPart'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('S3DIS'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('Scannet'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('NPM3D'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('ModelNet40'):
            model = KernelPointCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('Semantic3D'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        else:
            raise ValueError('Unsupported dataset : ' + config.dataset)

        # Find all snapshots in the chosen training folder
        snap_path = os.path.join(path, 'snapshots')
        snap_steps = [int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path) if f[-5:] == '.meta']

        # Find which snapshot to restore
        chosen_step = np.sort(snap_steps)[step_ind]
        chosen_snap = os.path.join(path, 'snapshots', 'snap-{:d}'.format(chosen_step))

        # Create a visualizer class
        visualizer = ModelVisualizer(model, restore_snap=chosen_snap)
        t2 = time.time()

        print('\n----------------')
        print('Done in {:.1f} s'.format(t2 - t1))
        print('----------------\n')

        #####################
        # Start visualization
        #####################

        print('Start visualization')
        print('*******************\n')

        relu_idx = visualizer.top_relu_activations(model, dataset, relu_idx)

    # Show the computed activations
    ModelVisualizer.show_activation(path, relu_idx)
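# Hypothetical usage sketch (not part of the original script): names below are placeholder
# assumptions. With compute_activations=False the function only recomputes the top
# activations when no saved .ply files are found on disk for the requested ReLU layer;
# otherwise it goes straight to showing the previously computed activations.
if __name__ == '__main__':
    chosen_log = 'results/Log_XXXX-XX-XX_XX-XX-XX'  # placeholder log directory
    visu_caller(chosen_log, step_ind=-1, relu_idx=2, compute_activations=False)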
def visu_caller(path, step_ind, deform_idx, dataset_path):

    ##########################
    # Initiate the environment
    ##########################

    # Choose which gpu to use
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

    # Disable warnings
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    ###########################
    # Load the model parameters
    ###########################

    # Load model parameters
    config = Config()
    config.load(path)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data.
    # config.augment_noise = 0.0001
    # config.augment_symmetries = False
    # config.batch_num = 3

    config.in_radius = 4

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    dl0 = 0  # config.first_subsampling_dl
    if config.dataset.startswith('pc_shapenetCompletionBenchmark2048'):
        dataset = ShapeNetBenchmark2048Dataset(config.batch_num, config.num_input_points, dataset_path)

        # Create subsampled clouds of the models
        dataset.load_subsampled_clouds(dl0)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Initialize input pipelines
    dataset.init_test_input_pipeline(config)

    ##############
    # Define Model
    ##############

    print('Creating Model')
    print('**************\n')
    t1 = time.time()

    if config.dataset.startswith('ShapeNetV1') or config.dataset.startswith('pc_shapenetCompletionBenchmark2048'):
        model = KernelPointCompletionNetwork(dataset.flat_inputs, config, False)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Find all snapshots in the chosen training folder
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path) if f[-5:] == '.meta']

    # Find which snapshot to restore
    if step_ind == -1:
        chosen_step = np.sort(snap_steps)[step_ind]
    else:
        chosen_step = step_ind + 1
    chosen_snap = os.path.join(path, 'snapshots', 'snap-{:d}'.format(chosen_step))

    # Create a visualizer class
    visualizer = ModelVisualizer(model, restore_snap=chosen_snap)
    t2 = time.time()

    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')

    #####################
    # Start visualization
    #####################

    print('Start visualization')
    print('*******************\n')

    visualizer.show_deformable_kernels(model, dataset, deform_idx)
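# Hypothetical usage sketch (not part of the original script): paths and indices are
# placeholder assumptions. Note the snapshot selection above: step_ind=-1 picks the latest
# snapshot from the sorted list, while any other value is interpreted as 'snap-(step_ind + 1)'.
if __name__ == '__main__':
    chosen_log = 'results/Log_XXXX-XX-XX_XX-XX-XX'   # placeholder training log
    data_dir = 'path/to/shapenet_benchmark_2048'     # placeholder dataset root
    visu_caller(chosen_log, step_ind=-1, deform_idx=0, dataset_path=data_dir)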