Example #1
def visu_caller(path, step_ind, relu_idx):

    ##########################
    # Initiate the environment
    ##########################

    # Choose which gpu to use
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

    # Disable warnings
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    ###########################
    # Load the model parameters
    ###########################

    # Load model parameters
    config = Config()
    config.load(path)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data.

    # No augmentation to avoid random inputs
    config.augment_scale_anisotropic = False
    config.augment_symmetries = [False, False, False]
    config.augment_rotation = 'none'
    config.augment_scale_min = 1.0
    config.augment_scale_max = 1.0
    config.augment_noise = 0.0
    config.augment_occlusion = 'none'
    config.augment_color = 1.0

    # Override the batch size and input sphere radius for the ERF computation
    config.batch_num = 2
    config.in_radius = 5

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    if config.dataset.startswith('ModelNet40'):
        dataset = ModelNet40Dataset(config.input_threads)
    elif config.dataset == 'S3DIS':
        dataset = S3DISDataset(config.input_threads)
        on_val = True
    elif config.dataset == 'Scannet':
        dataset = ScannetDataset(config.input_threads, load_test=True)
    elif config.dataset.startswith('ShapeNetPart'):
        dataset = ShapeNetPartDataset(
            config.dataset.split('_')[1], config.input_threads)
    elif config.dataset == 'NPM3D':
        dataset = NPM3DDataset(config.input_threads, load_test=True)
    elif config.dataset == 'Semantic3D':
        dataset = Semantic3DDataset(config.input_threads)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Create subsampled clouds of the models
    dl0 = config.first_subsampling_dl
    dataset.load_subsampled_clouds(dl0)

    # Initiate the ERF input pipeline (the only difference is that it is not random)
    dataset.init_ERF_input_pipeline(config)

    ##############
    # Define Model
    ##############

    print('Creating Model')
    print('**************\n')
    t1 = time.time()

    if config.dataset.startswith('ShapeNetPart'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('S3DIS'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('Scannet'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('NPM3D'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('ModelNet40'):
        model = KernelPointCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('Semantic3D'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Find all snapshots in the chosen training folder
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [
        int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path)
        if f[-5:] == '.meta'
    ]
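    # e.g. a meta file named 'snap-23000.meta' yields the training step 23000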

    # Find which snapshot to restore
    chosen_step = np.sort(snap_steps)[step_ind]
    chosen_snap = os.path.join(path, 'snapshots',
                               'snap-{:d}'.format(chosen_step))

    # Create a visualizer class
    visualizer = ModelVisualizer(model, restore_snap=chosen_snap)
    t2 = time.time()

    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')

    #####################
    # Start visualization
    #####################

    print('Start visualization')
    print('*******************\n')

    visualizer.show_effective_recep_field(model, dataset, relu_idx)
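
# A minimal usage sketch, not part of the original script: how this caller
# might be invoked, assuming a hypothetical trained-model folder under 'results'.
if __name__ == '__main__':

    # Path to a saved training log (hypothetical folder name)
    chosen_log = 'results/Log_2019-03-19_19-14-24'

    # Use the latest snapshot (step_ind=-1) and show the ERF of a chosen ReLU layer
    visu_caller(chosen_log, step_ind=-1, relu_idx=10)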
Example #2
        else:
            this_dataset = config.dataset
        if plot_dataset:
            if plot_dataset == this_dataset:
                continue
            else:
                raise ValueError(
                    'All logs must share the same dataset to be compared')
        else:
            plot_dataset = this_dataset

    # Plot the training loss and accuracy
    compare_trainings(logs, logs_names)

    # Plot the validation
    if config.dataset_task == 'classification':
        compare_convergences_classif(logs, logs_names)
    elif config.dataset_task == 'cloud_segmentation':
        if config.dataset.startswith('S3DIS'):
            dataset = S3DISDataset(config, load_data=False)
            compare_convergences_segment(dataset, logs, logs_names)
        elif config.dataset == 'APRPointCloud':
            dataset = APRPointCloudDataset(config)
            compare_convergences_segment(dataset, logs, logs_names)
    elif config.dataset_task == 'slam_segmentation':
        if config.dataset.startswith('SemanticKitti'):
            dataset = SemanticKittiDataset(config)
            compare_convergences_SLAM(dataset, logs, logs_names)
    else:
        raise ValueError('Unsupported dataset task : ' + config.dataset_task)
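
# A minimal sketch, not part of the original script, of how the inputs to the
# comparison above might be gathered; the folder and display names are hypothetical.
if __name__ == '__main__':

    # Training result folders to compare (hypothetical paths)
    logs = ['results/Log_2020-04-03_11-12-07',
            'results/Log_2020-04-07_18-22-18']

    # One display name per log
    logs_names = ['baseline', 'deeper_net']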
Example #3
def visu_caller(path, step_ind, relu_idx, compute_activations):

    # Check if activations have already been computed
    if relu_idx is not None:
        visu_path = os.path.join('visu', 'visu_' + path.split('/')[-1],
                                 'top_activations',
                                 'Relu{:02d}'.format(relu_idx))
        if not os.path.exists(visu_path):
            message = 'No activations found for Relu number {:d} of the model {:s}.'
            print(message.format(relu_idx, path.split('/')[-1]))
            compute_activations = True
        else:
            # Get the list of files
            feature_files = np.sort(
                [f for f in os.listdir(visu_path) if f.endswith('.ply')])
            if len(feature_files) == 0:
                message = 'No activations found for Relu number {:d} of the model {:s}.'
                print(message.format(relu_idx, path.split('/')[-1]))
                compute_activations = True
    else:
        compute_activations = True

    if compute_activations:

        ##########################
        # Initiate the environment
        ##########################

        # Choose which gpu to use
        GPU_ID = '0'

        # Set GPU visible device
        os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

        # Disable warnings
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

        ###########################
        # Load the model parameters
        ###########################

        # Load model parameters
        config = Config()
        config.load(path)

        ##################################
        # Change model parameters for test
        ##################################

        # Change parameters for the test here. For example, you can stop augmenting the input data.

        #config.augment_noise = 0.0001
        #config.augment_symmetries = False

        # Override batch size, input radius and validation size for this run
        config.batch_num = 3
        config.in_radius = 4
        config.validation_size = 200

        ##############
        # Prepare Data
        ##############

        print()
        print('Dataset Preparation')
        print('*******************')

        # Initiate dataset configuration
        if config.dataset.startswith('ModelNet40'):
            dataset = ModelNet40Dataset(config.input_threads)
        elif config.dataset == 'S3DIS':
            dataset = S3DISDataset(config.input_threads)
            on_val = True
        elif config.dataset == 'Scannet':
            dataset = ScannetDataset(config.input_threads, load_test=True)
        elif config.dataset.startswith('ShapeNetPart'):
            dataset = ShapeNetPartDataset(
                config.dataset.split('_')[1], config.input_threads)
        elif config.dataset == 'NPM3D':
            dataset = NPM3DDataset(config.input_threads, load_test=True)
        elif config.dataset == 'Semantic3D':
            dataset = Semantic3DDataset(config.input_threads)
        else:
            raise ValueError('Unsupported dataset : ' + config.dataset)

        # Create subsampled clouds of the models
        dl0 = config.first_subsampling_dl
        dataset.load_subsampled_clouds(dl0)

        # Initialize input pipelines
        if config.dataset == 'S3DIS':
            dataset.init_input_pipeline(config)
        else:
            dataset.init_test_input_pipeline(config)

        ##############
        # Define Model
        ##############

        print('Creating Model')
        print('**************\n')
        t1 = time.time()

        if config.dataset.startswith('ShapeNetPart'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('S3DIS'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('Scannet'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('NPM3D'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('ModelNet40'):
            model = KernelPointCNN(dataset.flat_inputs, config)
        elif config.dataset.startswith('Semantic3D'):
            model = KernelPointFCNN(dataset.flat_inputs, config)
        else:
            raise ValueError('Unsupported dataset : ' + config.dataset)

        # Find all snapshots in the chosen training folder
        snap_path = os.path.join(path, 'snapshots')
        snap_steps = [
            int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path)
            if f[-5:] == '.meta'
        ]

        # Find which snapshot to restore
        chosen_step = np.sort(snap_steps)[step_ind]
        chosen_snap = os.path.join(path, 'snapshots',
                                   'snap-{:d}'.format(chosen_step))

        # Create a visualizer class
        visualizer = ModelVisualizer(model, restore_snap=chosen_snap)
        t2 = time.time()

        print('\n----------------')
        print('Done in {:.1f} s'.format(t2 - t1))
        print('----------------\n')

        #####################
        # Start visualization
        #####################

        print('Start visualization')
        print('*******************\n')

        relu_idx = visualizer.top_relu_activations(model, dataset, relu_idx)

    # Show the computed activations
    ModelVisualizer.show_activation(path, relu_idx)
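
# A minimal usage sketch, not part of the original script, with hypothetical
# values: reuse saved activations when present, otherwise recompute them.
if __name__ == '__main__':

    # Path to a saved training log (hypothetical folder name)
    chosen_log = 'results/Log_2019-03-19_19-14-24'

    # Even with compute_activations=False, the caller recomputes if no saved
    # activations are found for this ReLU layer
    visu_caller(chosen_log, step_ind=-1, relu_idx=2, compute_activations=False)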
Example #4
def test_caller(path, step_ind, on_val):

    ##########################
    # Initiate the environment
    ##########################

    # Choose which gpu to use
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

    # Disable warnings
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    ###########################
    # Load the model parameters
    ###########################

    # Load model parameters
    config = Config()
    config.load(path)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data.

    #config.augment_noise = 0.0001
    #config.augment_color = 1.0
    config.validation_size = 500
    #config.batch_num = 10

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    if config.dataset.startswith('ModelNet40'):
        dataset = ModelNet40Dataset(config.input_threads)
    elif config.dataset == 'S3DIS':
        dataset = S3DISDataset(config.input_threads)
        on_val = True
    elif config.dataset == 'Scannet':
        dataset = ScannetDataset(config.input_threads, load_test=(not on_val))
    elif config.dataset.startswith('ShapeNetPart'):
        dataset = ShapeNetPartDataset(
            config.dataset.split('_')[1], config.input_threads)
    elif config.dataset == 'NPM3D':
        dataset = NPM3DDataset(config.input_threads, load_test=(not on_val))
    elif config.dataset == 'Semantic3D':
        dataset = Semantic3DDataset(config.input_threads)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Create subsampled clouds of the models
    dl0 = config.first_subsampling_dl
    dataset.load_subsampled_clouds(dl0)

    # Initialize input pipelines
    if on_val:
        dataset.init_input_pipeline(config)
    else:
        dataset.init_test_input_pipeline(config)

    ##############
    # Define Model
    ##############

    print('Creating Model')
    print('**************\n')
    t1 = time.time()

    if config.dataset.startswith('ShapeNetPart'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('S3DIS'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('Scannet'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('NPM3D'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('ModelNet40'):
        model = KernelPointCNN(dataset.flat_inputs, config)
    elif config.dataset.startswith('Semantic3D'):
        model = KernelPointFCNN(dataset.flat_inputs, config)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Find all snapshots in the chosen training folder
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [
        int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path)
        if f[-5:] == '.meta'
    ]

    # Find which snapshot to restore
    chosen_step = np.sort(snap_steps)[step_ind]
    chosen_snap = os.path.join(path, 'snapshots',
                               'snap-{:d}'.format(chosen_step))

    # Create a tester class
    tester = ModelTester(model, restore_snap=chosen_snap)
    t2 = time.time()

    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')

    ############
    # Start test
    ############

    print('Start Test')
    print('**********\n')

    if config.dataset.startswith('ShapeNetPart'):
        if config.dataset.split('_')[1] == 'multi':
            tester.test_multi_segmentation(model, dataset)
        else:
            tester.test_segmentation(model, dataset)
    elif config.dataset.startswith('S3DIS'):
        tester.test_cloud_segmentation_on_val(model, dataset)
    elif config.dataset.startswith('Scannet'):
        if on_val:
            tester.test_cloud_segmentation_on_val(model, dataset)
        else:
            tester.test_cloud_segmentation(model, dataset)
    elif config.dataset.startswith('Semantic3D'):
        if on_val:
            tester.test_cloud_segmentation_on_val(model, dataset)
        else:
            tester.test_cloud_segmentation(model, dataset)
    elif config.dataset.startswith('NPM3D'):
        if on_val:
            tester.test_cloud_segmentation_on_val(model, dataset)
        else:
            tester.test_cloud_segmentation(model, dataset)
    elif config.dataset.startswith('ModelNet40'):
        tester.test_classification(model, dataset)
    else:
        raise ValueError('Unsupported dataset')
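
# A minimal usage sketch, not part of the original script, with a hypothetical
# log folder: test the most recent snapshot on the validation set.
if __name__ == '__main__':

    # Path to a saved training log (hypothetical folder name)
    chosen_log = 'results/Log_2019-03-19_19-14-24'

    # step_ind=-1 selects the most recent snapshot
    test_caller(chosen_log, step_ind=-1, on_val=True)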
Example #5
    ###########################
    # Load the model parameters
    ###########################

    config = S3DISConfig()

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    dataset = S3DISDataset(config.input_threads)

    # Create subsampled input clouds
    dl0 = config.first_subsampling_dl
    dataset.load_subsampled_clouds(dl0)

    # Initialize input pipelines
    dataset.init_input_pipeline(config)

    # Test the input pipeline alone with this debug function
    # dataset.check_input_pipeline_timing(config)

    ##############
    # Define Model
    ##############
Example #6
        if plot_dataset:
            if plot_dataset in config.dataset:
                continue
            else:
                raise ValueError('All logs must share the same dataset to be compared')
        else:
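            # Keep a 5-character prefix, e.g. 'ShapeNetPart' -> 'Shape'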
            plot_dataset = config.dataset[:5]

    # Plot the training loss and accuracy
    compare_trainings(logs, logs_names)

    # Plot the validation
    if plot_dataset.startswith('Shape'):
        compare_convergences_multisegment(logs, logs_names)
    elif plot_dataset.startswith('S3DIS'):
        dataset = S3DISDataset()
        compare_convergences_segment(dataset, logs, logs_names)
    elif plot_dataset.startswith('Model'):
        dataset = ModelNet40Dataset()
        compare_convergences_classif(dataset, logs, logs_names)
    elif plot_dataset.startswith('Scann'):
        dataset = ScannetDataset()
        compare_convergences_segment(dataset, logs, logs_names)
    elif plot_dataset.startswith('Seman'):
        dataset = Semantic3DDataset()
        compare_convergences_segment(dataset, logs, logs_names)
    elif plot_dataset.startswith('NPM3D'):
        dataset = NPM3DDataset()
        compare_convergences_segment(dataset, logs, logs_names)
    else:
        raise ValueError('Unsupported dataset : ' + plot_dataset)