Code Example #1
def generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:
    """
    Writes a human readable summary of the present model to logging.info, and logs the number of trainable
    parameters to AzureML.

    :param config: The configuration for the model.
    :param model: The instantiated Pytorch model.
    """
    random_state = RandomStateSnapshot.snapshot_random_state()
    # There appears to be a bug in apex, where previous use (in training for example) causes problems
    # when another model is later built on the CPU (for example, before loading from a checkpoint)
    # https://github.com/NVIDIA/apex/issues/694
    # Hence, move the model to the GPU before doing model summary.
    if config.use_gpu:
        model = model.cuda()
    if isinstance(config, ScalarModelBase):
        # To generate the model summary, read the first item of the dataset. Then use the model's own
        # get_model_input function to convert the dataset item to input tensors, and feed them through the model.
        train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)
        train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))
        model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs
        # The model inputs may already be converted to float16, assuming that we would do mixed precision.
        # However, the model is not yet converted to float16 when this function is called, hence convert back to float32
        summary = ModelSummary(model)
        summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)
    elif config.is_segmentation_model:
        summary_for_segmentation_models(config, model)
        assert model.summarizer
        summary = model.summarizer  # type: ignore
    else:
        raise ValueError("Don't know how to generate a summary for this type of model?")
    RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)
    random_state.restore_random_state()
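
The function above brackets summary generation with a snapshot and restore of the global random state, so the dummy forward pass does not perturb subsequent training. A minimal sketch of what RandomStateSnapshot presumably wraps (the real class may also capture CUDA RNG state; SimpleRandomStateSnapshot is a hypothetical stand-in):

import random
import numpy as np
import torch

class SimpleRandomStateSnapshot:
    """Captures the global RNG state of random, numpy and torch, and restores it on demand."""

    def __init__(self) -> None:
        self.python_state = random.getstate()
        self.numpy_state = np.random.get_state()
        self.torch_state = torch.get_rng_state()

    @classmethod
    def snapshot_random_state(cls) -> "SimpleRandomStateSnapshot":
        return cls()

    def restore_random_state(self) -> None:
        random.setstate(self.python_state)
        np.random.set_state(self.numpy_state)
        torch.set_rng_state(self.torch_state)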
Code Example #2
def test_model_summary_on_classification2() -> None:
    image_channels = 2
    model = ImageEncoderWithMlp(imaging_feature_type=ImagingFeatureType.Segmentation,
                                encode_channels_jointly=False,
                                num_encoder_blocks=3,
                                initial_feature_channels=2,
                                num_image_channels=image_channels,
                                mlp_dropout=0.5)
    summarizer = ModelSummary(model)
    summarizer.generate_summary(input_sizes=[(image_channels * HDF5_NUM_SEGMENTATION_CLASSES, 6, 32, 32)])
    assert summarizer.n_params != 0
    assert summarizer.n_trainable_params != 0
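
The assertions on n_params and n_trainable_params correspond to straightforward parameter counting. For reference, the same counts can be computed with plain PyTorch (count_parameters is a hypothetical helper, not part of the library):

from typing import Tuple
import torch

def count_parameters(model: torch.nn.Module) -> Tuple[int, int]:
    # Total and trainable parameter counts, mirroring what n_params and
    # n_trainable_params are expected to report.
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return total, trainable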
Code Example #3
def test_unet_summary_generation() -> None:
    """Checks unet summary generation works either in CPU or GPU"""
    model = UNet3D(input_image_channels=1,
                   initial_feature_channels=2,
                   num_classes=2,
                   kernel_size=1,
                   num_downsampling_paths=2)
    if machine_has_gpu:
        model.cuda()
    summary = ModelSummary(model=model).generate_summary(input_sizes=[(1, 4, 4, 4)])
    assert summary is not None
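
The test runs unchanged on CPU and GPU because the summary's dummy inputs must live on the same device as the model. A minimal sketch of that device-matching step, assuming the model has at least one parameter (make_dummy_input is a hypothetical helper):

from typing import Tuple
import torch

def make_dummy_input(model: torch.nn.Module, input_size: Tuple[int, ...]) -> torch.Tensor:
    # Place a zero-filled batch of size 1 on whatever device the model's parameters use.
    device = next(model.parameters()).device
    return torch.zeros(1, *input_size, device=device)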
Code Example #4
def generate_model_summary(self,
                           crop_size: Optional[TupleInt3] = None,
                           log_summaries_to_files: bool = False) -> None:
    """
    Stores a model summary, containing information about layers, memory consumption and runtime,
    in the model.summary field.
    When called again with the same crop_size, the summary is not created again.
    :param crop_size: The crop size for which the summary should be created. If not provided,
    the minimum allowed crop size is used.
    :param log_summaries_to_files: Whether to write the summary to a file.
    """
    if crop_size is None:
        crop_size = self.crop_size_constraints.minimum_size  # type: ignore
        assert crop_size is not None
    # Compare against the crop size tuple itself: wrapping it in a list would never
    # compare equal to the stored tuple, so the cached summary would never be reused.
    if self.summary is None or self.summary_crop_size != crop_size:
        self.summarizer = ModelSummary(self)
        self.summary = self.summarizer.generate_summary(
            input_sizes=[(self.input_channels, *crop_size)],
            log_summaries_to_files=log_summaries_to_files)
        self.summary_crop_size = crop_size
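
Calling the method twice with the same crop size reuses the stored summary, as the docstring promises. A hedged usage sketch, assuming UNet3D is a BaseSegmentationModel subclass (its constructor appears in the other examples):

model = UNet3D(input_image_channels=1,
               initial_feature_channels=2,
               num_classes=2,
               kernel_size=1,
               num_downsampling_paths=2)
model.generate_model_summary(crop_size=(4, 4, 4))
first_summary = model.summary
model.generate_model_summary(crop_size=(4, 4, 4))
assert model.summary is first_summary  # cached summary reused, not regenerated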
Code Example #5
def test_unet_model_parallel() -> None:
    """Checks model parallel utilises all the available GPU devices for forward pass"""
    if no_gpu_available:
        logging.warning("CUDA capable GPU is not available - UNet Model Parallel cannot be tested")
        return
    model = UNet3D(input_image_channels=1,
                   initial_feature_channels=2,
                   num_classes=2,
                   kernel_size=1,
                   num_downsampling_paths=2).cuda()
    # Partition the network across all available GPUs
    available_devices = [torch.device('cuda:{}'.format(ii)) for ii in range(torch.cuda.device_count())]
    model.generate_model_summary()
    model.partition_model(devices=available_devices)

    # Verify that all the devices are utilised by the layers of the model
    summary = ModelSummary(model=model).generate_summary(input_sizes=[(1, 4, 4, 4)])
    layer_devices = set()
    for layer_summary in summary.values():
        if layer_summary.device:
            layer_devices.add(layer_summary.device)

    assert layer_devices == set(available_devices)
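
The per-layer device check relies on the summary recording a device for each layer. An equivalent check in plain PyTorch inspects parameter placement directly (devices_used_by is a hypothetical helper):

import torch

def devices_used_by(model: torch.nn.Module) -> set:
    # Collect the device of every parameter; after partitioning, this set should
    # contain all the GPUs the model was spread across.
    return {p.device for p in model.parameters()}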
Code Example #6
def test_model_summary_on_classification1() -> None:
    model = create_model_with_temperature_scaling(GlaucomaPublic())
    ModelSummary(model).generate_summary(input_sizes=[(1, 6, 64, 60)])
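
create_model_with_temperature_scaling is not shown in this listing; the standard technique it names wraps a classifier so that its logits are divided by a learnable temperature. A minimal, hypothetical sketch of such a wrapper:

import torch

class TemperatureScaledModel(torch.nn.Module):
    """Divides the wrapped model's logits by a single learnable temperature,
    the usual formulation of temperature scaling for calibration."""

    def __init__(self, model: torch.nn.Module, initial_temperature: float = 1.0) -> None:
        super().__init__()
        self.model = model
        self.temperature = torch.nn.Parameter(torch.tensor(initial_temperature))

    def forward(self, *inputs: torch.Tensor) -> torch.Tensor:
        return self.model(*inputs) / self.temperature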
Code Example #7
def test_image_encoder(test_output_dirs: OutputFolderForTests,
                       encode_channels_jointly: bool,
                       use_non_imaging_features: bool,
                       kernel_size_per_encoding_block: Optional[Union[TupleInt3, List[TupleInt3]]],
                       stride_size_per_encoding_block: Optional[Union[TupleInt3, List[TupleInt3]]],
                       reduction_factor: float,
                       expected_num_reduced_features: int,
                       aggregation_type: AggregationType) -> None:
    """
    Test if the image encoder networks can be trained without errors (including GradCam computation and data
    augmentation).
    """
    logging_to_stdout()
    set_random_seed(0)
    dataset_folder = Path(test_output_dirs.make_sub_dir("dataset"))
    scan_size = (6, 64, 60)
    scan_files: List[str] = []
    for s in range(4):
        random_scan = np.random.uniform(0, 1, scan_size)
        scan_file_name = f"scan{s + 1}{NumpyFile.NUMPY.value}"
        np.save(str(dataset_folder / scan_file_name), random_scan)
        scan_files.append(scan_file_name)

    dataset_contents = """subject,channel,path,label,numerical1,numerical2,categorical1,categorical2
S1,week0,scan1.npy,,1,10,Male,Val1
S1,week1,scan2.npy,True,2,20,Female,Val2
S2,week0,scan3.npy,,3,30,Female,Val3
S2,week1,scan4.npy,False,4,40,Female,Val1
S3,week0,scan1.npy,,5,50,Male,Val2
S3,week1,scan3.npy,True,6,60,Male,Val2
"""
    (dataset_folder / "dataset.csv").write_text(dataset_contents)
    numerical_columns = ["numerical1", "numerical2"] if use_non_imaging_features else []
    categorical_columns = ["categorical1", "categorical2"] if use_non_imaging_features else []
    non_image_feature_channels = get_non_image_features_dict(default_channels=["week1", "week0"],
                                                             specific_channels={"categorical2": ["week1"]}) \
        if use_non_imaging_features else {}
    config_for_dataset = ScalarModelBase(
        local_dataset=dataset_folder,
        image_channels=["week0", "week1"],
        image_file_column="path",
        label_channels=["week1"],
        label_value_column="label",
        non_image_feature_channels=non_image_feature_channels,
        numerical_columns=numerical_columns,
        categorical_columns=categorical_columns,
        should_validate=False
    )
    config_for_dataset.read_dataset_into_dataframe_and_pre_process()

    dataset = ScalarDataset(config_for_dataset,
                            sample_transforms=ScalarItemAugmentation(
                                RandAugmentSlice(is_transformation_for_segmentation_maps=False)))
    assert len(dataset) == 3

    config = ImageEncoder(
        encode_channels_jointly=encode_channels_jointly,
        should_validate=False,
        numerical_columns=numerical_columns,
        categorical_columns=categorical_columns,
        non_image_feature_channels=non_image_feature_channels,
        categorical_feature_encoder=config_for_dataset.categorical_feature_encoder,
        encoder_dimensionality_reduction_factor=reduction_factor,
        aggregation_type=aggregation_type,
        scan_size=scan_size
    )

    if kernel_size_per_encoding_block:
        config.kernel_size_per_encoding_block = kernel_size_per_encoding_block
    if stride_size_per_encoding_block:
        config.stride_size_per_encoding_block = stride_size_per_encoding_block

    config.set_output_to(test_output_dirs.root_dir)
    config.max_batch_grad_cam = 1
    model = create_model_with_temperature_scaling(config)
    input_size: List[Tuple] = [(len(config.image_channels), *scan_size)]
    if use_non_imaging_features:
        input_size.append((config.get_total_number_of_non_imaging_features(),))

        # The original (unreduced) number of output channels is
        # num_initial_channels * (num_encoder_blocks - 1) = 4 * (3 - 1) = 8
        if encode_channels_jointly:
            # reduced_num_channels + num_non_img_features
            assert model.final_num_feature_channels == expected_num_reduced_features + \
                   config.get_total_number_of_non_imaging_features()
        else:
            # num_img_channels * reduced_num_channels + num_non_img_features
            assert model.final_num_feature_channels == len(config.image_channels) * expected_num_reduced_features + \
                   config.get_total_number_of_non_imaging_features()

    summarizer = ModelSummary(model)
    summarizer.generate_summary(input_sizes=input_size)
    config.local_dataset = dataset_folder
    config.validate()
    model_train(config, checkpoint_handler=get_default_checkpoint_handler(model_config=config,
                                                                          project_root=Path(test_output_dirs.root_dir)))
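
The feature-count assertions in the test encode a simple rule: with joint encoding, all image channels share one encoder output; otherwise each image channel contributes its own reduced feature block, and non-imaging features are appended in both cases. Restated as a hypothetical helper:

def expected_final_feature_channels(encode_channels_jointly: bool,
                                    num_image_channels: int,
                                    num_reduced_features: int,
                                    num_non_imaging_features: int) -> int:
    # Mirrors the assertions on model.final_num_feature_channels above.
    if encode_channels_jointly:
        return num_reduced_features + num_non_imaging_features
    return num_image_channels * num_reduced_features + num_non_imaging_features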
Code Example #8
class BaseSegmentationModel(DeviceAwareModule, ABC):
    """
    Base neural network segmentation model.
    """
    @initialize_instance_variables
    def __init__(self,
                 name: str,
                 input_channels: int,
                 crop_size_constraints: Optional[CropSizeConstraints] = None):
        """
        Creates a new instance of the base model class.
        :param name: A human-readable name of the model.
        :param input_channels: The number of image input channels.
        :param crop_size_constraints: The size constraints for the training crop size. If not provided,
        a minimum crop size of 1 is assumed.
        """
        super().__init__()
        self.num_dimensions = 3
        self.name = name
        self.input_channels = input_channels
        self.summarizer: Optional[ModelSummary] = None
        self.summary: Optional[OrderedDict] = None
        self.summary_crop_size: Optional[TupleInt3] = None
        if crop_size_constraints is None:
            # Allow any size. With this initialization, both multiple_of and minimum_size will be populated.
            crop_size_constraints = CropSizeConstraints(multiple_of=1)
        self.crop_size_constraints = crop_size_constraints

    def get_output_shape(
            self, input_shape: Union[TupleInt2, TupleInt3]) -> Tuple[int, ...]:
        """
        Computes model's output tensor shape for given input tensor shape.
        The argument is expected to be either a 2-tuple or a 3-tuple. A batch dimension (1)
        and the number of channels are added as the first dimensions. The result tuple has batch and channel dimension
        stripped off.
        :param input_shape: A tuple (2D or 3D) representing incoming tensor shape.
        """
        # Create a sample tensor for inference
        batch_size = 1
        if len(input_shape) not in [2, 3]:
            raise ValueError(
                "Input shape has to be in 2D or 3D, found {}".format(
                    len(input_shape)))
        input_tensors = \
            [torch.zeros(batch_size, self.input_channels, *input_shape, dtype=torch.float)]

        # Perform a forward pass then restore the state of the module
        output_shape = forward_preserve_state(module=self,
                                              inputs=input_tensors).size()
        return tuple(output_shape[2:])

    def partition_model(self,
                        devices: Optional[List[torch.device]] = None) -> None:
        """A method to partition a neural network model across multiple devices.
        If no list of devices is given, use all available GPU devices."""
        pass

    def validate_crop_size(self,
                           crop_size: TupleInt3,
                           message_prefix: Optional[str] = None) -> None:
        """
        Checks if the given crop size is a valid crop size for the present model.
        If it is not valid, throw a ValueError.
        :param crop_size: The crop size that should be checked.
        :param message_prefix: A string prefix for the error message if the crop size is found to be invalid.
        """
        if self.crop_size_constraints is not None:
            self.crop_size_constraints.validate(crop_size, message_prefix)

    def generate_model_summary(self,
                               crop_size: Optional[TupleInt3] = None,
                               log_summaries_to_files: bool = False) -> None:
        """
        Stores a model summary, containing information about layers, memory consumption and runtime,
        in the model.summary field.
        When called again with the same crop_size, the summary is not created again.
        :param crop_size: The crop size for which the summary should be created. If not provided,
        the minimum allowed crop size is used.
        :param log_summaries_to_files: Whether to write the summary to a file.
        """
        if crop_size is None:
            crop_size = self.crop_size_constraints.minimum_size  # type: ignore
            assert crop_size is not None
        # Compare against the crop size tuple itself: wrapping it in a list would never
        # compare equal to the stored tuple, so the cached summary would never be reused.
        if self.summary is None or self.summary_crop_size != crop_size:
            self.summarizer = ModelSummary(self)
            self.summary = self.summarizer.generate_summary(
                input_sizes=[(self.input_channels, *crop_size)],
                log_summaries_to_files=log_summaries_to_files)
            self.summary_crop_size = crop_size

    @abc.abstractmethod
    def forward(self, input: Any) -> Any:  # type: ignore
        raise NotImplementedError("forward must be implemented by subclasses")

    def get_all_child_layers(self) -> List[torch.nn.Module]:
        raise NotImplementedError(
            "get_all_child_layers must be implemented by subclasses")