Example #1
    def initLossAggregators(self):
        if self.train_losses is not None and self.val_losses is not None:
            assert set(self.train_losses.keys()) == set(self.val_losses.keys()), \
                'train and val loss keys are not equal: {} and {}'.format(
                    ', '.join(map(str, self.train_losses.keys())),
                    ', '.join(map(str, self.val_losses.keys())))

        if self.train_losses is None:
            return

        summaries_placeholders = OrderedDict([(loss_name, create_summary_placeholder(loss_name)) for loss_name in self.train_losses.keys()])

        # mean values used for summaries
        self.train_loss_aggregator = SummaryHandler(self.sess,
                                                    self.train_losses,
                                                    summaries_placeholders,
                                                    'train',
                                                    os.path.join(self.output_folder, 'train'),
                                                    os.path.join(self.output_folder, 'train.csv'))

        if self.val_losses is None:
            return

        summaries_placeholders_val = summaries_placeholders.copy()

        if self.additional_summaries_placeholders_val is not None:
            summaries_placeholders_val.update(self.additional_summaries_placeholders_val)

        self.val_loss_aggregator = SummaryHandler(self.sess,
                                                  self.val_losses,
                                                  summaries_placeholders_val,
                                                  'test',
                                                  os.path.join(self.output_folder, 'test'),
                                                  os.path.join(self.output_folder, 'test.csv'))
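A hedged sketch of the create_summary_placeholder helper used above. Its exact implementation is an assumption; with SummaryHandler it plausibly pairs one float placeholder per loss with a scalar summary (TF1 graph mode):

import tensorflow as tf

def create_summary_placeholder(name):
    # Assumed helper: a float placeholder fed with the aggregated mean loss,
    # plus a scalar summary built from it for the train/test writers.
    placeholder = tf.placeholder(tf.float32, name='summary_placeholder_' + name)
    summary = tf.summary.scalar(name, placeholder)
    return placeholder, summary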
Example #2
    def __init__(self, modality, cv):
        super().__init__()
        self.modality = modality
        self.cv = cv
        self.batch_size = 1
        self.learning_rates = [0.00001, 0.000001]
        self.learning_rate_boundaries = [20000]
        self.max_iter = 40000
        self.test_iter = 5000
        self.disp_iter = 100
        self.snapshot_iter = self.test_iter
        self.test_initialization = False
        self.current_iter = 0
        self.reg_constant = 0.0001
        self.num_labels = 8
        self.data_format = 'channels_first'
        self.channel_axis = 1
        self.save_debug_images = False

        self.has_validation_groundtruth = cv != 0
        self.base_folder = 'mmwhs_dataset'
        self.image_size = [64, 64, 64]
        if modality == 'ct':
            self.image_spacing = [3, 3, 3]
        else:
            self.image_spacing = [4, 4, 4]
        self.input_gaussian_sigma = 1.0
        self.label_gaussian_sigma = 1.0
        self.use_landmarks = True

        self.output_folder = os.path.join('./output', 'scn_{}_{}'.format(modality, cv),
                                          self.output_folder_timestamp())

        dataset_parameters = dict(
            image_size=self.image_size,
            image_spacing=self.image_spacing,
            base_folder=self.base_folder,
            cv=self.cv,
            modality=self.modality,
            input_gaussian_sigma=self.input_gaussian_sigma,
            label_gaussian_sigma=self.label_gaussian_sigma,
            use_landmarks=self.use_landmarks,
            num_labels=self.num_labels,
            data_format=self.data_format,
            save_debug_images=self.save_debug_images)

        self.dataset = Dataset(**dataset_parameters)

        self.dataset_train = self.dataset.dataset_train()
        self.dataset_val = self.dataset.dataset_val()
        self.files_to_copy = ['main.py', 'network.py', 'dataset.py']
        self.dice_names = list(
            map(lambda x: 'dice_{}'.format(x), range(self.num_labels)))
        self.additional_summaries_placeholders_val = dict([
            (name, create_summary_placeholder(name))
            for name in self.dice_names
        ])
        self.loss_function = softmax_cross_entropy_with_logits
        self.network = network_scn
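A minimal usage sketch for this configuration. The enclosing class name (MainLoop here) and its run() method are assumptions not shown in the listing:

# Hypothetical driver: train the CT variant on cross-validation fold 1.
loop = MainLoop(modality='ct', cv=1)
loop.run()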
Example #3
    def __init__(self, param):
        super().__init__()
        # polyaxon
        data_dir = os.path.join(
            list(get_data_paths().values())[0], "lung/JSRT/preprocessed/")
        logging.info('DATA DIR = ' + data_dir)
        output_path = get_outputs_path()

        self.loss_function = param[0]
        self.network = param[1]
        self.routing_type = param[2]

        self.batch_size = 1
        self.learning_rates = [1, 1]
        self.max_iter = 300000
        self.test_iter = 10000
        self.disp_iter = 100
        self.snapshot_iter = self.test_iter
        self.test_initialization = False
        self.current_iter = 0
        self.num_labels = 6
        self.data_format = 'channels_first'  # WARNING: capsule layers might not work with channels_last!
        self.channel_axis = 1
        self.save_debug_images = False
        self.base_folder = data_dir  # input folder
        self.image_size = [128, 128]
        self.image_spacing = [1, 1]
        self.output_folder = os.path.join(
            output_path, self.network.__name__ + '_' + self.output_folder_timestamp())  # output folder
        self.dataset = Dataset(image_size=self.image_size,
                               image_spacing=self.image_spacing,
                               num_labels=self.num_labels,
                               base_folder=self.base_folder,
                               data_format=self.data_format,
                               save_debug_images=self.save_debug_images)

        self.dataset_train = self.dataset.dataset_train()
        self.dataset_train.get_next()
        self.dataset_val = self.dataset.dataset_val()
        self.dice_names = list(
            map(lambda x: 'dice_{}'.format(x), range(self.num_labels)))
        self.additional_summaries_placeholders_val = dict([
            (name, create_summary_placeholder(name))
            for name in self.dice_names
        ])

        if self.network.__name__ == 'network_ud':
            self.net_file = './Lung_Segmentation/LungSeg/cnn_network.py'
        elif self.network.__name__ == 'SegCaps_multilabels':
            self.net_file = './Lung_Segmentation/LungSeg/SegCaps/SegCaps.py'
        else:
            self.net_file = './Lung_Segmentation/LungSeg/capsule_network.py'
        self.files_to_copy = ['main_train_and_test.py', self.net_file]
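The param argument is unpacked positionally into (loss_function, network, routing_type). A hedged sketch of a caller; network_ud is referenced in the listing, while the loss, the routing value, and the class name are assumptions:

# Hypothetical parameter tuple and driver for the lung segmentation loop.
param = (softmax_cross_entropy_with_logits, network_ud, 'dynamic')
loop = MainLoop(param)  # class name assumed
loop.run()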
Example #4
    def __init__(self,
                 cv,
                 network,
                 unet,
                 network_parameters,
                 learning_rate,
                 output_folder_name=''):
        """
        Initializer.
        :param cv: The cv fold. 0, 1, 2 for CV; 'train_all' for training on whole dataset.
        :param network: The used network. Usually network_u.
        :param unet: The specific instance of the U-Net. Usually UnetClassicAvgLinear3d.
        :param network_parameters: The network parameters passed to unet.
        :param learning_rate: The initial learning rate.
        :param output_folder_name: The output folder name that is used for distinguishing experiments.
        """
        super().__init__()
        self.batch_size = 1
        self.learning_rates = [
            learning_rate, learning_rate * 0.5, learning_rate * 0.1
        ]
        self.learning_rate_boundaries = [20000, 30000]
        self.max_iter = 50000
        self.test_iter = 5000
        self.disp_iter = 100
        self.snapshot_iter = 5000
        self.test_initialization = False
        self.current_iter = 0
        self.reg_constant = 0.000001
        self.num_labels = 1
        self.num_labels_all = 26
        self.data_format = 'channels_first'
        self.channel_axis = 1
        self.network = network
        self.unet = unet
        self.network_parameters = network_parameters
        self.padding = 'same'
        self.clip_gradient_global_norm = 1.0

        self.use_pyro_dataset = False
        self.save_output_images = True
        self.save_output_images_as_uint = True  # set to False if you want to see the direct network output
        self.save_debug_images = False
        self.has_validation_groundtruth = cv in [0, 1, 2]
        self.local_base_folder = '../verse2019_dataset'
        self.image_size = [128, 128, 96]
        self.image_spacing = [1] * 3
        self.output_folder = os.path.join('./output/vertebrae_segmentation/',
                                          network.__name__, unet.__name__,
                                          output_folder_name, str(cv),
                                          self.output_folder_timestamp())
        dataset_parameters = {
            'base_folder': self.local_base_folder,
            'image_size': self.image_size,
            'image_spacing': self.image_spacing,
            'cv': cv,
            'input_gaussian_sigma': 0.75,
            'label_gaussian_sigma': 1.0,
            'heatmap_sigma': 3.0,
            'generate_single_vertebrae_heatmap': True,
            'generate_single_vertebrae': True,
            'save_debug_images': self.save_debug_images
        }

        dataset = Dataset(**dataset_parameters)
        if self.use_pyro_dataset:
            server_name = '@localhost:51232'
            uri = 'PYRO:verse_dataset' + server_name
            print('using pyro uri', uri)
            self.dataset_train = PyroClientDataset(uri, **dataset_parameters)
        else:
            self.dataset_train = dataset.dataset_train()
        self.dataset_val = dataset.dataset_val()

        self.dice_names = ['mean_dice'] + list(
            map(lambda x: 'dice_{}'.format(x), range(self.num_labels_all)))
        self.hausdorff_names = ['mean_h'] + list(
            map(lambda x: 'h_{}'.format(x), range(self.num_labels)))
        self.additional_summaries_placeholders_val = dict([
            (name, create_summary_placeholder(name))
            for name in (self.dice_names + self.hausdorff_names)
        ])
        self.loss_function = sigmoid_cross_entropy_with_logits

        self.setup_base_folder = os.path.join(self.local_base_folder, 'setup')
        if cv in [0, 1, 2]:
            self.cv_folder = os.path.join(self.setup_base_folder, 'cv', str(cv))
            self.test_file = os.path.join(self.cv_folder, 'val.txt')
        else:
            self.test_file = os.path.join(self.setup_base_folder,
                                          'train_all.txt')
        self.valid_landmarks_file = os.path.join(self.setup_base_folder,
                                                 'valid_landmarks.csv')
        self.test_id_list = utils.io.text.load_list(self.test_file)
        self.valid_landmarks = utils.io.text.load_dict_csv(
            self.valid_landmarks_file)
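A hedged sketch of driving this config over the three CV folds plus a whole-dataset run. network_u and UnetClassicAvgLinear3d are the defaults named in the docstring; run(), the network parameters, and the learning rate value are assumptions:

# Hypothetical cross-validation driver for vertebrae segmentation.
for cv in [0, 1, 2, 'train_all']:
    loop = MainLoop(cv, network_u, UnetClassicAvgLinear3d,
                    {'num_filters_base': 64}, 0.0001, 'baseline_experiment')
    loop.run()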
Example #5
    def __init__(self,
                 cv,
                 network,
                 unet,
                 network_parameters,
                 learning_rate,
                 output_folder_name=''):
        """
        Initializer.
        :param cv: The cv fold. 0, 1, 2 for CV; 'train_all' for training on whole dataset.
        :param network: The used network. Usually network_u.
        :param unet: The specific instance of the U-Net. Usually UnetClassicAvgLinear3d.
        :param network_parameters: The network parameters passed to unet.
        :param learning_rate: The initial learning rate.
        :param output_folder_name: The output folder name that is used for distinguishing experiments.
        """
        super().__init__()
        self.batch_size = 1
        self.learning_rates = [
            learning_rate, learning_rate * 0.5, learning_rate * 0.1
        ]
        self.learning_rate_boundaries = [50000, 75000]
        self.max_iter = 100000
        self.test_iter = 10000
        self.disp_iter = 100
        self.snapshot_iter = 5000
        self.test_initialization = False
        self.current_iter = 0
        self.reg_constant = 0.0005
        self.use_background = True
        self.num_landmarks = 25
        self.heatmap_sigma = 4.0
        self.learnable_sigma = True
        self.data_format = 'channels_first'
        self.network = network
        self.unet = unet
        self.network_parameters = network_parameters
        self.padding = 'same'
        self.clip_gradient_global_norm = 100000.0

        self.use_pyro_dataset = False
        self.use_spine_postprocessing = True
        self.save_output_images = True
        self.save_output_images_as_uint = True  # set to False if you want to see the direct network output
        self.save_debug_images = False
        self.has_validation_groundtruth = cv in [0, 1, 2]
        self.local_base_folder = '../verse2019_dataset'
        self.image_size = [96, 96, 128]
        self.image_spacing = [2] * 3
        self.cropped_inc = [0, 96, 0, 0]
        self.heatmap_size = self.image_size
        self.sigma_regularization = 100
        self.sigma_scale = 1000.0
        self.cropped_training = True
        self.output_folder = os.path.join('./output/vertebrae_localization/',
                                          network.__name__, unet.__name__,
                                          output_folder_name, str(cv),
                                          self.output_folder_timestamp())
        dataset_parameters = {
            'base_folder': self.local_base_folder,
            'image_size': self.image_size,
            'image_spacing': self.image_spacing,
            'cv': cv,
            'input_gaussian_sigma': 0.75,
            'generate_landmarks': True,
            'generate_landmark_mask': True,
            'translate_to_center_landmarks': True,
            'translate_by_random_factor': True,
            'save_debug_images': self.save_debug_images
        }

        dataset = Dataset(**dataset_parameters)
        if self.use_pyro_dataset:
            server_name = '@localhost:51232'
            uri = 'PYRO:verse_dataset' + server_name
            print('using pyro uri', uri)
            self.dataset_train = PyroClientDataset(uri, **dataset_parameters)
        else:
            self.dataset_train = dataset.dataset_train()
        self.dataset_val = dataset.dataset_val()

        self.point_statistics_names = [
            'pe_mean', 'pe_stdev', 'pe_median', 'num_correct'
        ]
        self.additional_summaries_placeholders_val = dict([
            (name, create_summary_placeholder(name))
            for name in self.point_statistics_names
        ])
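learning_rates together with learning_rate_boundaries describe a piecewise-constant schedule. The base class presumably builds it roughly like this TF1 sketch (an assumption, shown for learning_rate = 1e-4):

import tensorflow as tf

# Piecewise-constant decay: 1e-4 for iterations [0, 50000),
# 5e-5 for [50000, 75000), and 1e-5 until max_iter.
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.piecewise_constant(global_step,
                                            boundaries=[50000, 75000],
                                            values=[1e-4, 5e-5, 1e-5])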
Example #6
    def __init__(self, cv, network_id):
        super().__init__()
        self.cv = cv
        self.network_id = network_id
        self.output_folder = network_id
        if cv != -1:
            self.output_folder += '_cv{}'.format(cv)
        self.output_folder += '/' + self.output_folder_timestamp()
        self.batch_size = 1
        learning_rates = {'scn': 0.00000005, 'unet': 0.000000005}
        max_iters = {'scn': 40000, 'unet': 80000}
        self.learning_rate = learning_rates[self.network_id]
        self.max_iter = max_iters[self.network_id]
        self.test_iter = 2500
        self.disp_iter = 100
        self.snapshot_iter = self.test_iter
        self.test_initialization = False
        self.current_iter = 0
        self.reg_constant = 0.0005
        self.sigma_regularization = 100
        self.sigma_scale = 1000
        self.invert_transformation = False
        self.num_landmarks = 26
        self.image_size = [96, 96, 192]
        self.image_spacing = [2, 2, 2]
        self.heatmap_size = self.image_size
        self.image_channels = 1
        self.heatmap_sigma = 4
        self.data_format = 'channels_first'
        self.save_debug_images = False
        self.base_folder = 'spine_localization_dataset'
        self.generate_landmarks = True
        self.cropped_training = True
        self.cropped_inc = [0, 64, 0, 0]
        if self.cropped_training:
            dataset = Dataset(self.image_size,
                              self.image_spacing,
                              self.heatmap_sigma,
                              self.num_landmarks,
                              self.base_folder,
                              self.cv,
                              self.data_format,
                              self.save_debug_images,
                              generate_heatmaps=not self.generate_landmarks,
                              generate_landmarks=self.generate_landmarks)
            self.dataset_train = dataset.dataset_train()
            self.dataset_val = dataset.dataset_val()
        else:
            dataset = Dataset(self.image_size,
                              self.image_spacing,
                              self.heatmap_sigma,
                              self.num_landmarks,
                              self.base_folder,
                              self.cv,
                              self.data_format,
                              self.save_debug_images,
                              generate_heatmaps=not self.generate_landmarks,
                              generate_landmarks=self.generate_landmarks,
                              translate_by_random_factor=False)
            self.dataset_train = dataset.dataset_train()
            self.dataset_val = dataset.dataset_val()

        networks = {'scn': network_scn, 'unet': network_unet}
        self.network = networks[self.network_id]

        self.point_statistics_names = [
            'pe_mean', 'pe_stdev', 'pe_median', 'num_correct'
        ]
        self.additional_summaries_placeholders_val = dict([
            (name, create_summary_placeholder(name))
            for name in self.point_statistics_names
        ])
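A hedged usage sketch; the class name and run() are again assumptions:

# Hypothetical driver: train both network variants on CV fold 1.
for network_id in ['scn', 'unet']:
    MainLoop(cv=1, network_id=network_id).run()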
Example #7
    def __init__(self, network_id, cv, landmark_source, sigma_regularization, output_folder_name=''):
        super().__init__()
        self.network_id = network_id
        self.output_folder = os.path.join('output', network_id, landmark_source,
                                          str(cv) if cv >= 0 else 'all',
                                          output_folder_name, self.output_folder_timestamp())
        self.batch_size = 1
        self.max_iter = 30000
        self.learning_rate = 0.000001
        self.test_iter = 5000
        self.disp_iter = 100
        self.snapshot_iter = self.test_iter
        self.test_initialization = False
        self.current_iter = 0
        self.reg_constant = 0.001
        self.cv = cv
        self.landmark_source = landmark_source
        original_image_extend = [193.5, 240.0]
        image_sizes = {'unet': [512, 512],
                       'scn_mia': [512, 512]}
        heatmap_sizes = {'unet': [512, 512],
                         'scn_mia': [512, 512]}
        sigmas = {'unet': 2.5,
                  'scn_mia': 2.5}
        self.image_size = image_sizes[self.network_id]
        self.heatmap_size = heatmap_sizes[self.network_id]
        self.image_spacing = [float(np.max([e / s for e, s in zip(original_image_extend, self.image_size)]))] * 2
        self.sigma = sigmas[self.network_id]
        self.image_channels = 1
        self.num_landmarks = 19
        self.heatmap_sigma = self.sigma
        self.sigma_regularization = sigma_regularization
        self.sigma_scale = 100.0
        self.data_format = 'channels_first'
        self.save_debug_images = False
        self.base_folder = './'
        dataset_parameters = {'image_size': self.image_size,
                              'heatmap_size': self.heatmap_size,
                              'image_spacing': self.image_spacing,
                              'num_landmarks': self.num_landmarks,
                              'base_folder': self.base_folder,
                              'data_format': self.data_format,
                              'save_debug_images': self.save_debug_images,
                              'cv': self.cv,
                              'landmark_source': self.landmark_source}

        dataset = Dataset(**dataset_parameters)
        self.dataset_train = dataset.dataset_train()
        self.dataset_val = dataset.dataset_val()

        networks = {'unet': network_unet,
                    'scn_mia': network_scn_mia}
        self.network = networks[self.network_id]
        self.landmark_metrics = ['pe_mean', 'pe_std', 'pe_median', 'or2', 'or25', 'or3', 'or4', 'or10']
        self.landmark_metric_prefixes = ['challenge', 'senior', 'junior', 'mean']
        self.additional_summaries_placeholders_val = OrderedDict([
            (prefix + '_' + name, create_summary_placeholder(prefix + '_' + name))
            for name in self.landmark_metrics
            for prefix in self.landmark_metric_prefixes
        ])
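Because the comprehension iterates names in the outer loop and prefixes in the inner one, the keys come out as challenge_pe_mean, senior_pe_mean, junior_pe_mean, mean_pe_mean, challenge_pe_std, and so on. A hedged sketch of how such placeholders might be fed at validation time (the helper and its caller are hypothetical):

def feed_val_summaries(placeholders, metric_values):
    # Map each summary placeholder to the metric value computed under the
    # same name; the result is a feed_dict for running the summary ops.
    return {placeholder: metric_values[name]
            for name, placeholder in placeholders.items()}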