Example 1
    def _run_segmentation_validation(self, images_to_keep=0):
        scores = dict()
        images = dict()

        # torch.no_grad() disables gradient computation for the whole validation pass
        with torch.no_grad(), self.state.model_manager.get_eval() as model:
            for batch in self.segmentation_validation_loader:
                domain = batch[0]['domain'][0]
                num_classes = batch[0]['num_classes'][0].item()

                if domain not in scores:
                    scores[domain] = SegmentationRunningScore(num_classes)
                    images[domain] = list()

                _ = self._validate_batch_segmentation(model, batch,
                                                      scores[domain],
                                                      images[domain])

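                # Keep at most 'images_to_keep' sample images per domain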
                images[domain] = images[domain][:images_to_keep]

        return scores, images
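
A minimal sketch of how the returned dictionaries might be consumed (the surrounding trainer object is assumed; get_scores() and the 'meaniou'/'meanacc' keys match the SegmentationRunningScore usage in the later examples):

    scores, images = self._run_segmentation_validation(images_to_keep=4)
    for domain, running_score in scores.items():
        metrics = running_score.get_scores()
        print(domain, metrics['meaniou'], metrics['meanacc'])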
Example 2
class Evaluator:
    def __init__(self, options, model=None):

        if __name__ == "__main__":
            print(" -> Executing script", os.path.basename(__file__))

        self.opt = options
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LABELS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        assert self.opt.train_set in {1, 2, 3, 12, 123}, "Invalid train_set!"
        assert self.opt.task_to_val in {0, 1, 2, 3, 12, 123}, "Invalid task!"
        keys_to_load = ['color', 'segmentation']

        # Labels
        labels = self._get_labels_cityscapes()

        # Train IDs
        self.train_ids = set([labels[i].trainId for i in range(len(labels))])
        self.train_ids.remove(255)
        self.train_ids = sorted(list(self.train_ids))

        self.num_classes_model = len(self.train_ids)

        # Task handling
        if self.opt.task_to_val != 0:
            labels_task = self._get_task_labels_cityscapes()
            train_ids_task = set(
                [labels_task[i].trainId for i in range(len(labels_task))])
            train_ids_task.remove(255)
            self.task_low = min(train_ids_task)
            self.task_high = max(train_ids_task) + 1
            labels = labels_task
            self.train_ids = sorted(list(train_ids_task))
        else:
            self.task_low = 0
            self.task_high = self.num_classes_model
            self.opt.task_to_val = self.opt.train_set

        # Number of classes for the SegmentationRunningScore
        self.num_classes_score = self.task_high - self.task_low

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           DATASET DEFINITIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Data augmentation
        test_data_transforms = [
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=['color']),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        # For a hyperparameter search, validate on a held-out subset of the train split. Otherwise, load the full validation split.
        if self.opt.hyperparameter:
            trainvaltest_split = 'train'
            folders_to_load = CitySet.get_city_set(-1)
        else:
            trainvaltest_split = 'validation'
            folders_to_load = None

        test_dataset = CityscapesDataset(dataset='cityscapes',
                                         split=self.opt.dataset_split,
                                         trainvaltest_split=trainvaltest_split,
                                         video_mode='mono',
                                         stereo_mode='mono',
                                         scales=self.opt.scales,
                                         labels_mode='fromid',
                                         labels=labels,
                                         keys_to_load=keys_to_load,
                                         data_transforms=test_data_transforms,
                                         video_frames=self.opt.video_frames,
                                         folders_to_load=folders_to_load)

        self.test_loader = DataLoader(dataset=test_dataset,
                                      batch_size=self.opt.batch_size,
                                      shuffle=False,
                                      num_workers=self.opt.num_workers,
                                      pin_memory=True,
                                      drop_last=False)

        print(
            "++++++++++++++++++++++ INIT VALIDATION ++++++++++++++++++++++++")
        print("Using dataset\n  ", self.opt.dataset, "with split",
              self.opt.dataset_split)
        print("There are {:d} validation items\n  ".format(len(test_dataset)))
        print("Validating classes up to train set\n  ", self.opt.train_set)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOGGING OPTIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # If no model is passed, standalone validation is to be carried out. The log_path needs to be set before
        # self.load_model() is invoked.
        if model is None:
            self.opt.validate = False
            self.opt.model_name = self.opt.load_model_name

        path_getter = GetPath()
        log_path = path_getter.get_checkpoint_path()
        self.log_path = os.path.join(log_path, 'erfnet', self.opt.model_name)

        # All outputs will be saved to save_path
        self.save_path = self.log_path

        # Create output path for standalone validation
        if not self.opt.validate:
            save_dir = 'eval_{}'.format(self.opt.dataset)

            if self.opt.hyperparameter:
                save_dir = save_dir + '_hyper'

            save_dir = save_dir + '_task_to_val{}'.format(self.opt.task_to_val)

            self.save_path = os.path.join(self.log_path, save_dir)

            if not os.path.exists(self.save_path):
                os.makedirs(self.save_path)

        # Copy this file to save_path
        shutil.copy2(__file__, self.save_path)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           MODEL DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Standalone validation
        if not self.opt.validate:
            # Create a conventional ERFNet
            self.model = ERFNet(self.num_classes_model, self.opt)
            self.load_model()
            self.model.to(self.device)

        # Validate while training
        else:
            self.model = model

        self.model.eval()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOGGING OPTIONS II
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # self.called is used to decide which file mode shall be used when writing metrics to disk.
        self.called = False

        self.metric_model = SegmentationRunningScore(self.num_classes_score)

        # Metrics are only saved if val_frequency > 0!
        if self.opt.val_frequency != 0:
            print("Saving metrics to\n  ", self.save_path)

        # Set up colour output. Coloured images are only output if standalone validation is carried out!
        if not self.opt.validate and self.opt.save_pred_to_disk:
            # Output path
            self.img_path = os.path.join(
                self.save_path, 'output_{}'.format(self.opt.weights_epoch))

            if self.opt.pred_wout_blend:
                self.img_path += '_wout_blend'

            if not os.path.exists(self.img_path):
                os.makedirs(self.img_path)
            print("Saving prediction images to\n  ", self.img_path)
            print("Save frequency\n  ", self.opt.pred_frequency)

            # Get the colours from dataset.
            colors = [
                (label.trainId - self.task_low, label.color)
                for label in labels
                if label.trainId != 255 and label.trainId in self.train_ids
            ]
            colors.append((255, (0, 0, 0)))  # void class
            self.id_color = dict(colors)
            self.id_color_keys = list(self.id_color.keys())
            self.id_color_vals = list(self.id_color.values())

            # Ongoing index to name the outputs
            self.img_idx = 0

        # Set up probability output. Probabilities are only output if standalone validation is carried out!
        if not self.opt.validate and self.opt.save_probs_to_disk:
            # Output path
            self.logit_path = os.path.join(
                self.save_path,
                'probabilities_{}'.format(self.opt.weights_epoch))
            if not os.path.exists(self.logit_path):
                os.makedirs(self.logit_path)
            print("Saving probabilities to\n  ", self.logit_path)
            print("Save frequency\n  ", self.opt.probs_frequency)

            # Ongoing index to name the probability outputs
            self.probs_idx = 0

        print(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

        # Save all options to disk and print them to stdout
        self._print_options()
        self._save_opts(len(test_dataset))

    def _get_labels_cityscapes(self, id=None):
        if id is None:
            id = self.opt.train_set

        if id == 1:
            labels = labels_cityscape_seg_train1.getlabels()
        elif id in (2, 12):
            labels = labels_cityscape_seg_train2_eval.getlabels()
        elif id in (3, 123):
            labels = labels_cityscape_seg_train3_eval.getlabels()

        return labels

    def _get_task_labels_cityscapes(self, id=None):
        if id is None:
            id = self.opt.task_to_val

        if id == 1:
            labels_task = labels_cityscape_seg_train1.getlabels()
        elif id == 2:
            labels_task = labels_cityscape_seg_train2.getlabels()
        elif id == 12:
            labels_task = labels_cityscape_seg_train2_eval.getlabels()
        elif id == 3:
            labels_task = labels_cityscape_seg_train3.getlabels()
        elif id == 123:
            labels_task = labels_cityscape_seg_train3_eval.getlabels()

        return labels_task

    def load_model(self):
        """Load model(s) from disk
        """
        base_path = os.path.split(self.log_path)[0]
        checkpoint_path = os.path.join(
            base_path, self.opt.load_model_name, 'models',
            'weights_{}'.format(self.opt.weights_epoch))
        assert os.path.isdir(checkpoint_path), \
            "Cannot find folder {}".format(checkpoint_path)
        print("loading model from folder {}".format(checkpoint_path))

        path = os.path.join(checkpoint_path, "{}.pth".format('model'))
        model_dict = self.model.state_dict()
        if self.opt.no_cuda:
            pretrained_dict = torch.load(path, map_location='cpu')
        else:
            pretrained_dict = torch.load(path)
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
        model_dict.update(pretrained_dict)
        self.model.load_state_dict(model_dict)

    def calculate_metrics(self, epoch=None):
        print("-> Computing predictions with input size {}x{}".format(
            self.opt.height, self.opt.width))
        print("-> Evaluating")

        for data in self.test_loader:
            with torch.no_grad():
                input_color = data[("color_aug", 0, 0)]
                gt_seg = data[('segmentation', 0, 0)][:, 0, :, :].numpy()
                input_color = {
                    ("color_aug", 0, 0): input_color.to(self.device)
                }

                output = self.model(input_color)

                pred_seg = output['segmentation_logits'].float()

                # Apply task reduction before the argmax
                if self.opt.task_to_val != 0:
                    pred_seg = pred_seg[:, self.task_low:self.task_high, ...]
                    # gt_seg trainIDs must lie in range(0, self.num_classes_score)
                    # to match the torch.argmax output
                    gt_seg -= self.task_low
                    # Keep the background trainID at 255
                    gt_seg[gt_seg == 255 - self.task_low] = 255

                # Save probabilities to disk
                if not self.opt.validate and self.opt.save_probs_to_disk:
                    self._save_probs_to_disk(
                        F.softmax(pred_seg, dim=1).cpu().numpy())

                pred_seg = F.interpolate(pred_seg,
                                         gt_seg[0].shape,
                                         mode='nearest')

                # Select most probable class
                pred_seg = torch.argmax(pred_seg, dim=1)

                pred_seg = pred_seg.cpu().numpy()
                self.metric_model.update(gt_seg, pred_seg)

                # Save predictions to disk
                if not self.opt.validate and self.opt.save_pred_to_disk:
                    self._save_pred_to_disk(pred_seg, gt_seg)

        metrics = self.metric_model.get_scores()

        # Save metrics
        if self.opt.val_frequency != 0:
            # Local epoch will not be specified if the validation is carried out standalone.
            if not self.opt.validate and epoch is None:
                epoch = int(self.opt.weights_epoch)

            self._save_metrics(epoch, metrics)

        self.metric_model.reset()
        print("\n  " + ("{:>8} | " * 2).format("miou", "maccuracy"))
        print(("&{: 8.3f}  " *
               2).format(metrics['meaniou'], metrics['meanacc']) + "\\\\")
        print("\n-> Done!")

    def _save_metrics(self, epoch, metrics):
        ''' Save metrics (class-wise) to disk as HDF5 file.
        '''
        # If a single model is validated, the output file will carry its epoch number in its file name. If a learning
        # process is validated "on the go", the output filename will just be "validation.h5".
        if not self.opt.validate:
            filename = 'validation_{:d}.h5'.format(epoch)
        else:
            filename = 'validation.h5'
        save_path = os.path.join(self.save_path, filename)

        # When _save_metrics is invoked for the first time, the HDF file will be opened in "w" mode overwriting any
        # existing file. In case of another invocation, the file will be opened in "a" mode not overwriting any
        # existing file but appending the data.
        if not self.called:
            mode = 'w'
            self.called = True
        else:
            mode = 'a'

        # If a single model is validated, all datasets reside in the first layer of the HDF file. If a learning process
        # is validated "on the go", each validated model will have its own group named after the epoch of the model.
        with h5.File(save_path, mode) as f:
            if self.opt.validate:
                grp = f.create_group('epoch_{:d}'.format(epoch))
            else:
                grp = f

            # Write mean_IoU, mean_recall and mean_precision to the file / group
            dset = grp.create_dataset('mean_IoU', data=metrics['meaniou'])
            dset.attrs['Description'] = 'See trainIDs for information on the classes'
            dset = grp.create_dataset('mean_recall', data=metrics['meanacc'])
            dset.attrs['Description'] = 'See trainIDs for information on the classes'
            dset.attrs['AKA'] = 'Accuracy -> TP / (TP + FN)'
            dset = grp.create_dataset('mean_precision', data=metrics['meanprec'])
            dset.attrs['Description'] = 'See trainIDs for information on the classes'
            dset.attrs['AKA'] = 'Precision -> TP / (TP + FP)'

            # If in 'w' mode, allocate memory for class_id dataset
            if mode == 'w':
                ids = np.zeros(shape=(len(metrics['iou'])), dtype=np.uint32)

            class_iou = np.zeros(shape=(len(metrics['iou'])), dtype=np.float64)
            class_acc = np.zeros(shape=(len(metrics['acc'])), dtype=np.float64)
            class_prec = np.zeros(shape=(len(metrics['prec'])),
                                  dtype=np.float64)

            # Disassemble the metrics dictionary into per-class arrays
            for i, key in enumerate(sorted(metrics['iou'])):
                if mode == 'w':
                    ids[i] = self.train_ids[i]
                class_iou[i] = metrics['iou'][key]
                class_acc[i] = metrics['acc'][key]
                class_prec[i] = metrics['prec'][key]

            # Create the class_id dataset only once, in the first layer of the HDF5 file, when in 'w' mode
            if mode == 'w':
                dset = f.create_dataset('trainIDs', data=ids)
                dset.attrs['Description'] = 'trainIDs of classes'
                dset = f.create_dataset('first_epoch_in_file',
                                        data=np.array([epoch]).astype(np.uint32))
                dset.attrs['Description'] = 'First epoch that has been saved in this file.'

            dset = grp.create_dataset('class_IoU', data=class_iou)
            dset.attrs['Description'] = 'See trainIDs for information on the class order'
            dset = grp.create_dataset('class_recall', data=class_acc)
            dset.attrs['Description'] = 'See trainIDs for information on the class order'
            dset.attrs['AKA'] = 'Accuracy -> TP / (TP + FN)'
            dset = grp.create_dataset('class_precision', data=class_prec)
            dset.attrs['Description'] = 'See trainIDs for information on the class order'
            dset.attrs['AKA'] = 'Precision -> TP / (TP + FP)'

    def _save_pred_to_disk(self, pred, gt):
        ''' Save a correctly coloured image of the prediction (batch) to disk. Only every self.opt.pred_frequency-th
            prediction is saved to disk!
        '''
        for i in range(gt.shape[0]):
            if self.img_idx % self.opt.pred_frequency == 0:
                o_size = gt[i].shape  # original image shape

                single_pred = pred[i].flatten()
                single_gt = gt[i].flatten()

                # Copy voids from ground truth to prediction
                if not self.opt.pred_wout_blend:
                    single_pred[single_gt == 255] = 255

                # Convert to colour
                single_pred = self._convert_to_colour(single_pred, o_size)
                single_gt = self._convert_to_colour(single_gt, o_size)

                # Save predictions to disk using an ongoing index
                cv2.imwrite(
                    os.path.join(self.img_path,
                                 'pred_val_{}.png'.format(self.img_idx)),
                    single_pred)
                cv2.imwrite(
                    os.path.join(self.img_path,
                                 'gt_val_{}.png'.format(self.img_idx)),
                    single_gt)

            self.img_idx += 1

    def _convert_to_colour(self, img, o_size):
        ''' Replace trainIDs in prediction with colours from dict, reshape it afterwards to input dimensions and
            convert RGB to BGR to match openCV's colour system.
        '''
        sort_idx = np.argsort(self.id_color_keys)
        idx = np.searchsorted(self.id_color_keys, img, sorter=sort_idx)
        img = np.asarray(self.id_color_vals)[sort_idx][idx]
        img = img.astype(np.uint8)
        img = np.reshape(img, newshape=(o_size[0], o_size[1], 3))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        return img

    def _save_probs_to_disk(self, output):
        ''' Save the network output as numpy npy-file to disk. Only every self.opt.probs_frequency-th image is saved
            to disk!
        '''
        for i in range(output.shape[0]):
            if self.probs_idx % self.opt.probs_frequency == 0:
                np.save(
                    os.path.join(self.logit_path,
                                 'seg_logit_{}'.format(self.probs_idx)),
                    output[i])

            self.probs_idx += 1

    def _print_options(self):
        ''' Print validation options to stdout
        '''
        # Convert namespace to dictionary
        opts = vars(self.opt)

        # Get max key length for left justifying
        max_len = max([len(key) for key in opts.keys()])

        # Print options to stdout
        print(
            "+++++++++++++++++++++++++++ OPTIONS +++++++++++++++++++++++++++")
        for item in sorted(opts.items()):
            print(item[0].ljust(max_len), item[1])
        print(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

    def _save_opts(self, n_eval):
        """Save options to disk so we know what we ran this experiment with
        """
        to_save = self.opt.__dict__.copy()
        to_save['n_eval'] = n_eval
        if self.opt.validate:
            filename = 'eval_opt.json'
        else:
            filename = 'eval_opt_{}.json'.format(self.opt.weights_epoch)

        with open(os.path.join(self.save_path, filename), 'w') as f:
            json.dump(to_save, f, indent=2)
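
For standalone validation (model=None), the class might be driven as in this minimal sketch; ValidationOptions is a hypothetical parser that provides the option attributes accessed above (load_model_name, weights_epoch, val_frequency, ...):

    opts = ValidationOptions().parse()  # hypothetical; any namespace with the fields above works
    evaluator = Evaluator(opts)  # model=None -> standalone validation, weights are loaded from disk
    evaluator.calculate_metrics()  # epoch defaults to opts.weights_epoch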
Example 3
class DCMasking(object):
    def __init__(self, masking_from_epoch, num_epochs, moving_mask_percent, masking_linear_increase):
        self.masking_from_epoch = masking_from_epoch
        self.num_epochs = num_epochs
        self.moving_mask_percent = moving_mask_percent
        self.masking_linear_increase = masking_linear_increase

        self.segmentation_input_key = ('color_aug', 0, 0)
        self.logits_key = ('segmentation_logits', 0)

        self.metric_model_moving = SegmentationRunningScore(2)

        self.iou_thresh = dict()
        self.iou_thresh['non_moving'] = 0.0
        self.iou_thresh['moving'] = 0.0

        self.iou_log = dict()
        self.iou_log['non_moving'] = list()
        self.iou_log['moving'] = list()

    def _moving_class_criterion(self, segmentation):
        # TODO this is valid for the Cityscapes class definitions and has to be adapted for other datasets
        #  to be more generic
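        # With the standard Cityscapes trainId mapping, trainIDs 11-18 (person, rider,
        # car, truck, bus, train, motorcycle, bicycle) are the movable classes, so
        # 10 < trainId < 100 selects them while excluding void (255)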
        mask = (segmentation > 10) & (segmentation < 100)
        return mask

    def compute_segmentation_frames(self, batch, model):
        batch_masking = deepcopy(batch)

        # get the depth indices
        batch_indices = tuple([idx_batch for idx_batch, sub_batch in enumerate(batch_masking)
                               if any('depth' in purpose_tuple
                                      for purpose_tuple in sub_batch['purposes'])
                               ])

        # get the depth images
        batch_masking = tuple([sub_batch for sub_batch in batch_masking
                               if any('depth' in purpose_tuple
                                      for purpose_tuple in sub_batch['purposes'])
                               ])

        # replace the purpose to segmentation
        for idx1, sub_batch in enumerate(batch_masking):
            for idx2, purpose_tuple in enumerate(sub_batch['purposes']):
                batch_masking[idx1]['purposes'][idx2] = tuple([purpose.replace('depth', 'segmentation')
                                                               for purpose in purpose_tuple])

        # generate the correct keys and outputs
        input_image_keys = [key for key in batch_masking[0].keys() if 'color_aug' in key]
        output_segmentation_keys = [('segmentation', key[1], key[2]) for key in input_image_keys]
        outputs_masked = list(dict() for i in range(len(batch)))

        # pass all depth image frames through the network to get the segmentation outputs
        for in_key, out_key in zip(input_image_keys, output_segmentation_keys):
            wanted_keys = ['domain', 'purposes', 'domain_idx', in_key]
            batch_masking_key = deepcopy(batch_masking)
            batch_masking_key = tuple([{key: sub_batch[key] for key in sub_batch.keys()
                                        if key in wanted_keys}
                                       for sub_batch in batch_masking_key])
            for idx1 in range(len(batch_masking_key)):
                batch_masking_key[idx1][self.segmentation_input_key] = \
                    batch_masking_key[idx1][in_key].clone()
                if in_key != self.segmentation_input_key:
                    del batch_masking_key[idx1][in_key]

            outputs_masked_key = model(batch_masking_key)
            cur_idx_outputs = 0
            for idx_batch in range(len(outputs_masked)):
                if idx_batch in batch_indices:
                    outputs_masked[idx_batch][out_key] = outputs_masked_key[cur_idx_outputs][self.logits_key].argmax(1)
                    cur_idx_outputs += 1
                else:
                    outputs_masked[idx_batch] = None

        outputs_masked = tuple(outputs_masked)
        return outputs_masked

    def compute_moving_mask(self, output_masked):
        """Compute moving mask and iou
                """
        segmentation = output_masked[("segmentation", 0, 0)]
        # Create an empty combined mask
        moving_mask_combined = torch.zeros(segmentation.shape).to(segmentation.device)

        # Create binary masks (movable object = 1, non-movable = 0) for t = 0
        # and for every warped frame t != 0
        moving_mask = dict()
        moving_mask[0] = self._moving_class_criterion(segmentation).float()
        for key in output_masked.keys():
            if key[0] == "segmentation_warped":
                moving_mask[key[1]] = self._moving_class_criterion(output_masked[("segmentation_warped", key[1], 0)])

        # Calculate the IoU for each sample of the batch separately
        for i in range(moving_mask[0].shape[0]):

            # Accumulate the binary (moving / non-moving) score over all frames t != 0
            for frame_id in moving_mask.keys():
                if frame_id == 0:
                    continue
                self.metric_model_moving.update(
                    np.array(moving_mask[frame_id][i].cpu()), np.array(moving_mask[0][i].cpu()))

            scores = self.metric_model_moving.get_scores()

            if not np.isnan(scores['iou'][0]):
                self.iou_log['non_moving'].append(scores['iou'][0])
            if not np.isnan(scores['iou'][1]):
                self.iou_log['moving'].append(scores['iou'][1])
                # Compute the mask only if the moving-object score is not NaN:
                # mask every moving class whose IoU is smaller than the threshold
                if scores['iou'][1] < self.iou_thresh['moving']:
                    # Add moving mask of t = 0
                    moving_mask_combined[i] += self._moving_class_criterion(segmentation[i]).float()
                    # Add the moving masks of the t != 0 segmentations warped to t = 0
                    for frame_id in moving_mask.keys():
                        if frame_id == 0:
                            continue
                        moving_mask_combined[i] += self._moving_class_criterion(
                            output_masked[("segmentation_warped", frame_id, 0)][i]).float()
            self.metric_model_moving.reset()
        # movable object = 0, non_movable = 1
        output_masked['moving_mask'] = (moving_mask_combined < 1).float().detach()

    def clear_iou_log(self):
        self.iou_log = dict()
        self.iou_log['non_moving'] = list()
        self.iou_log['moving'] = list()

    def calculate_iou_threshold(self, current_epoch):
        if self.masking_from_epoch <= current_epoch:
            self.iou_thresh = dict()
            if self.masking_linear_increase:
                # Linearly decrease the masked fraction from 100 % to 0 %
                # over the remaining epochs
                percentage = 1 - ((current_epoch + 1 - self.masking_from_epoch)
                                  / (self.num_epochs - 1 - self.masking_from_epoch))
            else:
                percentage = self.moving_mask_percent
            try:
                self.iou_thresh['non_moving'] = np.percentile(self.iou_log['non_moving'], (100 * percentage)).item()
            except Exception as e:
                self.iou_thresh['non_moving'] = 0.0
                print('Error calculating percentile of non_moving')
                print(e)
            try:
                self.iou_thresh['moving'] = np.percentile(self.iou_log['moving'], (100 * percentage)).item()
            except Exception as e:
                self.iou_thresh['moving'] = 0.0
                print('Error calculating percentile of moving')
                print(e)
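
A rough sketch of how DCMasking might be wired into a training loop; model, train_loader and the warping step that adds the ("segmentation_warped", frame_id, 0) entries come from outside this snippet and are assumptions:

    masking = DCMasking(masking_from_epoch=10, num_epochs=20,
                        moving_mask_percent=0.5, masking_linear_increase=False)
    for epoch in range(20):
        masking.clear_iou_log()
        for batch in train_loader:
            outputs_masked = masking.compute_segmentation_frames(batch, model)
            # ... warp the t != 0 segmentations into frame 0 here, then:
            for output in outputs_masked:
                if output is not None:
                    masking.compute_moving_mask(output)
        # Update the IoU thresholds used in the next epoch
        masking.calculate_iou_threshold(epoch)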
Example 4
class Demo(object):
    def __init__(self, options):
        self.opt = options
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")

        # Assertions
        assert os.path.isfile(self.opt.image), "Invalid image!"
        # str.replace returns a new string, so the result must be reassigned
        self.opt.image = self.opt.image.replace('/', os.sep)
        self.opt.image = self.opt.image.replace('\\', os.sep)
        self.image_name = self.opt.image.split(os.sep)[-1]

        if self.opt.model_stage == 1:
            assert self.opt.task in {1}, "Invalid task!"
            assert not self.opt.with_weights, "Weights for stage 1 not available"
        elif self.opt.model_stage == 2:
            assert self.opt.task in {1, 2, 12}, "Invalid task!"
        elif self.opt.model_stage == 3:
            assert self.opt.task in {1, 2, 3, 12, 123}, "Invalid task!"

        # Model and task set-up
        self.num_classes_model = {1: 5, 2: 11, 3: 19}[self.opt.model_stage]
        self.task_low, self.task_high = {1: (0, 5), 2: (5, 11), 3: (11, 19), 12: (0, 11), 123: (0, 19)}[self.opt.task]

        # Create a conventional ERFNet
        self.model = ERFNet(self.num_classes_model, self.opt)
        self._load_model()
        self.model.to(self.device)
        self.model.eval()

        # Ground truth
        self.metrics = False
        if self.opt.ground_truth:
            assert os.path.isfile(self.opt.ground_truth), "Invalid ground truth!"
            self.metrics = True
            self.num_classes_score = self.task_high - self.task_low
            self.metric_model = SegmentationRunningScore(self.num_classes_score)

        # Output directory
        if self.opt.output_path:
            if not os.path.isdir(self.opt.output_path):
                os.makedirs(self.opt.output_path)
        else:
            # Default to the directory containing the input image
            self.opt.output_path = os.path.dirname(self.opt.image)
        image_extension_idx = self.image_name.rfind('.')
        segmentation_name = self.image_name[:image_extension_idx] + \
                            "_seg_stage_{}_task_{}".format(self.opt.model_stage, self.opt.task) + \
                            self.image_name[image_extension_idx:]
        self.output_image = os.path.join(self.opt.output_path, segmentation_name)
        ground_truth_name = self.image_name[:image_extension_idx] + \
                            "_gt_stage_{}_task_{}".format(self.opt.model_stage, self.opt.task) + \
                            self.image_name[image_extension_idx:]
        self.output_gt = os.path.join(self.opt.output_path, ground_truth_name)

        # stdout output
        print("++++++++++++++++++++++ INIT DEMO ++++++++++++++++++++++++")
        print("Image:\t {}".format(self.opt.image))
        print("GT:\t {}".format(self.opt.ground_truth))
        print("Output:\t {}".format(self.opt.output_path))
        print("Stage:\t {}".format(self.opt.model_stage))
        print("Weights: {}".format(self.opt.with_weights))
        print("Task:\t {}".format(self.opt.task))
        print("!!! MIND THAT THE MODELS WERE TRAINED USING AN IMAGE RESOLUTION OF 1024x512px !!!")

        # Class colours
        labels = labels_cityscape_seg_train3_eval.getlabels()
        colors = [(label.trainId - self.task_low, label.color) for label in labels
                  if label.trainId != 255 and label.trainId in range(0, 19)]
        colors.append((255, (0, 0, 0)))  # void class
        self.id_color = dict(colors)
        self.id_color_keys = list(self.id_color.keys())
        self.id_color_vals = list(self.id_color.values())


    def _load_model(self):
        """Load model from disk
        """
        path = self.opt.checkpoint_path
        # checkpoint_path = os.path.join("models", "stage_{}".format(self.opt.model_stage))
        #assert os.path.isdir(checkpoint_path), \
        #    "Cannot find folder {}".format(checkpoint_path)

        # path = os.path.join(checkpoint_path, "{}.pth".format("with_weights" if self.opt.with_weights else "wout_weights"))
        model_dict = self.model.state_dict()
        if self.opt.no_cuda:
            pretrained_dict = torch.load(path, map_location='cpu')
        else:
            pretrained_dict = torch.load(path)
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        self.model.load_state_dict(model_dict)

    def process_image(self):
        # Required image transformations
        resize_interp = transforms.Resize((512, 1024), interpolation=pil.BILINEAR)
        transformer = transforms.ToTensor()
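        # Normalize with the standard ImageNet channel statistics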
        normalize = transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))

        # Load Image
        image = cv2.imread(self.opt.image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = pil.fromarray(image)
        native_image_size = image.size

        # Transform image
        image = resize_interp(image)
        image = transformer(image)
        image = normalize(image).unsqueeze(0).to(self.device)

        # Process image
        input_rgb = {("color_aug", 0, 0): image}
        output = self.model(input_rgb)

        # Process network output
        pred_seg = output['segmentation_logits'].float()
        pred_seg = pred_seg[:, self.task_low:self.task_high, ...]
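        # PIL's size is (width, height) while F.interpolate expects (height, width)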
        pred_seg = F.interpolate(pred_seg, (native_image_size[1], native_image_size[0]), mode='nearest')
        pred_seg = torch.argmax(pred_seg, dim=1)
        pred_seg = pred_seg.cpu().numpy()

        # Process ground truth
        gt = None
        if self.opt.ground_truth:
            gt = cv2.imread(self.opt.ground_truth, 0)
            gt[gt < self.task_low] = 255
            gt[gt >= self.task_high] = 255
            gt -= self.task_low
            gt[gt == 255 - self.task_low] = 255
            gt = np.expand_dims(gt, 0)

            self.metric_model.update(gt, pred_seg)
            metrics = self.metric_model.get_scores()
            self._save_metrics(metrics)
            print("\n  " + ("{:>8} | " * 2).format("miou", "maccuracy"))
            print(("&{: 8.3f}  " * 2).format(metrics['meaniou'], metrics['meanacc']) + "\\\\")

        # Save prediction to disk
        self._save_pred_to_disk(pred_seg, gt)

        print("\n-> Done!")


    def _save_metrics(self, metrics):
        ''' Save metrics (class-wise) to disk as HDF5 file.
        '''
        save_path = os.path.join(self.opt.output_path, "demo.h5")

        with h5.File(save_path, 'w') as f:
            grp = f

            # Write mean_IoU, mean_acc and mean prec to file / group
            dset = grp.create_dataset('mean_IoU', data=metrics['meaniou'])
            dset.attrs['Description'] = 'See trainIDs for information on the classes'
            dset = grp.create_dataset('mean_recall', data=metrics['meanacc'])
            dset.attrs['Description'] = 'See trainIDs for information on the classes'
            dset.attrs['AKA'] = 'Accuracy -> TP / (TP + FN)'
            dset = grp.create_dataset('mean_precision', data=metrics['meanprec'])
            dset.attrs['Description'] = 'See trainIDs for information on the classes'
            dset.attrs['AKA'] = 'Precision -> TP / (TP + FP)'

            ids = np.zeros(shape=(len(metrics['iou'])), dtype=np.uint32)

            class_iou = np.zeros(shape=(len(metrics['iou'])), dtype=np.float64)
            class_acc = np.zeros(shape=(len(metrics['acc'])), dtype=np.float64)
            class_prec = np.zeros(shape=(len(metrics['prec'])), dtype=np.float64)

            # Disassemble the dictionary
            for key, i in zip(sorted(metrics['iou']), range(len(metrics['iou']))):
                class_iou[i] = metrics['iou'][key]
                class_acc[i] = metrics['acc'][key]
                class_prec[i] = metrics['prec'][key]

            # Create class_id dataset only once in first layer of HDF5 file when in 'w' mode
            dset = f.create_dataset('trainIDs', data=ids)
            dset.attrs['Description'] = 'trainIDs of classes'

            dset = grp.create_dataset('class_IoU', data=class_iou)
            dset.attrs['Description'] = 'See trainIDs for information on the class order'
            dset = grp.create_dataset('class_recall', data=class_acc)
            dset.attrs['Description'] = 'See trainIDs for information on the class order'
            dset.attrs['AKA'] = 'Accuracy -> TP / (TP + FN)'
            dset = grp.create_dataset('class_precision', data=class_prec)
            dset.attrs['Description'] = 'See trainIDs for information on the class order'
            dset.attrs['AKA'] = 'Precision -> TP / (TP + FP)'

    def _save_pred_to_disk(self, pred, gt=None):
        ''' Save a correctly coloured image of the prediction (batch) to disk.
        '''
        pred = pred[0]
        o_size = pred.shape
        single_pred = pred.flatten()

        if gt is not None:
            single_gt = gt[0].flatten()
            single_pred[single_gt == 255] = 255
            single_gt = self._convert_to_colour(single_gt, o_size)
            cv2.imwrite(self.output_gt, single_gt)

        single_pred = self._convert_to_colour(single_pred, o_size)
        cv2.imwrite(self.output_image, single_pred)


    def _convert_to_colour(self, img, o_size):
        ''' Replace trainIDs in prediction with colours from dict, reshape it afterwards to input dimensions and
            convert RGB to BGR to match openCV's colour system.
        '''
        sort_idx = np.argsort(self.id_color_keys)
        idx = np.searchsorted(self.id_color_keys, img, sorter=sort_idx)
        img = np.asarray(self.id_color_vals)[sort_idx][idx]
        img = img.astype(np.uint8)
        img = np.reshape(img, newshape=(o_size[0], o_size[1], 3))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        return img
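
The intended entry point might look like this sketch; DemoOptions is a hypothetical argument parser exposing the attributes accessed above (image, ground_truth, output_path, model_stage, task, ...):

    if __name__ == "__main__":
        options = DemoOptions().parse()
        demo = Demo(options)
        demo.process_image()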
Example 5
class Trainer:
    def __init__(self, options):

        print(" -> Executing script", os.path.basename(__file__))

        self.opt = options
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LABELS AND CITIES
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        assert self.opt.train_set in {123, 1}, "Invalid train_set!"
        keys_to_load = ['color', 'segmentation']

        # Labels
        if self.opt.train_set == 1:
            labels = labels_cityscape_seg_train1.getlabels()
        else:
            labels = labels_cityscape_seg_train3_eval.getlabels()

        # Train IDs
        self.train_ids = set([labels[i].trainId for i in range(len(labels))])
        self.train_ids.remove(255)

        self.num_classes = len(self.train_ids)

        # Apply city filter
        folders_to_train = CitySet.get_city_set(0)
        if self.opt.city:
            folders_to_train = CitySet.get_city_set(self.opt.train_set)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           DATASET DEFINITIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Data augmentation
        train_data_transforms = [
            mytransforms.RandomHorizontalFlip(),
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=keys_to_load),
            mytransforms.RandomRescale(1.5),
            mytransforms.RandomCrop(
                (self.opt.crop_height, self.opt.crop_width)),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.ColorJitter(brightness=0.2,
                                     contrast=0.2,
                                     saturation=0.2,
                                     hue=0.1,
                                     gamma=0.0),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        train_dataset = CityscapesDataset(
            dataset="cityscapes",
            trainvaltest_split='train',
            video_mode='mono',
            stereo_mode='mono',
            scales=self.opt.scales,
            labels_mode='fromid',
            labels=labels,
            keys_to_load=keys_to_load,
            data_transforms=train_data_transforms,
            video_frames=self.opt.video_frames,
            folders_to_load=folders_to_train,
        )

        self.train_loader = DataLoader(dataset=train_dataset,
                                       batch_size=self.opt.batch_size,
                                       shuffle=True,
                                       num_workers=self.opt.num_workers,
                                       pin_memory=True,
                                       drop_last=True)

        val_data_transforms = [
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=keys_to_load),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        val_dataset = CityscapesDataset(
            dataset=self.opt.dataset,
            trainvaltest_split="train",
            video_mode='mono',
            stereo_mode='mono',
            scales=self.opt.scales,
            labels_mode='fromid',
            labels=labels,
            keys_to_load=keys_to_load,
            data_transforms=val_data_transforms,
            video_frames=self.opt.video_frames,
            folders_to_load=CitySet.get_city_set(-1))

        self.val_loader = DataLoader(dataset=val_dataset,
                                     batch_size=self.opt.batch_size,
                                     shuffle=False,
                                     num_workers=self.opt.num_workers,
                                     pin_memory=True,
                                     drop_last=True)

        self.val_iter = iter(self.val_loader)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOGGING OPTIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        print(
            "++++++++++++++++++++++ INIT TRAINING ++++++++++++++++++++++++++")
        print("Using dataset:\n  ", self.opt.dataset, "with split",
              self.opt.dataset_split)
        print(
            "There are {:d} training items and {:d} validation items\n".format(
                len(train_dataset), len(val_dataset)))

        path_getter = GetPath()
        log_path = path_getter.get_checkpoint_path()
        self.log_path = os.path.join(log_path, 'erfnet', self.opt.model_name)

        self.writers = {}
        for mode in ["train", "validation"]:
            self.writers[mode] = SummaryWriter(
                os.path.join(self.log_path, mode))

        # Copy this file to log dir
        shutil.copy2(__file__, self.log_path)

        print("Training model named:\n  ", self.opt.model_name)
        print("Models and tensorboard events files are saved to:\n  ",
              self.log_path)
        print("Training is using:\n  ", self.device)
        print("Training takes place on train set:\n  ", self.opt.train_set)
        print(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           MODEL DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Instantiate model
        self.model = ERFNet(self.num_classes, self.opt)
        self.model.to(self.device)
        self.parameters_to_train = self.model.parameters()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           OPTIMIZER SET-UP
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        self.model_optimizer = optim.Adam(params=self.parameters_to_train,
                                          lr=self.opt.learning_rate,
                                          weight_decay=self.opt.weight_decay)
        lambda1 = lambda epoch: pow((1 -
                                     ((epoch - 1) / self.opt.num_epochs)), 0.9)
        self.model_lr_scheduler = optim.lr_scheduler.LambdaLR(
            self.model_optimizer, lr_lambda=lambda1)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOSSES
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        self.crossentropy = CrossEntropyLoss(ignore_background=True,
                                             device=self.device)
        self.crossentropy.to(self.device)

        self.metric_model = SegmentationRunningScore(self.num_classes)

        # Save all options to disk and print them to stdout
        self.save_opts(len(train_dataset), len(val_dataset))
        self._print_options()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           EVALUATOR DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        if self.opt.validate:
            self.evaluator = Evaluator(self.opt, self.model)
Ejemplo n.º 9
0
class Trainer:
    def __init__(self, options):

        print(" -> Executing script", os.path.basename(__file__))

        self.opt = options
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LABELS AND CITIES
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        assert self.opt.train_set in {123, 1}, "Invalid train_set!"
        keys_to_load = ['color', 'segmentation']

        # Labels
        if self.opt.train_set == 1:
            labels = labels_cityscape_seg_train1.getlabels()
        else:
            labels = labels_cityscape_seg_train3_eval.getlabels()

        # Train IDs
        self.train_ids = set([labels[i].trainId for i in range(len(labels))])
        self.train_ids.remove(255)

        self.num_classes = len(self.train_ids)

        # Apply city filter
        folders_to_train = CitySet.get_city_set(0)
        if self.opt.city:
            folders_to_train = CitySet.get_city_set(self.opt.train_set)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           DATASET DEFINITIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Data augmentation
        train_data_transforms = [
            mytransforms.RandomHorizontalFlip(),
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=keys_to_load),
            mytransforms.RandomRescale(1.5),
            mytransforms.RandomCrop(
                (self.opt.crop_height, self.opt.crop_width)),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.ColorJitter(brightness=0.2,
                                     contrast=0.2,
                                     saturation=0.2,
                                     hue=0.1,
                                     gamma=0.0),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        train_dataset = CityscapesDataset(
            dataset="cityscapes",
            trainvaltest_split='train',
            video_mode='mono',
            stereo_mode='mono',
            scales=self.opt.scales,
            labels_mode='fromid',
            labels=labels,
            keys_to_load=keys_to_load,
            data_transforms=train_data_transforms,
            video_frames=self.opt.video_frames,
            folders_to_load=folders_to_train,
        )

        self.train_loader = DataLoader(dataset=train_dataset,
                                       batch_size=self.opt.batch_size,
                                       shuffle=True,
                                       num_workers=self.opt.num_workers,
                                       pin_memory=True,
                                       drop_last=True)

        val_data_transforms = [
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=keys_to_load),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        val_dataset = CityscapesDataset(
            dataset=self.opt.dataset,
            trainvaltest_split="train",
            video_mode='mono',
            stereo_mode='mono',
            scales=self.opt.scales,
            labels_mode='fromid',
            labels=labels,
            keys_to_load=keys_to_load,
            data_transforms=val_data_transforms,
            video_frames=self.opt.video_frames,
            folders_to_load=CitySet.get_city_set(-1))

        self.val_loader = DataLoader(dataset=val_dataset,
                                     batch_size=self.opt.batch_size,
                                     shuffle=False,
                                     num_workers=self.opt.num_workers,
                                     pin_memory=True,
                                     drop_last=True)

        self.val_iter = iter(self.val_loader)
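        # val_iter is kept as a persistent iterator so that val() below can
        # fetch a single validation minibatch per call during training.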

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOGGING OPTIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        print(
            "++++++++++++++++++++++ INIT TRAINING ++++++++++++++++++++++++++")
        print("Using dataset:\n  ", self.opt.dataset, "with split",
              self.opt.dataset_split)
        print(
            "There are {:d} training items and {:d} validation items\n".format(
                len(train_dataset), len(val_dataset)))

        path_getter = GetPath()
        log_path = path_getter.get_checkpoint_path()
        self.log_path = os.path.join(log_path, 'erfnet', self.opt.model_name)

        self.writers = {}
        for mode in ["train", "validation"]:
            self.writers[mode] = SummaryWriter(
                os.path.join(self.log_path, mode))

        # Copy this file to log dir
        shutil.copy2(__file__, self.log_path)

        print("Training model named:\n  ", self.opt.model_name)
        print("Models and tensorboard events files are saved to:\n  ",
              self.log_path)
        print("Training is using:\n  ", self.device)
        print("Training takes place on train set:\n  ", self.opt.train_set)
        print(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           MODEL DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Instantiate model
        self.model = ERFNet(self.num_classes, self.opt)
        self.model.to(self.device)
        self.parameters_to_train = self.model.parameters()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           OPTIMIZER SET-UP
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        self.model_optimizer = optim.Adam(params=self.parameters_to_train,
                                          lr=self.opt.learning_rate,
                                          weight_decay=self.opt.weight_decay)
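        # Polynomial decay: LambdaLR multiplies the base lr by
        # (1 - (epoch - 1) / num_epochs)^0.9 each epoch. LambdaLR counts
        # epochs from 0, so the very first factor is marginally above 1.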
        lambda1 = lambda epoch: pow((1 -
                                     ((epoch - 1) / self.opt.num_epochs)), 0.9)
        self.model_lr_scheduler = optim.lr_scheduler.LambdaLR(
            self.model_optimizer, lr_lambda=lambda1)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOSSES
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        self.crossentropy = CrossEntropyLoss(ignore_background=True,
                                             device=self.device)
        self.crossentropy.to(self.device)

        self.metric_model = SegmentationRunningScore(self.num_classes)

        # Save all options to disk and print them to stdout
        self.save_opts(len(train_dataset), len(val_dataset))
        self._print_options()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           EVALUATOR DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        if self.opt.validate:
            self.evaluator = Evaluator(self.opt, self.model)

    def set_train(self):
        """Set the model to training mode
        """
        self.model.train()

    def set_eval(self):
        """Set the model to evaluation mode
        """
        self.model.eval()

    def train(self):
        """Run the entire training pipeline
        """
        self.epoch = 0
        self.step = 0
        self.start_time = time.time()

        for self.epoch in range(self.opt.num_epochs):
            self.run_epoch()
            if (self.epoch + 1) % self.opt.save_frequency == 0:
                self.save_model()
            if (self.opt.validate
                    and (self.epoch + 1) % self.opt.val_frequency == 0):
                self.run_eval()

    def run_epoch(self):
        """Run a single epoch of training and validation
        """
        print("Training")
        self.set_train()

        for batch_idx, inputs in enumerate(self.train_loader):

            before_op_time = time.time()

            outputs, losses = self.process_batch(inputs)

            self.model_optimizer.zero_grad()
            losses["loss"].backward()
            self.model_optimizer.step()
            duration = time.time() - before_op_time

            # log less frequently after the first 2000 steps to save time & disk space
            early_phase = batch_idx % self.opt.log_frequency == 0 and self.step < 2000
            late_phase = self.step % 2000 == 0

            if early_phase or late_phase:
                if ('segmentation', 0, 0) in inputs:
                    metrics = self.compute_segmentation_losses(inputs, outputs)
                    self.log_time(batch_idx, duration,
                                  losses["loss"].cpu().data,
                                  metrics["meaniou"], metrics["meanacc"])
                else:
                    self.log_time(batch_idx, duration,
                                  losses["loss"].cpu().data, 0, 0)
                    metrics = {}
                self.log("train", losses, metrics)
                self.val()
            self.step += 1

        self.model_lr_scheduler.step()

    def run_eval(self):
        print("Validating on full validation set")
        self.set_eval()

        self.evaluator.calculate_metrics(self.epoch)

    def val(self):
        """Validate the model on a single minibatch
        """
        self.set_eval()
        try:
            inputs_val = next(self.val_iter)
        except StopIteration:
            self.val_iter = iter(self.val_loader)
            inputs_val = next(self.val_iter)

        with torch.no_grad():
            outputs_val, losses_val = self.process_batch(inputs_val)

            if ('segmentation', 0, 0) in inputs_val:
                metrics_val = self.compute_segmentation_losses(
                    inputs_val, outputs_val)
            else:
                metrics_val = {}

            self.log("validation", losses_val, metrics_val)

        self.set_train()

    def process_batch(self, inputs):
        """Pass a minibatch through the network and generate images and losses
        """
        for key, ipt in inputs.items():
            inputs[key] = ipt.to(self.device)
        outputs = self.model(inputs)
        losses = self.compute_losses(inputs, outputs)

        return outputs, losses

    def compute_losses(self, inputs, outputs):
        """Compute the reprojection and smoothness losses for a minibatch
        """
        losses = {}
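        # The project-specific CrossEntropyLoss is fed log-probabilities here
        # (log_softmax over the class dimension), unlike
        # torch.nn.CrossEntropyLoss, which expects raw logits.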
        preds = F.log_softmax(outputs['segmentation_logits'].float(), dim=1)
        targets = inputs[('segmentation', 0, 0)][:, 0, :, :].long()
        cross_loss = self.crossentropy(preds, targets)
        losses["loss"] = cross_loss

        return losses

    def compute_segmentation_losses(self, inputs, outputs):
        """Compute the loss metrics based on the current prediction
        """
        label_true = np.array(inputs[('segmentation', 0, 0)].cpu())[:, 0, :, :]
        label_pred = np.array(outputs['segmentation'].detach().cpu())
        self.metric_model.update(label_true, label_pred)
        metrics = self.metric_model.get_scores()
        self.metric_model.reset()
        return metrics

    def log_time(self, batch_idx, duration, loss, miou, acc):
        """Print a logging statement to the terminal
        """
        samples_per_sec = self.opt.batch_size / duration
        print_string = "epoch {:>3} | batch {:>6} | examples/s: {:5.1f}" + \
                       " | loss: {:.5f}| meaniou: {:.5f}| meanacc: {:.5f}"
        print(
            print_string.format(self.epoch, batch_idx, samples_per_sec, loss,
                                miou, acc))

    def log(self, mode, losses, metrics):
        """Write an event to the tensorboard events file
        """
        writer = self.writers[mode]
        for l, v in losses.items():
            writer.add_scalar("{}".format(l), v, self.step)
        for l, v in metrics.items():
            if l in {'iou', 'acc', 'prec'}:
                continue
            writer.add_scalar("{}".format(l), v, self.step)

    def save_opts(self, n_train, n_eval):
        """Save options to disk so we know what we ran this experiment with
        """
        models_dir = os.path.join(self.log_path, "models")
        if not os.path.exists(models_dir):
            os.makedirs(models_dir)
        to_save = self.opt.__dict__.copy()
        to_save['n_train'] = n_train
        to_save['n_eval'] = n_eval

        with open(os.path.join(models_dir, 'opt.json'), 'w') as f:
            json.dump(to_save, f, indent=2)

    def save_model(self):
        """Save model weights to disk
        """
        save_folder = os.path.join(self.log_path, "models",
                                   "weights_{}".format(self.epoch))
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)

        save_path = os.path.join(save_folder, "{}.pth".format("model"))
        to_save = self.model.state_dict()
        torch.save(to_save, save_path)

        save_path = os.path.join(save_folder, "{}.pth".format("optim"))
        torch.save(self.model_optimizer.state_dict(), save_path)

    def load_model(self, adam=True):
        """Load model(s) from disk
        :param adam: whether to load the Adam state too
        """
        base_path = os.path.split(self.log_path)[0]
        checkpoint_path = os.path.join(
            base_path, self.opt.load_model_name, 'models',
            'weights_{}'.format(self.opt.weights_epoch))
        assert os.path.isdir(checkpoint_path), \
            "Cannot find folder {}".format(checkpoint_path)
        print("loading model from folder {}".format(checkpoint_path))

        path = os.path.join(checkpoint_path, "{}.pth".format('model'))
        model_dict = self.model.state_dict()
        pretrained_dict = torch.load(path, map_location=self.device)
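        # Keep only checkpoint entries whose keys also exist in the current
        # model, so checkpoints with extra parameters can be loaded partially.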
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
        model_dict.update(pretrained_dict)
        self.model.load_state_dict(model_dict)

        if adam:
            # loading adam state
            optimizer_load_path = os.path.join(checkpoint_path,
                                               "{}.pth".format("optim"))
            if os.path.isfile(optimizer_load_path):
                print("Loading Adam weights")
                optimizer_dict = torch.load(optimizer_load_path,
                                            map_location=self.device)
                self.model_optimizer.load_state_dict(optimizer_dict)
            else:
                print(
                    "Cannot find Adam weights so Adam is randomly initialized")

    def _print_options(self):
        """Print training options to stdout so that they appear in the SLURM log
        """
        # Convert namespace to dictionary
        opts = vars(self.opt)

        # Get max key length for left justifying
        max_len = max([len(key) for key in opts.keys()])

        # Print options to stdout
        print(
            "+++++++++++++++++++++++++++ OPTIONS +++++++++++++++++++++++++++")
        for item in sorted(opts.items()):
            print(item[0].ljust(max_len), item[1])
        print(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
Ejemplo n.º 10
0
    def __init__(self, remaps=('none',)):
        self.scores = dict(
            # scores = dict of remap name -> empty confusion matrix
            (remap_name, SegmentationRunningScore(self._remap_len(remap_name)))
            for remap_name in remaps
        )
Ejemplo n.º 11
0
    def __init__(self, remaps=('none',)):
        self.scores = dict(
            (remap_name, SegmentationRunningScore(self._remap_len(remap_name)))
            for remap_name in remaps
        )