Example #1
    def calculate_scores(self, model, paths):
        model.eval()
        scores = []

        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         paths),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)
        path_ctr = 0
        entropy_maps = {}
        with torch.no_grad():
            for sample in tqdm(loader):
                image_batch = sample['image'].cuda()
                label_batch = sample['label'].cuda()

                softmax = torch.nn.Softmax2d()
                output = softmax(model(image_batch))
                num_classes = output.shape[1]
                for batch_idx in range(output.shape[0]):
                    entropy_map = torch.cuda.FloatTensor(
                        output.shape[2], output.shape[3]).fill_(0)
                    # Accumulate -p * log2(p) over the classes, using the
                    # num_classes value derived from the output shape above.
                    for c in range(num_classes):
                        entropy_map = entropy_map - (
                            output[batch_idx, c, :, :] *
                            torch.log2(output[batch_idx, c, :, :] + 1e-12))
                    # Ignore-index pixels (255) contribute no uncertainty.
                    entropy_map[label_batch[batch_idx, :, :] == 255] = 0
                    scores.append(entropy_map.mean().cpu().item())
                    entropy_maps[paths[path_ctr]] = entropy_map.cpu().numpy()
                    path_ctr += 1
                torch.cuda.empty_cache()
        return scores, entropy_maps
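
For reference, the per-class loop above can also be written as a single vectorized expression over the class dimension. The helper below is an illustrative sketch, not part of the repository; it assumes output already holds the Softmax2d probabilities of shape (N, C, H, W).

import torch

def batch_entropy(output, label_batch, ignore_index=255):
    # output: per-pixel class probabilities, shape (N, C, H, W).
    # Accumulate -p * log2(p) over the class dimension in one step.
    entropy = -(output * torch.log2(output + 1e-12)).sum(dim=1)  # (N, H, W)
    # Ignore-index pixels contribute no uncertainty.
    entropy[label_batch == ignore_index] = 0
    # One mean entropy score per image.
    return entropy.mean(dim=(1, 2))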
Example #2
    def calculate_scores(self, model, paths, return_score_maps=False):
        model.eval()
        # Keep dropout layers active at inference time (MC dropout) so that the
        # repeated forward passes inside batch_entropy_func are stochastic.
        model.apply(turn_on_dropout)
        scores = []

        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         paths),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)

        for sample in tqdm(loader, desc='Entropy'):
            image_batch = sample['image'].cuda()
            label_batch = sample['label'].cuda()
            if return_score_maps:
                scores.extend(
                    self.batch_entropy_func(model, image_batch, label_batch))
            else:
                scores.extend([
                    x.sum() for x in self.batch_entropy_func(
                        model, image_batch, label_batch)
                ])

        # Restore plain eval mode once scoring is done.
        model.eval()
        return scores
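
Note: turn_on_dropout is defined elsewhere in the repository. The sketch below shows what such a helper typically looks like for MC-dropout scoring; it is an assumption about its behaviour, not the repository's actual code.

import torch.nn as nn

def turn_on_dropout(module):
    # Called via model.apply(...): leave the rest of the network in eval mode
    # but switch Dropout layers back to train mode so repeated forward passes
    # produce stochastic (Monte Carlo dropout) predictions.
    if isinstance(module, (nn.Dropout, nn.Dropout2d)):
        module.train()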
Example #3
    def select_next_batch(self, model, training_set, selection_count):
        combined_paths = training_set.image_path_subset + training_set.remaining_image_paths
        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         combined_paths),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)
        # Length of the flattened, average-pooled feature vector per image and
        # the pooling window used to shrink the spatial feature maps.
        FEATURE_DIM = 2432
        average_pool_kernel_size = (32, 32)

        features = np.zeros((len(combined_paths), FEATURE_DIM))
        model.eval()
        model.set_return_features(True)

        average_pool_stride = average_pool_kernel_size[0] // 2
        with torch.no_grad():
            for batch_idx, sample in enumerate(tqdm(loader)):
                _, features_batch = model(sample['image'].cuda())
                features_batch = F.avg_pool2d(features_batch,
                                              average_pool_kernel_size,
                                              average_pool_stride)
                for feature_idx in range(features_batch.shape[0]):
                    features[batch_idx * self.batch_size +
                             feature_idx, :] = features_batch[
                                 feature_idx, :, :, :].cpu().numpy().flatten()

        model.set_return_features(False)
        selected_indices = self._select_batch(
            features, list(range(len(training_set.image_path_subset))),
            selection_count)
        training_set.expand_training_set(
            [combined_paths[i] for i in selected_indices])
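
Note: _select_batch is implemented elsewhere in the class. A common choice for selecting over pooled feature vectors is k-center-greedy (core-set) selection; the sketch below only illustrates that idea and is an assumption about what _select_batch might do, with already_selected indexing the currently labelled images.

import numpy as np

def k_center_greedy(features, already_selected, selection_count):
    # features: (N, D) array of pooled feature vectors.
    num_samples = features.shape[0]
    if already_selected:
        # Distance of every sample to its nearest already-selected sample.
        dists = np.linalg.norm(
            features[:, None, :] - features[already_selected][None, :, :],
            axis=2)
        min_dist = dists.min(axis=1)
    else:
        min_dist = np.full(num_samples, np.inf)

    selected = []
    for _ in range(selection_count):
        # Greedily pick the sample farthest from everything chosen so far.
        idx = int(np.argmax(min_dist))
        selected.append(idx)
        new_dist = np.linalg.norm(features - features[idx], axis=1)
        min_dist = np.minimum(min_dist, new_dist)
    return selected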
Example #4
def visualize_vote_view_entropy(lmdb_handle, base_size, paths,
                                indices_to_dataset, vote_entropy_scores,
                                view_entropy_scores, scores):
    from dataloader.paths import PathsDataset
    dataset = PathsDataset(lmdb_handle, base_size, paths)

    import matplotlib
    import matplotlib.pyplot as plt

    for j, i in enumerate(indices_to_dataset):
        image_unnormalized = (
            (np.transpose(dataset[i]['image'].numpy(), axes=[1, 2, 0]) *
             (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255).astype(
                 np.uint8)

        plt.figure()
        plt.title('display')
        plt.subplot(1, 4, 1)
        plt.imshow(image_unnormalized)
        plt.subplot(1, 4, 2)
        norm_ent = matplotlib.colors.Normalize(
            vmin=0, vmax=visualize_entropy.max_entropy, clip=False)
        plt.imshow(vote_entropy_scores[j, :, :], norm=norm_ent, cmap='jet')
        plt.subplot(1, 4, 3)
        plt.imshow(view_entropy_scores[j, :, :], norm=norm_ent, cmap='jet')
        plt.subplot(1, 4, 4)
        norm = matplotlib.colors.Normalize(vmin=np.min(scores[j]),
                                           vmax=np.max(scores[j]),
                                           clip=False)
        plt.imshow(scores[j], norm=norm, cmap='jet')
        plt.savefig(os.path.join(constants.RUNS, 'image_dumps',
                                 f'ent_{visualize_entropy.save_idx:04d}.png'),
                    bbox_inches='tight')
        visualize_entropy.save_idx += 1
        plt.close()
Example #5
    def calculate_scores(self, model, paths):
        model.eval()
        scores = []

        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         paths),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)

        with torch.no_grad():
            for sample in tqdm(loader):
                image_batch = sample['image'].cuda()
                label_batch = sample['label'].cuda()
                softmax = torch.nn.Softmax2d()
                output = softmax(model(image_batch))
                for batch_idx in range(output.shape[0]):
                    # Highest per-pixel class probability.
                    most_confident_scores = torch.max(
                        output[batch_idx, :, :, :].squeeze(),
                        dim=0)[0].cpu().numpy()
                    output_numpy = output[batch_idx, :, :, :].cpu().numpy()
                    # Sort along the class axis and take the second-largest
                    # probability at every pixel.
                    ndx = np.indices(output_numpy.shape)
                    second_most_confident_scores = output_numpy[
                        output_numpy.argsort(0), ndx[1], ndx[2]][-2]
                    margin = most_confident_scores - second_most_confident_scores
                    # Ignore-index pixels get the maximum margin so they never
                    # look uncertain.
                    margin[(
                        label_batch[batch_idx, :, :] == 255).cpu().numpy()] = 1
                    scores.append(np.mean(margin))
                del output, margin
                torch.cuda.empty_cache()
        return scores
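
The argsort-based second-best lookup above can equivalently be done with torch.topk, which avoids the NumPy round trip. The helper below is only a sketch of that alternative, assuming output holds Softmax2d probabilities of shape (N, C, H, W).

import torch

def batch_margin(output, label_batch, ignore_index=255):
    # Top-2 per-pixel class probabilities along the class dimension.
    top2 = torch.topk(output, k=2, dim=1).values  # (N, 2, H, W)
    # Margin = best minus second-best probability; lower means more uncertain.
    margin = top2[:, 0, :, :] - top2[:, 1, :, :]  # (N, H, W)
    # Ignore-index pixels get the maximum margin so they never look uncertain.
    margin[label_batch == ignore_index] = 1.0
    # One mean margin per image.
    return margin.mean(dim=(1, 2))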
Example #6
    def select_next_batch_with_windows(self, model, training_set,
                                       selection_count):
        model.eval()
        model.apply(turn_on_dropout)

        # All-ones kernel used below to sum entropy over each candidate window.
        weights = torch.cuda.FloatTensor(self.region_size,
                                         self.region_size).fill_(1.)
        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         training_set.all_train_paths),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)
        map_ctr = 0
        scores = []

        for sample in tqdm(loader, desc='Entropy'):
            image_batch = sample['image'].cuda()
            label_batch = sample['label'].cuda()
            for batch_idx, entropy_map in enumerate(
                    self.vote_entropy_selector.batch_entropy_func(
                        model, image_batch, label_batch)):
                if training_set.all_train_paths[
                        map_ctr] in training_set.get_selections():
                    entropy_map[training_set.get_selections()[
                        training_set.all_train_paths[map_ctr]] == 1] = 0
                # Summed entropy of every region_size x region_size window,
                # computed as a convolution with the all-ones kernel.
                convolution_output = torch.nn.functional.conv2d(
                    torch.cuda.FloatTensor(entropy_map).unsqueeze(0).unsqueeze(
                        0),
                    weights.unsqueeze(0).unsqueeze(0)).squeeze().squeeze()
                scores.extend(self.nms(map_ctr, convolution_output))
                map_ctr += 1

        # Keep the top-scoring windows, enough to cover roughly
        # selection_count full images worth of pixels.
        selected_samples = sorted(
            scores, key=lambda x: x[3],
            reverse=True)[:int(0.5 + selection_count * self.base_size[0] *
                               self.base_size[1] /
                               (self.region_size * self.region_size))]
        print('Last selected sample: ', selected_samples[-1])
        selected_regions = OrderedDict()

        total_pixels_selected = 0
        for ss in selected_samples:
            mask = np.zeros(self.base_size, dtype=bool)
            mask[ss[1]:ss[1] + self.region_size,
                 ss[2]:ss[2] + self.region_size] = True
            valid_pixels = mask.sum()
            total_pixels_selected += valid_pixels
            if training_set.all_train_paths[ss[0]] in selected_regions:
                selected_regions[training_set.all_train_paths[
                    ss[0]]] = selected_regions[training_set.all_train_paths[
                        ss[0]]] | mask
            else:
                selected_regions[training_set.all_train_paths[ss[0]]] = mask

        model.eval()
        print('Selected',
              total_pixels_selected / (self.base_size[0] * self.base_size[1]),
              'image-equivalents of pixels')
        training_set.expand_training_set(selected_regions, [])
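
Note: self.nms is defined elsewhere in the class. Judging from how its output is used above (tuples sorted by the fourth element and unpacked as image index, row, column, score), it performs a greedy non-maximum suppression over the window scores; the sketch below only illustrates that idea and is not the repository's implementation.

import torch

def nms_regions(image_idx, window_scores, region_size):
    # window_scores[r, c] is the summed entropy of the region whose top-left
    # corner is (r, c). Greedily take the best window, record it, then zero
    # out every window that would overlap it, and repeat.
    score_map = window_scores.clone()
    regions = []
    while torch.any(score_map > 0):
        flat_idx = int(torch.argmax(score_map))
        r, c = divmod(flat_idx, score_map.shape[1])
        regions.append((image_idx, r, c, float(score_map[r, c])))
        r0 = max(0, r - region_size + 1)
        c0 = max(0, c - region_size + 1)
        score_map[r0:r + region_size, c0:c + region_size] = 0
    return regions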
Example #7
    def select_next_batch(self, model, training_set, selection_count):
        scores, entropy_maps = self.calculate_scores(
            model, training_set.remaining_image_paths)
        selected_samples = list(
            zip(*sorted(zip(scores, training_set.remaining_image_paths),
                        key=lambda x: x[0],
                        reverse=True)))[1][:selection_count]
        print(
            f'Selected Samples: {len(selected_samples)}/{len(training_set.remaining_image_paths)}'
        )
        unselected_samples = [
            x for x in entropy_maps if x not in selected_samples
        ]

        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         unselected_samples),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)
        pseudolabels = {}
        path_ctr = 0
        pseudo_images_selected = 0
        with torch.no_grad():
            for sample in tqdm(loader):
                image_batch = sample['image'].cuda()
                label_batch = sample['label'].numpy()
                output = model(image_batch)
                for batch_idx in range(output.shape[0]):
                    prediction = np.argmax(
                        output[batch_idx, :, :, :].cpu().numpy().squeeze(),
                        axis=0).astype(np.uint8)
                    prediction[label_batch[batch_idx, :, :] == 255] = 255
                    # Pixels whose entropy is below the current threshold are
                    # trusted enough to be used as pseudo-labels.
                    qualified_area = entropy_maps[unselected_samples[
                        path_ctr]] <= self.current_entropy_threshold
                    qualified_area[label_batch[batch_idx, :, :] == 255] = False
                    if np.any(qualified_area):
                        # Keep the prediction only inside the trusted area;
                        # everything else becomes the ignore index (255).
                        prediction[~qualified_area] = 255
                        pseudolabels[unselected_samples[path_ctr]] = prediction
                        pseudo_images_selected += qualified_area.sum() / (
                            self.base_size[0] * self.base_size[1])
                    path_ctr += 1
        model.eval()
        print(
            f'Pseudo Samples: {pseudo_images_selected}/{len(unselected_samples)}'
        )
        training_set.expand_training_set(selected_samples, pseudolabels)
        self.current_entropy_threshold -= self.entropy_change_per_selection
Example #8
    def calculate_scores(self, model, paths):
        model.eval()
        scores = []

        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         paths),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)

        with torch.no_grad():
            for sample in tqdm(loader):
                image_batch = sample['image'].cuda()
                label_batch = sample['label'].cuda()
                softmax = torch.nn.Softmax2d()
                # Per-pixel confidence: probability of the most likely class.
                max_conf_batch = torch.max(softmax(model(image_batch)),
                                           dim=1)[0]

                for batch_idx in range(max_conf_batch.shape[0]):
                    # Ignore-index pixels count as fully confident.
                    max_conf_batch[batch_idx,
                                   label_batch[batch_idx, :, :] == 255] = 1
                    scores.append(
                        torch.mean(max_conf_batch[batch_idx, :, :]).cpu().item())
                del max_conf_batch
                torch.cuda.empty_cache()
        return scores
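
A typical way to use these least-confidence scores (illustrative only; the variable names below are hypothetical) is to rank the remaining images by ascending mean confidence and label the least confident ones first:

scores = selector.calculate_scores(model, remaining_image_paths)
ranked = sorted(zip(scores, remaining_image_paths), key=lambda pair: pair[0])
selected_paths = [path for _, path in ranked[:selection_count]]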
Example #9
    def _get_features_for_images(self, model, images):
        features = []
        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size,
                                         images),
                            batch_size=self.batch_size,
                            shuffle=False,
                            num_workers=0)
        model.eval()
        model.set_return_features(True)
        average_pool_kernel_size = (32, 32)
        average_pool_stride = average_pool_kernel_size[0] // 2
        with torch.no_grad():
            for batch_idx, sample in enumerate(tqdm(loader)):
                image_batch = sample['image'].cuda()
                _, features_batch = model(image_batch)
                for feature_idx in range(features_batch.shape[0]):
                    # Average-pool each spatial feature map and flatten it
                    # into a single feature vector per image.
                    features.append(
                        F.avg_pool2d(features_batch[feature_idx, :, :, :],
                                     average_pool_kernel_size,
                                     average_pool_stride).squeeze().cpu()
                        .numpy().flatten())
        model.set_return_features(False)
        return features