Example #1
    def update_matching_counts(self, pred_boxes, target_boxes, counts):
        # Accumulate per-threshold TP/FP/FN counts from the pairwise box IoUs
        boxes_iou = iou(pred_boxes, target_boxes)
        # Best IoU for every prediction and for every ground-truth box
        pred_max_iou, _ = torch.max(boxes_iou, dim=1)
        gt_max_iou, _ = torch.max(boxes_iou, dim=0)

        for threshold in self.thresholds:
            matching_preds = pred_max_iou > threshold
            TP = matching_preds.sum().item()
            FP = matching_preds.numel() - TP
            matching_gt = gt_max_iou > threshold
            FN = matching_gt.numel() - matching_gt.sum().item()

            old_TP, old_FP, old_FN = counts[threshold]
            counts[threshold] = (old_TP + TP, old_FP + FP, old_FN + FN)

    def event(self, event):
        if event['name'] == 'minibatch' and event['phase'] == 'train':
            # Move the minibatch to the GPU; the segmentation labels arrive
            # one-hot, so convert them to class indices for cross-entropy
            image_cu = event['inputs']['image'].cuda(non_blocking=True)
            label_cu = event['labels']['segmentation'].cuda(non_blocking=True)
            label_cu = torch.argmax(label_cu, 1).long()

            segmentation_result = self.forward(image_cu)

            loss = self.loss_cce(segmentation_result, label_cu)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Visualize the first sample of the batch: input image, label and
            # prediction
            image = event['inputs']['image'].detach().numpy()
            image = image[0]
            image = np.moveaxis(image, 0, -1)
            cv2.imshow('image', image)

            label = event['labels']['segmentation'].detach().numpy()
            label_vis = label[0]
            label_vis = label_to_image(label_vis)
            cv2.imshow('label', label_vis)

            prediction = torch.softmax(segmentation_result.detach(),
                                       1).cpu().numpy()
            prediction_vis = prediction[0]
            prediction_vis = label_to_image(prediction_vis)
            cv2.imshow('prediction', prediction_vis)

            cv2.waitKey(1)

            # Compute IoU metrics on the current batch and log them
            class_iou_mean = class_iou(prediction, label).mean()
            iou_result = iou(prediction, label)

            wandb.log({
                "class_iou_mean": class_iou_mean,
                "iou_result": iou_result,
                "loss": loss.detach().cpu().numpy(),
            })

        if event['name'] == 'epoch_end':
            print('')
            self.save()
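
The counts dictionary that update_matching_counts fills maps each IoU threshold to a running (TP, FP, FN) tuple. A minimal sketch of turning those accumulated counts into per-threshold precision and recall; the helper name precision_recall is an assumption, not part of the original example:

    def precision_recall(counts):
        # counts: {threshold: (TP, FP, FN)}, as accumulated above
        metrics = {}
        for threshold, (TP, FP, FN) in counts.items():
            precision = TP / (TP + FP) if TP + FP > 0 else 0.0
            recall = TP / (TP + FN) if TP + FN > 0 else 0.0
            metrics[threshold] = (precision, recall)
        return metrics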
Example #3
    def classify(detection, ground_truths, iou_thres):
        """
        Classify a detection as TP, FP or FN.

        :param detection: detected box, carrying a class_id attribute
        :param ground_truths: array of ground-truth boxes
        :param iou_thres: minimum IoU for a detection to match a ground truth
        :return: classification, 'unused' ground_truths
        """
        classification = Classification.FALSE_POSITIVE
        if len(ground_truths) == 0:
            return classification, ground_truths

        ious = iou(detection, ground_truths)
        high_enough_ious = ious >= iou_thres
        matching_class_ids = np.array([
            detection.class_id == ground_truth.class_id
            for ground_truth in ground_truths
        ])

        matching_ground_truths = np.array(list(zip(
            ious, ground_truths)))[high_enough_ious & matching_class_ids]
        remaining_ground_truths = ground_truths[~(high_enough_ious
                                                  & matching_class_ids)]

        # Sort matches by descending IoU so the best one is treated as the match
        matching_ground_truths = sorted(
            matching_ground_truths.tolist(), key=lambda x: x[0], reverse=True)
        if len(matching_ground_truths) >= 1:
            classification = Classification.TRUE_POSITIVE
            # Only the best match is consumed; the rest go back as unused
            for i in range(1, len(matching_ground_truths)):
                remaining_ground_truths = np.concatenate(
                    (remaining_ground_truths,
                     np.array([matching_ground_truths[i][1]])))

        return classification, remaining_ground_truths
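
A hedged usage sketch for classify: loop over one frame's detections, let classify consume matched ground truths, and count whatever remains as false negatives. The name evaluate_frame and the assumption that classify, the Classification enum, and NumPy (as np) are in scope are illustrative, not part of the original:

    def evaluate_frame(detections, ground_truths, iou_thres=0.5):
        TP = FP = 0
        remaining = np.array(ground_truths)
        for detection in detections:
            result, remaining = classify(detection, remaining, iou_thres)
            if result == Classification.TRUE_POSITIVE:
                TP += 1
            else:
                FP += 1
        FN = len(remaining)  # ground truths never matched by any detection
        return TP, FP, FN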
    def event(self, event):
        super().event(event)

        if event['name'] == 'minibatch' and event['phase'] == 'train':
            image = event['inputs']['image']
            label_c = event['labels']['classification']
            label_s = event['labels']['segmentation']

            # Run the input through the adversary and score its segmentation
            adversary_result = self.adversary(image, label_c)
            iou_label = label_s.clone().detach().cpu().numpy()
            iou_predi = adversary_result['segmentation'].clone().detach()
            iou_predi = iou_predi.cpu().numpy()

            # One-hot the prediction along the class axis; channel 0 is
            # excluded from the score
            max_indices = iou_predi.max(axis=1, keepdims=True) == iou_predi
            iou_predi = np.zeros(iou_predi.shape)
            iou_predi[max_indices] = 1
            score_iou = iou(iou_label[:, 1:], iou_predi[:, 1:])

            # Training controller: alternate between the classifier and the
            # adversary every three minibatches
            self.step_count += 1
            if self.step == 'classifier':
                self.step_count_c += 1
                if self.step_count == 3:
                    self.step = 'adversary'
                    self.step_count = 0

                # Train the classifier, half of the time on the original image
                # and half of the time on the adversarially erased image
                if random.random() > 0.5:
                    classification = self.classifier(image)
                else:
                    classification = self.classifier(adversary_result['erase'])
                loss_c_bce = self.classifier.loss_bce(classification, label_c)
                loss_c_bce.backward()
                self.classifier.optimizer.step()

                wandb.log({
                    "step_count_c": self.step_count_c,
                    "loss_c_bce": loss_c_bce,
                    "score_iou": score_iou
                })

            elif self.step == 'adversary':
                self.step_count_a += 1
                if self.step_count == 3:
                    self.step = 'classifier'
                    self.step_count = 0

                # Channel loss
                loss_a_channel = self.adversary.loss_bce(
                    adversary_result['classification'], label_c)

                # Constrain loss
                loss_a_mask = torch.mean(adversary_result['mask'])

                # Classifier loss
                c_spot = self.classifier(adversary_result['spot'])
                c_erase = self.classifier(adversary_result['erase'])
                loss_a_spot = self.classifier.loss_bce(c_spot, label_c)
                loss_a_erase = torch.mean(c_erase[label_c > 0.5])

                # Get adversary final loss
                loss_a_final = loss_a_erase + loss_a_mask + loss_a_channel

                loss_a_final.backward()
                self.adversary.optimizer.step()

                wandb.log({
                    "step_count_a": self.step_count_a,
                    "loss_a_final": loss_a_final,
                    "loss_a_mask": loss_a_mask,
                    "loss_a_erase": loss_a_erase,
                    "loss_a_spot": loss_a_spot,
                    "loss_a_channel": loss_a_channel,
                    "score_iou": score_iou
                })

                # Visualize adversary progress on a fixed demo batch
                if self.step_count_a % 10 == 0:
                    demo_image = self.demo_inputs['image'].clone()
                    demo_label = self.demo_labels['classification'].clone()
                    demo_image = move_to(demo_image, self.device)
                    demo_label = move_to(demo_label, self.device)

                    # Keep the demo result separate so the imshow calls below
                    # still show the current training batch
                    demo_result = self.adversary(demo_image, demo_label)

                    for typez in [
                            'vis_output', 'vis_mask', 'vis_erase', 'vis_spot'
                    ]:
                        output = demo_result[typez]
                        for i, o in enumerate(output):
                            cv2.imwrite(
                                artifact_manager.getDir() +
                                f'/{typez}_{i}_{self.step_count_vis}.png',
                                o * 255)

                    self.step_count_vis += 1

            # Clear gradients
            self.classifier.optimizer.zero_grad()
            self.adversary.optimizer.zero_grad()

            cv2.imshow(
                'image',
                np.moveaxis(image[0].clone().detach().cpu().numpy(), 0, -1))
            cv2.imshow(
                'label',
                label_to_image(label_s[0].clone().detach().cpu().numpy()))
            cv2.imshow('output', adversary_result['vis_output'][0])
            cv2.imshow('mask', adversary_result['vis_mask'][0])
            cv2.imshow('erase', adversary_result['vis_erase'][0])
            cv2.imshow('spot', adversary_result['vis_spot'][0])

            cv2.waitKey(1)
        inputs_2 = inputs_2[:, :, np.newaxis]
        output = outputs_np[sample, 0]
        label = labels_np[sample]
        original = original_labels_np[sample]

        output = output[:, :, np.newaxis]

        # Build a side-by-side strip: input 1, input 2, output, label, original
        final = np.zeros((256, 256 * 5, 3))
        final[:, 0:256, :] = inputs_1
        final[:, 256:512, :] = inputs_2
        final[:, 512:768, :] = output
        final[:, 768:1024, :] = label
        final[:, 1024:1280, :] = original

        iou_input = iou(inputs_2, original)
        iou_output = iou(output, original)
        iou_label = iou(label, original)
        iou_original = iou(original, original)

        if iou_output > iou_label:
            better_count += 1
        iou_input_total += iou_input
        iou_output_total += iou_output
        iou_label_total += iou_label
        iou_original_total += iou_original

        # Thin separator lines between the panels
        final[:, 254:256, :] = (0, 0, 1)
        final[:, 510:512, :] = (0, 0, 1)
        final[:, 766:768, :] = (0, 0, 1)
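
All of these snippets rely on an iou helper that is not shown on this page; Example #1's box matcher expects a pairwise IoU matrix, while the fragment above applies iou to whole image-like arrays. A minimal mask-style sketch for that second case, assuming NumPy inputs of the same shape and NumPy imported as np as in the snippets above; the name mask_iou and the 0.5 binarization threshold are assumptions:

    def mask_iou(prediction, target, eps=1e-7):
        # prediction, target: arrays of the same shape with values in [0, 1]
        prediction = prediction > 0.5
        target = target > 0.5
        intersection = np.logical_and(prediction, target).sum()
        union = np.logical_or(prediction, target).sum()
        return intersection / (union + eps)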