Example #1
    def forward(self, source_features, img_tensor):
        locs = []
        confs = []
        priors = []
        for features, head in zip(source_features, self.heads):
            loc, conf, prior = head(features, img_tensor)
            locs.append(loc)
            confs.append(conf)
            priors.append(prior)

        batch = source_features[0].size(0)
        loc = torch.cat([o.view(batch, -1) for o in locs], 1)
        conf = torch.cat([o.view(batch, -1) for o in confs], 1)
        conf_softmax = F.softmax(conf.view(conf.size(0), -1, self.num_classes),
                                 dim=-1)

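        # Prior boxes are concatenated outside of NNCF tracing so this
        # step is excluded from the compressed model's traced graph.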
        with no_nncf_trace():
            priors = torch.cat(priors, dim=2)

        if self.training:
            return loc.view(batch, -1, 4), conf.view(
                batch, -1, self.num_classes), priors.view(1, 2, -1, 4)

        with no_nncf_trace():
            if self.loss_inference:
                return loc.view(batch, -1, 4), conf.view(
                    batch, -1, self.num_classes), priors.view(1, 2, -1, 4)
            return self.detection_output(loc, conf_softmax.view(batch, -1),
                                         priors)
Example #2
 def calc_perturbation(self, module, inputs: torch.Tensor,
                       output: torch.Tensor):
     input_ = inputs[0] if isinstance(inputs, tuple) else inputs
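     # Compute the perturbation statistics outside of NNCF tracing so
     # they do not become part of the traced model graph.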
     with no_nncf_trace():
         self.perturbation = torch.norm(input_ - output, p=2)**2
         self.numels = input_.size().numel()
         self.input_norm = torch.norm(input_, p=2)**2
Example #3
    def forward(self, features, image_tensor):
        loc = self.loc(features)
        conf = self.conf(features)

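        # Generate the prior boxes outside of NNCF tracing and move the
        # result to the same device as the location predictions.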
        with no_nncf_trace():
            priors = self.prior_box(features, image_tensor).to(loc.device)

        loc = loc.permute(0, 2, 3, 1).contiguous()
        conf = conf.permute(0, 2, 3, 1).contiguous()

        return loc, conf, priors
Example #4
 def _register_input(self, x: torch.Tensor):
     with no_nncf_trace():
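         # Keep a sliding window (deque of size self._window_size) of
         # per-shape min/max reductions of the observed inputs.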
         for reduction_shape in self._reduction_shapes:
             if reduction_shape not in self._all_min_values:
                 self._all_min_values[reduction_shape] = deque(
                     maxlen=self._window_size)
             if reduction_shape not in self._all_max_values:
                 self._all_max_values[reduction_shape] = deque(
                     maxlen=self._window_size)
             self._all_min_values[reduction_shape].append(
                 min_reduce_like(x, reduction_shape))
             self._all_max_values[reduction_shape].append(
                 max_reduce_like(x, reduction_shape))
Example #5
    def _register_input(self, x: torch.Tensor):
        with no_nncf_trace():
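            # Track elementwise running min/max for every reduction shape.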
            for reduction_shape in self._reduction_shapes:
                min_reduced = min_reduce_like(x, reduction_shape)
                max_reduced = max_reduce_like(x, reduction_shape)
                # Have to use .get() because the inferred reduction shape is only known at first register_input call
                if self._min_values.get(reduction_shape) is None:
                    self._min_values[reduction_shape] = min_reduced
                else:
                    self._min_values[reduction_shape] = torch.min(
                        min_reduced, self._min_values[reduction_shape])

                if self._max_values.get(reduction_shape) is None:
                    self._max_values[reduction_shape] = max_reduced
                else:
                    self._max_values[reduction_shape] = torch.max(
                        max_reduced, self._max_values[reduction_shape])
Example #6
 def _register_input(self, x: torch.Tensor):
     with no_nncf_trace():
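         # Store a detached CPU copy of the observed input sample.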
         self._samples.append(x.detach().cpu().numpy())
Example #7
 def register_input(self, x: torch.Tensor):
     with no_nncf_trace():
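         # Update elementwise running min/max reduced to self.scale_shape.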
         self.min_values = torch.min(min_reduce_like(x, self.scale_shape),
                                     self.min_values)
         self.max_values = torch.max(max_reduce_like(x, self.scale_shape),
                                     self.max_values)
Example #8
 def register_input(self, x: torch.Tensor):
     with no_nncf_trace():
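         # Enqueue a detached CPU copy of the input for later processing.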
         self.input_history.put(x.detach().cpu().numpy())
Example #9
 def register_input(self, x: torch.Tensor):
     with no_nncf_trace():
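         # Accumulate the per-call min/max reductions of the input.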
         self.all_min_values.append(min_reduce_like(x, self.scale_shape))
         self.all_max_values.append(max_reduce_like(x, self.scale_shape))
Example #10
    def forward(ctx, loc_data, conf_data, prior_data, detection_output_params):
        """
        Args:
            loc_data: (tensor) Loc preds from loc layers
                Shape: [batch,num_priors*4]
            conf_data: (tensor) Conf preds from conf layers
                Shape: [batch,num_priors*num_classes]
            prior_data: (tensor) Prior boxes and variances from priorbox layers
                Shape: [1,2,num_priors*4]
        """
        with no_jit_trace(), no_nncf_trace():
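            # Run detection post-processing (box decoding + NMS) outside
            # of both the JIT and NNCF tracing scopes.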
            if detection_output_params.nms_threshold <= 0:
                raise ValueError('nms_threshold must be positive.')
            device = loc_data.device
            batch_size = loc_data.size(0)
            num_priors = loc_data.size(1) // 4
            loc_data = loc_data.view(batch_size, num_priors, 4)
            conf_data = conf_data.view(batch_size, num_priors, -1)
            prior_data = prior_data.view(1, 2, num_priors, 4)
            output = torch.zeros(batch_size, 1,
                                 detection_output_params.keep_top_k,
                                 7).to(device)

            conf_preds = conf_data.view(
                batch_size, num_priors,
                detection_output_params.num_classes).transpose(2, 1)

            # Decode predictions into bboxes.
            for i in range(batch_size):
                output_for_img = torch.zeros(0, 7).to(device)
                decoded_boxes = decode(loc_data[i], prior_data[0])
                # For each class, perform nms
                conf_scores = conf_preds[i].clone()

                total_detections_count = 0
                all_indices = {}  # indices of confident detections for each class
                boxes = {}
                for cl in range(0, detection_output_params.num_classes):
                    if cl == detection_output_params.background_label_id:
                        continue
                    c_mask = conf_scores[cl].gt(
                        detection_output_params.confidence_threshold)
                    scores = conf_scores[cl][c_mask]
                    if scores.numel() == 0:
                        continue
                    conf_scores[cl, :scores.size(0)] = scores
                    conf_scores[cl, scores.size(0):] = 0
                    l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
                    boxes[cl] = decoded_boxes[l_mask].view(-1, 4)
                    # idx of highest scoring and non-overlapping boxes per class
                    all_indices[cl], count = nms(
                        boxes[cl], scores,
                        detection_output_params.nms_threshold,
                        detection_output_params.top_k)
                    all_indices[cl] = all_indices[cl][:count]
                    total_detections_count += count

                score_index_pairs = []  # list of tuples (score, label, idx)
                for label, indices in all_indices.items():
                    indices = indices.cpu().numpy()
                    for idx in indices:
                        score_index_pairs.append(
                            (conf_scores[label, idx], label, idx))

                score_index_pairs.sort(key=lambda tup: tup[0], reverse=True)
                score_index_pairs = score_index_pairs[
                    :detection_output_params.keep_top_k]

                all_indices_new = dict()
                for _, label, idx in score_index_pairs:
                    if label not in all_indices_new:
                        all_indices_new[label] = [idx]
                    else:
                        all_indices_new[label].append(idx)

                for label, indices in all_indices_new.items():
                    img_ids = torch.full((len(indices), 1), i,
                                         dtype=torch.float, device=device)
                    labels = torch.full((len(indices), 1), label,
                                        dtype=torch.float, device=device)
                    out = torch.cat(
                        (img_ids, labels,
                         conf_scores[label, indices].unsqueeze(1).to(device),
                         boxes[label][indices].to(device)), 1)
                    output_for_img = torch.cat((output_for_img, out), 0)

                output[i, 0, :output_for_img.size(0)] = output_for_img
        return output