Example 1
    def forward(self, source_features, img_tensor):
        locs = []
        confs = []
        priors = []
        for features, head in zip(source_features, self.heads):
            loc, conf, prior = head(features, img_tensor)
            locs.append(loc)
            confs.append(conf)
            priors.append(prior)

        batch = source_features[0].size(0)
        loc = torch.cat([o.view(batch, -1) for o in locs], 1)
        conf = torch.cat([o.view(batch, -1) for o in confs], 1)
        conf_softmax = F.softmax(conf.view(conf.size(0), -1, self.num_classes), dim=-1)

        with no_nncf_trace():
            priors = torch.cat(priors, dim=2)

        if self.training:
            return loc.view(batch, -1, 4), conf.view(batch, -1, self.num_classes), priors.view(1, 2, -1, 4)

        with no_nncf_trace():
            if self.loss_inference is True:
                return loc.view(batch, -1, 4), conf.view(batch, -1, self.num_classes), priors.view(1, 2, -1, 4)
            return self.detection_output(loc, conf_softmax.view(batch, -1), priors)
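Every example on this page wraps auxiliary tensor work (prior boxes, masks, statistics) in the no_nncf_trace() context manager so that NNCF's dynamic graph tracer does not record those operations as part of the compressed model. To experiment with snippets like the one above outside of NNCF, a no-op stand-in can be substituted. The sketch below only assumes that no_nncf_trace is a context manager importable from nncf.torch.dynamic_graph.context (the import path is an assumption about recent NNCF layouts); it is not NNCF's actual implementation.

from contextlib import contextmanager

try:
    # Assumed import path for recent NNCF releases; older releases expose it elsewhere.
    from nncf.torch.dynamic_graph.context import no_nncf_trace
except ImportError:
    @contextmanager
    def no_nncf_trace():
        # No-op fallback: outside of NNCF there is no graph tracing to suppress.
        yield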
Example 2
    def _register_input(self, x: torch.Tensor):
        with no_nncf_trace():
            for pct, val in self._all_pct_values.items():
                np_vals = np_percentile_reduce_like(x.cpu().numpy(),
                                                    self._reduction_shape, pct)
                torch_vals = torch.from_numpy(np_vals).to(dtype=torch.float)
                val.append(torch_vals)
Example 3
    def forward(self, features, image_tensor):
        loc = self.loc(features)
        conf = self.conf(features)

        with no_nncf_trace():
            priors = self.prior_box(features, image_tensor).to(loc.device)
        # The knowledge distillation algorithm differentiates all model outputs that have
        # requires_grad=True. Priors should not be differentiated, so they are explicitly
        # excluded from the backpropagation graph.
        priors = priors.detach()

        loc = loc.permute(0, 2, 3, 1).contiguous()
        conf = conf.permute(0, 2, 3, 1).contiguous()

        return loc, conf, priors
Example 4
    def forward(self, x):
        if is_debug():
            self.call_count += 1
        # TODO: refactor to get rid of extra if's and calls on each forward
        if not self.is_enabled_quantization():
            return x
        self.set_level_ranges()
        is_exporting = is_tracing_state()
        if is_exporting:
            with no_nncf_trace():
                x = self.run_export_quantization(x)

            # The underlying operator (registered via register_operator) must be executed,
            # otherwise the dynamic graph won't be traced as it was during regular inference.
            # While this does not impact regular, non-RNN models, for which graph building
            # and pre-/post-hook calling are determined only by input-agnostic,
            # graph-structure-independent trace info (e.g. current op scope and call count),
            # it is important for LSTMs etc., where determining the "first nodes in iteration
            # scopes" depends on whether the input tensors to an operation were traced or not.
            return self.quantize(x, execute_traced_op_as_identity=True)

        return self.quantize(x, execute_traced_op_as_identity=False)
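The export branch above is taken only while a tracing pass (e.g. torch.jit.trace or ONNX export) is active. As a rough illustration, and assuming is_tracing_state() mirrors PyTorch's own notion of an active trace, the same condition can be detected with the public torch.jit.is_tracing() API; this is a sketch with a hypothetical helper name, not NNCF's implementation.

import torch

def in_export_trace() -> bool:
    # Hypothetical helper: assumes is_tracing_state() corresponds to PyTorch's
    # built-in check for an active torch.jit.trace / ONNX export pass.
    return torch.jit.is_tracing()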
Example 5
    def set_mask(self, context_len, context):
        """
        sets self.mask which is applied before softmax
        ones for inactive context fields, zeros for active context fields

        :param context_len: b
        :param context: if batch_first: (b x t_k x n) else: (t_k x b x n)

        self.mask: (b x t_k)
        """

        if self.batch_first:
            max_len = context.size(1)
        else:
            max_len = context.size(0)

        indices = torch.arange(0,
                               max_len,
                               dtype=torch.int64,
                               device=context.device)
        # TODO: remove once tensor type is stored in NNCF graph and is accessible to quant algo
        with no_nncf_trace():
            self.mask = indices >= (context_len.unsqueeze(1))
Example 6
    def _register_input(self, x: torch.Tensor):
        with no_nncf_trace():
            self._register_input_common(PTNNCFTensor(x))
Example 7
    def _register_input(self, x: torch.Tensor):
        with no_nncf_trace():
            self._samples.append(x.detach().cpu().numpy())
Example 8
    def calc_perturbation(self, module, inputs: Tensor, output: Tensor):
        input_ = inputs[0] if isinstance(inputs, tuple) else inputs
        with no_nncf_trace():
            self.perturbation = torch.norm(input_ - output, p=2) ** 2
            self.numels = input_.size().numel()
            self.input_norm = torch.norm(input_, p=2) ** 2
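calc_perturbation has the (module, inputs, output) signature of a PyTorch forward hook, so it is presumably attached to the measured layer via register_forward_hook. The sketch below shows that wiring with a hypothetical observer class and layer (illustrative only, not NNCF code); the no_nncf_trace() wrapper is dropped since the sketch runs outside of NNCF.

import torch
from torch import Tensor, nn


class PerturbationHook:
    """Hypothetical observer mirroring the hook signature used in Example 8."""

    def __init__(self):
        self.perturbation = None
        self.numels = None
        self.input_norm = None

    def calc_perturbation(self, module: nn.Module, inputs, output: Tensor):
        input_ = inputs[0] if isinstance(inputs, tuple) else inputs
        self.perturbation = torch.norm(input_ - output, p=2) ** 2
        self.numels = input_.size().numel()
        self.input_norm = torch.norm(input_, p=2) ** 2


layer = nn.Linear(16, 16)
observer = PerturbationHook()
# register_forward_hook calls the hook as hook(module, inputs, output),
# matching the signature of calc_perturbation above.
handle = layer.register_forward_hook(observer.calc_perturbation)
layer(torch.randn(4, 16))
handle.remove()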