Example #1
    def forward(self, **params):
        new_params = []
        for param_name, param_value in params.items():
            # If the weight (or bias) is None, the mask should not be applied
            if param_value is None:
                new_params.append(param_value)
                continue

            # For weights, self.mask_applying_dim should be used; for biases, dim=0
            dim = 0 if param_name == 'bias' else self.mask_applying_dim
            if is_tracing_state():
                with no_jit_trace():
                    new_params.append(
                        inplace_apply_filter_binary_mask(
                            self.binary_filter_pruning_mask,
                            param_value,
                            node_name_for_logging=self.node_name,
                            dim=dim))
            else:
                new_params.append(
                    apply_filter_binary_mask(
                        self.binary_filter_pruning_mask,
                        param_value,
                        node_name_for_logging=self.node_name,
                        dim=dim))
        return new_params
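A minimal sketch of what apply_filter_binary_mask is assumed to do here: broadcast the 1-D per-filter mask along the target dimension and multiply. The name and signature below are illustrative, not the actual NNCF helper.

import torch

def apply_filter_binary_mask_sketch(mask, param, dim=0):
    # Reshape the per-filter mask so it broadcasts along `dim`, e.g. for a conv
    # weight of shape (C_out, C_in, kH, kW) and dim=0 the mask becomes (C_out, 1, 1, 1).
    shape = [1] * param.dim()
    shape[dim] = mask.numel()
    return param * mask.view(shape).to(param.dtype)

# Example: zero out the second and fourth filters of a 4-filter conv weight.
weight = torch.randn(4, 3, 3, 3)
mask = torch.tensor([1., 0., 1., 0.])
pruned = apply_filter_binary_mask_sketch(mask, weight, dim=0)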
Example #2
    def forward(self, inputs):
        data_sub1 = inputs
        features_sub1 = self.highres_branch(data_sub1)

        data_sub2 = F.interpolate(data_sub1, self._input_size_hw_ds2,
                                  **self.sampling_params)
        features_sub2 = self.mediumres_branch(data_sub2)

        # Contrary to the ICNet paper Fig. 2, the low-resolution branch does not receive a separate
        # 4x-downsampled image input, but instead reuses feature maps from the medium-resolution
        # branch.

        data_sub4 = F.interpolate(features_sub2, self._input_size_hw_ds32,
                                  **self.sampling_params)
        features_sub4 = self.lowres_branch(data_sub4)

        if self.training:
            fused_features_sub42, label_scores_ds16 = self.cff42(
                features_sub4, features_sub2)
            fused_features_sub421, label_scores_ds8 = self.cff421(
                fused_features_sub42, features_sub1)

            fused_features_ds4 = F.interpolate(fused_features_sub421,
                                               self._input_size_hw_ds4,
                                               **self.sampling_params)
            label_scores_ds4 = self.conv6_cls(fused_features_ds4)

            return OrderedDict([("ds4", label_scores_ds4),
                                ("ds8", label_scores_ds8),
                                ("ds16", label_scores_ds16)])

        fused_features_sub42 = self.cff42(features_sub4, features_sub2)
        fused_features_sub421 = self.cff421(fused_features_sub42,
                                            features_sub1)

        fused_features_ds4 = F.interpolate(fused_features_sub421,
                                           self._input_size_hw_ds4,
                                           **self.sampling_params)
        label_scores_ds4 = self.conv6_cls(fused_features_ds4)
        label_scores = F.interpolate(label_scores_ds4, self._input_size_hw,
                                     **self.sampling_params)
        if is_tracing_state() and parse_version(
                torch.__version__) >= parse_version("1.1.0"):
            # While exporting, add extra post-processing layers into the graph
            # so that the model outputs class probabilities instead of class scores
            softmaxed = F.softmax(label_scores, dim=1)
            return softmaxed
        return label_scores
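The softmax branch above is only taken while the model is being traced. A hypothetical export call that would trigger it, assuming `model` is an instance of this ICNet module (the input resolution below is an assumption, not taken from the source):

import torch

model.eval()
dummy_input = torch.randn(1, 3, 1024, 2048)  # assumed input resolution
# torch.onnx.export traces the model, so is_tracing_state() returns True
# and the exported graph ends with the extra softmax node.
torch.onnx.export(model, dummy_input, "icnet.onnx")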
Example #3
    def forward(self, x):
        blocks = []
        for i, down in enumerate(self.down_path):
            x = down(x)
            if i != len(self.down_path) - 1:
                blocks.append(x)
                x = F.max_pool2d(x, 2)

        for i, up in enumerate(self.up_path):
            x = up(x, blocks[-i - 1])

        x = self.last(x)
        if is_tracing_state() and parse_version(
                torch.__version__) >= parse_version("1.1.0"):
            # While exporting, add extra post-processing layers into the graph
            # so that the model outputs class probabilities instead of class scores
            softmaxed = F.softmax(x, dim=1)
            return softmaxed
        return x
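A hypothetical illustration (not the actual model definition) of how down_path and up_path could be laid out so that blocks[-i - 1] pairs each up block with the skip connection saved at the mirrored depth:

import torch
import torch.nn as nn
import torch.nn.functional as F

class UpBlockSketch(nn.Module):
    def __init__(self, in_ch, skip_ch, out_ch):
        super().__init__()
        self.conv = nn.Conv2d(in_ch + skip_ch, out_ch, 3, padding=1)

    def forward(self, x, skip):
        # Upsample to the skip connection's spatial size, then concatenate and convolve.
        x = F.interpolate(x, size=skip.shape[2:], mode='nearest')
        return self.conv(torch.cat([x, skip], dim=1))

down_path = nn.ModuleList([nn.Conv2d(3, 16, 3, padding=1),
                           nn.Conv2d(16, 32, 3, padding=1),
                           nn.Conv2d(32, 64, 3, padding=1)])
up_path = nn.ModuleList([UpBlockSketch(64, 32, 32),   # consumes blocks[-1]
                         UpBlockSketch(32, 16, 16)])  # consumes blocks[-2]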
Example #4
    def forward(self, x):
        if is_debug():
            self.call_count += 1
        # TODO: refactor to get rid of extra if's and calls on each forward
        if not self.is_enabled_quantization():
            return x
        self.set_level_ranges()
        is_exporting = is_tracing_state()
        if is_exporting:
            with no_nncf_trace():
                x = self.run_export_quantization(x)

            # The underlying operator (registered via register_operator) must be executed,
            # otherwise the dynamic graph won't be traced as it was during regular inference.
            # While this does not impact the regular, non-RNN models, for which the graph
            # building and pre-/post-hook calling is only determined by input-agnostic,
            # graph-structure independent trace info (e.g. current op scope and call count),
            # this is important for LSTMs etc. where determining the "first nodes in iteration
            # scopes" depends on whether the input tensors to an operation were traced or not.
            return self.quantize(x, execute_traced_op_as_identity=True)

        return self.quantize(x, execute_traced_op_as_identity=False)
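For intuition only, a hypothetical illustration of the kind of fake quantization that run_export_quantization is assumed to perform for a symmetric 8-bit quantizer; this is not NNCF's actual export path, and the function name and threshold handling are invented for the sketch.

import torch

def fake_quantize_sketch(x, scale, levels=256):
    # Quantize to integer levels, clamp to the representable range,
    # then dequantize back to floating point.
    low, high = -(levels // 2), levels // 2 - 1
    q = torch.clamp(torch.round(x / scale), low, high)
    return q * scale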
Example #5
    def forward(self, weight):
        if is_tracing_state():
            with no_jit_trace():
                return weight.mul_(self.binary_mask)
        tmp_tensor = self._calc_training_binary_mask(weight)
        return apply_binary_mask_impl(tmp_tensor, weight)
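A minimal sketch, under assumptions about the helpers involved, of what the two branches amount to: during tracing the stored binary mask is multiplied into the weight in place, while in training a fresh mask is computed from the weight and applied out of place. The magnitude-threshold criterion below is an assumption for illustration; the actual criterion depends on the sparsity algorithm.

import torch

def calc_training_binary_mask_sketch(weight, threshold=0.05):
    # Hypothetical magnitude-based mask: keep weights whose absolute value exceeds the threshold.
    return (weight.abs() > threshold).to(weight.dtype)

def apply_binary_mask_sketch(mask, weight):
    # Elementwise multiplication without modifying the original weight.
    return weight * mask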