Example #1
0
 def _prepare_for_export(self, **kwargs):
     """Disable masking on all ``MaskedConv1d`` submodules before export.

     Sets ``use_mask = False`` on every ``MaskedConv1d`` found in the module
     tree (presumably because the mask branch is not export-friendly — confirm
     against MaskedConv1d.forward), then delegates the remaining preparation
     to ``Exportable._prepare_for_export``.

     Args:
         **kwargs: Forwarded unchanged to ``Exportable._prepare_for_export``.
     """
     m_count = 0
     for m in self.modules():
         if isinstance(m, MaskedConv1d):
             m.use_mask = False
             m_count += 1
     Exportable._prepare_for_export(self, **kwargs)
     # Only warn when something actually changed; "Turned off 0 masked
     # convolutions" would be misleading noise for mask-free models.
     if m_count > 0:
         logging.warning(f"Turned off {m_count} masked convolutions")
Example #2
0
 def _prepare_for_export(self, **kwargs):
     """Switch masked convolutions into plain-conv mode before export.

     Matches submodules by class *name* (not ``isinstance``), flips their
     ``use_mask`` flag off, logs how many were changed, and finally hands
     control to ``Exportable._prepare_for_export``.
     """
     # Collect matching modules first, then flip them all in one pass.
     masked_convs = [
         mod for mod in self.modules() if type(mod).__name__ == "MaskedConv1d"
     ]
     for conv in masked_convs:
         conv.use_mask = False
     if masked_convs:
         logging.warning(f"Turned off {len(masked_convs)} masked convolutions")
     Exportable._prepare_for_export(self, **kwargs)
Example #3
0
 def _prepare_for_export(self, **kwargs):
     """Extend the model's maximum audio length to cover the export example.

     Starts from the configured positional-embedding maximum
     (``self.pos_emb_max_len``) and, if an ``input_example`` is supplied in
     ``kwargs``, grows the limit to that example's last-dimension size.
     The resulting length is applied via ``set_max_audio_length`` before
     delegating to ``Exportable._prepare_for_export``.
     """
     max_len = self.pos_emb_max_len
     if 'input_example' in kwargs:
         # assumes input_example is a tuple whose first element is a tensor
         # with time as the last dimension — TODO confirm with callers.
         m_len = kwargs['input_example'][0].size(-1)
         if m_len > max_len:
             max_len = m_len
     # Bug fix: the message previously hard-coded "10,772"; report the
     # length that is actually applied.
     logging.info(f"Extending input audio length to {max_len}")
     self.set_max_audio_length(max_len)
     Exportable._prepare_for_export(self, **kwargs)
Example #4
0
    def _prepare_for_export(self, **kwargs):
        """Prepare the module tree for export.

        For non-RNNT exports, disables masking on every ``MaskedConv1d``
        (RNNT exports keep masking enabled, gated by ``self._rnnt_export``).
        Independently, swaps each ``SqueezeExcite`` module's pooling step for
        its ``_se_pool_step_export`` implementation. Finishes by delegating
        to ``Exportable._prepare_for_export``.

        Args:
            **kwargs: Forwarded unchanged to ``Exportable._prepare_for_export``.
        """
        m_count = 0
        for m in self.modules():
            # Idiom fix: the original used `if rnnt: pass else: ...`;
            # the combined condition says the same thing directly.
            if isinstance(m, MaskedConv1d) and not self._rnnt_export:
                m.use_mask = False
                m_count += 1
            if isinstance(m, SqueezeExcite):
                # Use the export variant of the SE pooling step.
                m._se_pool_step = m._se_pool_step_export

        Exportable._prepare_for_export(self, **kwargs)
        logging.warning(f"Turned off {m_count} masked convolutions")
Example #5
0
    def _prepare_for_export(self, **kwargs):
        """Prepare masked convolutions and SE blocks for export.

        Disables masking on every ``MaskedConv1d``, tracks the cumulative
        stride of the ``mconv`` convolutions seen so far, and caps each
        ``SqueezeExcite`` module's max length at one hour of frames divided
        by that cumulative stride. Ends by delegating to
        ``Exportable._prepare_for_export``.
        """
        disabled = 0
        total_stride = 1
        # 1 sec / 0.01 window stride = 100 frames per second; scale to 1 hour.
        one_hour = 100 * 60 * 60 * 1

        for name, module in self.named_modules():
            if isinstance(module, MaskedConv1d):
                module.use_mask = False
                disabled += 1
                # Accumulate downsampling contributed by strided 'mconv' convs.
                if module.conv.stride[0] > 1 and 'mconv' in name:
                    total_stride = total_stride * module.conv.stride[0]

            if isinstance(module, SqueezeExcite):
                # One hour of frames divided by the current stride level.
                module.set_max_len(int(one_hour // total_stride))

        Exportable._prepare_for_export(self, **kwargs)
        logging.warning(f"Turned off {disabled} masked convolutions")
Example #6
0
 def _prepare_for_export(self):
     # No model-specific preparation: defer entirely to the Exportable base
     # implementation. NOTE(review): unlike sibling implementations this
     # variant takes no **kwargs — confirm the caller never passes any.
     Exportable._prepare_for_export(self)
Example #7
0
    def export(
        self,
        output: str,
        input_example=None,
        output_example=None,
        verbose=False,
        export_params=True,
        do_constant_folding=True,
        keep_initializers_as_inputs=False,
        onnx_opset_version: int = 12,
        try_script: bool = False,
        set_eval: bool = True,
        check_trace: bool = True,
        use_dynamic_axes: bool = True,
    ):
        """Export the two-part model (BERT encoder + classifier head).

        For TorchScript output, exports the whole model in one call. For ONNX,
        exports the BERT model and the classifier separately to
        ``bert_<output>`` and ``classifier_<output>``, stitches them together
        with ``attach_onnx_to_onnx``, and saves the combined graph to
        ``output``.

        Returns:
            A tuple ``(paths, descriptions)`` listing the combined, BERT, and
            classifier artifacts (ONNX path) or whatever ``super().export``
            returns (TorchScript path).
        """
        if input_example is not None or output_example is not None:
            logging.warning(
                "Passed input and output examples will be ignored and recomputed since"
                " IntentSlotClassificationModel consists of two separate models with different"
                " inputs and outputs.")

        # The same trailing options are forwarded to every sub-export call.
        common_args = (
            verbose,
            export_params,
            do_constant_folding,
            keep_initializers_as_inputs,
            onnx_opset_version,
            try_script,
            set_eval,
            check_trace,
            use_dynamic_axes,
        )

        if Exportable.get_format(output) is ExportFormat.TORCHSCRIPT:
            return super().export(
                output, self.bert_model.input_example(), None, *common_args
            )

        qual_name = self.__module__ + '.' + self.__class__.__qualname__
        out_dir = os.path.dirname(output)
        out_base = os.path.basename(output)

        bert_path = os.path.join(out_dir, 'bert_' + out_base)
        bert_descr = qual_name + ' BERT exported to ONNX'
        # input/output examples are None: each sub-model computes its own.
        bert_model_onnx = self.bert_model.export(bert_path, None, None, *common_args)

        clf_path = os.path.join(out_dir, 'classifier_' + out_base)
        clf_descr = qual_name + ' Classifier exported to ONNX'
        classifier_onnx = self.classifier.export(clf_path, None, None, *common_args)

        combined = attach_onnx_to_onnx(bert_model_onnx, classifier_onnx, "ISC")
        combined_descr = qual_name + ' BERT+Classifier exported to ONNX'
        onnx.save(combined, output)
        return ([output, bert_path, clf_path], [combined_descr, bert_descr, clf_descr])