Example #1
    def export(self,
               model,
               export_path,
               metric_channels=None,
               export_onnx_path=None):
        """
        Wrapper method to export PyTorch model to Caffe2 model using :class:`~Exporter`.

        Args:
            export_path (str): file path of exported caffe2 model
            metric_channels (List[Channel]): outputs of model's execution graph
            export_onnx_path (str):file path of exported onnx model
        """
        # Make sure to put the model on CPU and disable CUDA before exporting to
        # ONNX to disable any data_parallel pieces
        cuda.CUDA_ENABLED = False
        precision.deactivate()

        model = model.cpu()
        if self.exporter:
            if metric_channels:
                print("Exporting metrics")
                self.exporter.export_to_metrics(model, metric_channels)
            print("Saving caffe2 model to: " + export_path)
            self.exporter.export_to_caffe2(model, export_path,
                                           export_onnx_path)
Example #2
    def export(self, model, export_path, metric_channels=None, export_onnx_path=None):
        # Make sure to put the model on CPU and disable CUDA before exporting to
        # ONNX to disable any data_parallel pieces
        cuda.CUDA_ENABLED = False
        precision.deactivate()
        model = model.cpu()

        batch = next(iter(self.data.batches(Stage.TRAIN)))
        model.caffe2_export(
            self.data.tensorizers, batch, export_path, export_onnx_path=export_onnx_path
        )
Example #3
    def torchscript_export(self, model, export_path):
        # Make sure to put the model on CPU and disable CUDA before exporting to
        # ONNX to disable any data_parallel pieces
        cuda.CUDA_ENABLED = False
        model.cpu()
        precision.deactivate(model)
        # Trace needs eval mode, to disable dropout etc
        model.eval()

        batch = next(iter(self.data.batches(Stage.TEST)))
        inputs = model.arrange_model_inputs(batch)
        trace = jit.trace(model, inputs)
        trace.save(export_path)
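Once saved, the traced artifact can be reloaded and run with the standard TorchScript API. A minimal sketch; the path and `example_inputs` are placeholders, and the inputs must match what `model.arrange_model_inputs(batch)` produced at trace time:

import torch

# Load the serialized TorchScript module written by torchscript_export above.
loaded = torch.jit.load("/tmp/model.pt")  # placeholder path
loaded.eval()

with torch.no_grad():
    outputs = loaded(*example_inputs)  # placeholder inputs matching the trace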
Example #4
    def torchscript_export(self, model, export_path):
        # Make sure to put the model on CPU and disable CUDA before exporting to
        # ONNX to disable any data_parallel pieces
        cuda.CUDA_ENABLED = False
        model.cpu()
        precision.deactivate(model)
        # Trace needs eval mode, to disable dropout etc
        model.eval()
        model.prepare_for_onnx_export_()

        batch = next(iter(self.data.batches(Stage.TEST)))
        inputs = model.arrange_model_inputs(batch)
        trace = jit.trace(model, inputs)
        if hasattr(model, "torchscriptify"):
            trace = model.torchscriptify(self.data.tensorizers, trace)
        print(f"Saving torchscript model to: {export_path}")
        trace.save(export_path)
Example #5
    def export(self, model, export_path, metric_channels=None, export_onnx_path=None):
        # Make sure to put the model on CPU and disable CUDA before exporting to
        # ONNX to disable any data_parallel pieces
        cuda.CUDA_ENABLED = False
        model = model.cpu()
        precision.deactivate(model)

        unused_raw_batch, batch = next(iter(self.data.batches(Stage.TRAIN)))
        if metric_channels:
            print("Exporting metrics")
            for mc in metric_channels:
                mc.export(model, model.arrange_model_inputs(batch))

        print(f"Saving caffe2 model to: {export_path}")
        return model.caffe2_export(
            self.data.tensorizers, batch, export_path, export_onnx_path=export_onnx_path
        )
Example #6
    def torchscript_export(self, model, export_path=None, quantize=False):
        # Make sure to put the model on CPU and disable CUDA before exporting to
        # ONNX to disable any data_parallel pieces
        cuda.CUDA_ENABLED = False
        model.cpu()
        precision.deactivate(model)
        # Trace needs eval mode, to disable dropout etc
        model.eval()
        model.prepare_for_onnx_export_()

        unused_raw_batch, batch = next(iter(self.data.batches(Stage.TRAIN)))
        inputs = model.arrange_model_inputs(batch)
        # call model forward to set correct device types
        model(*inputs)
        if quantize:
            model.quantize()
        trace = jit.trace(model, inputs)
        if hasattr(model, "torchscriptify"):
            trace = model.torchscriptify(self.data.tensorizers, trace)
        trace.apply(lambda s: s._pack() if s._c._has_method("_pack") else None)
        if export_path is not None:
            print(f"Saving torchscript model to: {export_path}")
            trace.save(export_path)
        return trace
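A hedged usage sketch for this variant: quantize=True runs the model's own quantize() hook before tracing, and leaving export_path as None returns the traced module without writing it to disk. As above, `task` is a hypothetical stand-in for the object that owns this method:

# Save a quantized TorchScript model to disk:
task.torchscript_export(task.model, "/tmp/model_q.pt", quantize=True)

# Keep the traced module in memory only (export_path defaults to None):
trace = task.torchscript_export(task.model)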