# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.
# ==============================================================================

import numpy as np

from lpot.utils.utility import LazyImport
from .transform import transform_registry, BaseTransform

tf = LazyImport('tensorflow')
cv2 = LazyImport('cv2')


@transform_registry(transform_type="QuantizedInput",
                    process="preprocess", framework="tensorflow")
class QuantizedInput(BaseTransform):
    """Convert the dtype of the input image so it can be quantized.

    Args:
        dtype (str): desired image dtype; 'uint8' and 'int8' are supported
        scale (float, default=None): scaling ratio applied to each pixel

    Returns:
        tuple of processed image and label
    """

    def __init__(self, dtype, scale=None):
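
# Illustrative usage sketch, not part of the original file. It assumes that
# QuantizedInput, like other BaseTransform subclasses, is callable on an
# (image, label) pair as described in the docstring above; the input array and
# the scale value below are made up for the example.
image = np.random.rand(224, 224, 3).astype(np.float32)   # hypothetical input image
label = 1
quantize = QuantizedInput(dtype='uint8', scale=255.0)
image, label = quantize((image, label))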
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

from abc import abstractmethod
from lpot.utils.utility import LazyImport, singleton
from ..utils import logger
from sklearn.metrics import accuracy_score, f1_score
import numpy as np

torch_ignite = LazyImport('ignite')
torch = LazyImport('torch')
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')


@singleton
class TensorflowMetrics(object):
    def __init__(self):
        self.metrics = {
            "Accuracy": WrapTensorflowMetric(
                tf.keras.metrics.Accuracy),
            "Sum": WrapTensorflowMetric(
                tf.keras.metrics.Sum, True),
            "Mean": WrapTensorflowMetric(
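
# Illustrative lookup sketch (not part of the original file). The registry is a
# singleton, so repeated constructions return the same metric table; the exact
# interface of WrapTensorflowMetric is not shown in this excerpt, so only the
# lookup itself is demonstrated here.
metrics = TensorflowMetrics().metrics
accuracy_metric = metrics["Accuracy"]   # wraps tf.keras.metrics.Accuracy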
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

from abc import abstractmethod
import functools
from lpot.utils.utility import LazyImport, singleton

torchvision = LazyImport('torchvision')
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')


@singleton
class TensorflowDatasets(object):
    def __init__(self):
        self.datasets = {
            "cifar10": tf.keras.datasets.cifar10,
            "cifar100": tf.keras.datasets.cifar100,
            "fashion_mnist": tf.keras.datasets.fashion_mnist,
            "imdb": tf.keras.datasets.imdb,
            "mnist": tf.keras.datasets.mnist,
            "reuters": tf.keras.datasets.reuters,
            "list_files": tf.data.Dataset.list_files,
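
# Illustrative sketch (not part of the original file): the registry maps
# user-facing dataset names onto tf.keras.datasets loaders, so resolving
# "mnist" gives the usual Keras module with its (train, test) numpy tuples.
datasets = TensorflowDatasets().datasets
(x_train, y_train), (x_test, y_test) = datasets["mnist"].load_data()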
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.
# ==============================================================================

import os
import re
import numpy as np
from PIL import Image

from lpot.utils.utility import LazyImport
from lpot.utils import logger
from .dataset import dataset_registry, IterableDataset, Dataset

tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')
torch = LazyImport('torch')


@dataset_registry(dataset_type="ImagenetRaw", framework="onnxrt_qlinearops, \
                    onnxrt_integerops", dataset_format='')
class ImagenetRaw(Dataset):
    """Configuration for the raw ImageNet dataset.

    Please arrange the data in this way:
        data_path/img1.jpg
        data_path/img2.jpg
        ...
        data_path/imgx.jpg

    The dataset reads the name and label of each image from the image_list
    file; if image_list is set to None, it falls back to data_path/val_map.txt
    automatically.
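
# Illustrative layout sketch (not part of the original file; file names, the
# label value, and the constructor argument names are assumptions). val_map.txt
# is assumed to hold one "<image file name> <integer label>" pair per line,
# matching the fallback described in the docstring above:
#
#   data_path/
#       img1.jpg
#       img2.jpg
#       val_map.txt        # e.g. a line reads: "img1.jpg 65"
#
#   dataset = ImagenetRaw(root='data_path', image_list=None)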
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

import os
import copy
import logging
from collections import OrderedDict
import yaml
import numpy as np

from lpot.adaptor.adaptor import adaptor_registry, Adaptor
from lpot.adaptor.query import QueryBackendCapability
from lpot.utils.utility import LazyImport, dump_elapsed_time

onnx = LazyImport("onnx")
ort = LazyImport("onnxruntime")

logger = logging.getLogger()


class ONNXRTAdaptor(Adaptor):
    """The ONNX Runtime adaptor layer: performs ONNX Runtime quantization and
    calibration, and can inspect layer tensors.

    Args:
        framework_specific_info (dict): framework-specific configuration for
            quantization.
    """

    def __init__(self, framework_specific_info):
        super().__init__(framework_specific_info)
        self.__config_dict = {}
        self.quantizable_ops = []
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

from .dataset import dataset_registry, Dataset
import numpy as np
from lpot.utils.utility import LazyImport
import logging

mx = LazyImport('mxnet')
torch = LazyImport('torch')


@dataset_registry(dataset_type="dummy", framework="tensorflow, onnxrt_qlinearops, \
                   onnxrt_integerops, pytorch, pytorch_ipex, mxnet", dataset_format='')
class DummyDataset(Dataset):
    """Dataset used for dummy data generation.

    This dataset constructs tensors of a given shape; the value range is
    calculated from: low * stand_normal(0, 1) + high.
    (TODO) construct dummy data from a real dataset or an iteration of data.

    Args:
        shape (list or tuple): supports creating tensors of multiple shapes;
            pass a list of tuples to create one tensor per tuple.
        low (list or float, default=-128.): rescales the tensor value range
            from [0, 1] to [0, low], or [low, 0] if low < 0; if a float, the
            same low value is applied to every tensor.
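
# Illustrative construction sketch (not part of the original file). Only the
# shape/low arguments documented above are grounded in this excerpt; the high
# argument and indexed access are assumptions, since the full signature and the
# rest of the class are truncated here.
dummy = DummyDataset(shape=(3, 224, 224), low=0., high=1.)
sample = dummy[0]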
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

import copy
import os
from abc import abstractmethod

from lpot.utils.utility import LazyImport, compute_sparsity
from lpot.utils import logger
from lpot.conf.dotdict import deep_get, deep_set
from lpot.conf import config as cfg
from lpot.model.base_model import BaseModel
from lpot.model.onnx_model import ONNXModel

torch = LazyImport('torch')
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')
onnx = LazyImport('onnx')
ort = LazyImport("onnxruntime")
yaml = LazyImport('yaml')
json = LazyImport('json')

# Strip the ":<output index>" suffix from tensor names and deduplicate,
# yielding the underlying node names.
tensor_to_node = lambda s: list(set([x.split(':')[0] for x in s]))


def get_model_type(model):
    """Get the model type.

    Args:
        model (string or model object): model path or model object
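
# Example of the helper above: tensor names such as "conv1:0" are reduced to
# their node names, with duplicates removed (set order is not guaranteed).
tensor_to_node(['conv1:0', 'conv1:1', 'fc1:0'])   # -> ['conv1', 'fc1'] in some order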
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.
# ==============================================================================

import numpy as np
from lpot.utils.utility import LazyImport
from .transform import transform_registry, Transform

tf = LazyImport('tensorflow')


@transform_registry(transform_type="ParseDecodeImagenet",
                    process="preprocess", framework="tensorflow")
class ParseDecodeImagenetTransform(Transform):

    def __call__(self, sample):
        # Dense features in Example proto.
        feature_map = {
            'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
                                                   default_value=''),
            'image/class/label': tf.io.FixedLenFeature([1], dtype=tf.int64,
                                                       default_value=-1)
        }
        # Sparse features in Example proto.
        sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
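
# Illustrative sketch (comments only, not part of the original file) of how a
# feature map like the one above is typically consumed later in __call__; the
# decode step is an assumption, since the rest of the method is not shown in
# this excerpt:
#
#     features = tf.io.parse_single_example(serialized_sample, feature_map)
#     label = tf.cast(features['image/class/label'], dtype=tf.int32)
#     image = tf.image.decode_jpeg(features['image/encoded'], channels=3)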
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

from abc import abstractmethod
import os
from lpot.utils.utility import LazyImport, singleton
from PIL import Image

torch = LazyImport('torch')
torchvision = LazyImport('torchvision')
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')
np = LazyImport('numpy')
hashlib = LazyImport('hashlib')
gzip = LazyImport('gzip')
tarfile = LazyImport('tarfile')
zipfile = LazyImport('zipfile')
pickle = LazyImport('pickle')
glob = LazyImport('glob')


@singleton
class TensorflowDatasets(object):
    def __init__(self):
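
# Illustrative note (not part of the original file): LazyImport is understood to
# defer the real import until the module is first used, so listing every backend
# above stays cheap even when only one framework is installed; this behavior is
# inferred from the utility's name and its use throughout lpot, as its
# implementation is not shown in this excerpt. The line below would be what
# first triggers the actual "import numpy":
arr = np.array([1, 2, 3])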
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

from abc import abstractmethod
from lpot.utils.utility import LazyImport, singleton

torch = LazyImport('torch')
tf = LazyImport('tensorflow')


@singleton
class TensorflowCriterions(object):
    def __init__(self):
        self.criterions = {}
        self.criterions.update(TENSORFLOW_CRITERIONS)


@singleton
class PyTorchCriterions(object):
    def __init__(self):
        self.criterions = {}
        self.criterions.update(PYTORCH_CRITERIONS)
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

import numpy as np
from lpot.utils.utility import LazyImport
from .base_dataloader import BaseDataLoader

torch = LazyImport('torch')


class PyTorchDataLoader(BaseDataLoader):

    def _generate_dataloader(self, dataset, batch_size, last_batch, collate_fn,
                             sampler, batch_sampler, num_workers, pin_memory):
        # Keep the final incomplete batch only when last_batch == 'rollover'.
        drop_last = False if last_batch == 'rollover' else True
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=batch_size,
                                           collate_fn=collate_fn,
                                           drop_last=drop_last,
                                           num_workers=num_workers,
                                           pin_memory=pin_memory,
                                           sampler=sampler,
                                           batch_sampler=batch_sampler)
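
# Illustrative usage sketch (not part of the original file): BaseDataLoader's
# constructor signature is an assumption, since it is not shown in this excerpt.
# With last_batch='rollover' the final incomplete batch is kept
# (drop_last=False); any other value discards it.
dataset = torch.utils.data.TensorDataset(torch.randn(10, 3), torch.zeros(10))
loader = PyTorchDataLoader(dataset=dataset, batch_size=4, last_batch='rollover')
for features, labels in loader:
    pass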
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

import collections
import logging
import numpy as np
from lpot.utils.utility import LazyImport
from .base_dataloader import BaseDataLoader

mx = LazyImport('mxnet')


class MXNetDataLoader(BaseDataLoader):

    def _generate_dataloader(self, dataset, batch_size, last_batch, collate_fn,
                             sampler, batch_sampler, num_workers, pin_memory,
                             shuffle):
        if shuffle:
            logging.warning('Shuffle is not supported yet in MXNetDataLoader, '
                            'ignoring shuffle keyword.')
        drop_last = False if last_batch == 'rollover' else True
        return mx.gluon.data.DataLoader(dataset,
                                        batch_size=batch_size,
                                        batchify_fn=collate_fn,
                                        last_batch=last_batch,
                                        num_workers=num_workers,
# Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license text.

from abc import abstractmethod
import collections
import numpy as np

from lpot.utils.utility import LazyImport
from .sampler import IterableSampler, SequentialSampler, BatchSampler
from .fetcher import FETCHERS
from .default_dataloader import default_collate
from .default_dataloader import DefaultDataLoader
from .base_dataloader import BaseDataLoader
from ..datasets.bert_dataset import TensorflowBertDataset
from ..datasets.coco_dataset import COCORecordDataset

tf = LazyImport('tensorflow')
lpot = LazyImport('lpot')


class TFDataDataLoader(BaseDataLoader):
    """In TensorFlow 1.x the dataloader is coupled with the graph, but session
    runs also support feeding data through feed_dict; this dataloader is
    designed to satisfy that feed_dict usage in TF 1.x. It is nevertheless a
    general dataloader and can also be used with MXNet and PyTorch.
    """

    def __init__(self, dataset, batch_size=1, last_batch='rollover'):
        self.dataset = dataset
        self.last_batch = last_batch
        self._batch_size = batch_size
        dataset = dataset.batch(batch_size)
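
# Illustrative usage sketch (not part of the original file): the constructor
# arguments match the __init__ shown above, but iteration over the wrapped,
# batched tf.data.Dataset is an assumption, since __iter__ is not shown in this
# excerpt.
ds = tf.data.Dataset.from_tensor_slices((tf.random.uniform([10, 3]), tf.zeros([10])))
loader = TFDataDataLoader(ds, batch_size=4)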