def perf_checks():
    "Suggest to user what improvements they could do to their setup to speed things up"
    from PIL import features, Image
    from packaging import version

    # `Image.PILLOW_VERSION` was deprecated and later removed from Pillow in
    # favour of `__version__` (available since 5.2); support both so this
    # check does not itself crash on a modern Pillow install.
    try:
        pil_version = Image.__version__
    except AttributeError:
        pil_version = Image.PILLOW_VERSION  # older Pillow / Pillow-SIMD

    # libjpeg_turbo check — `features.check_feature` only reports it reliably
    # from Pillow 5.4.0 onwards.
    print("\n*** libjpeg-turbo status")
    if version.parse(pil_version) >= version.parse("5.4.0"):
        if features.check_feature('libjpeg_turbo'):
            print("libjpeg-turbo is on")
        else:
            print("libjpeg-turbo is not on. It's recommended you install libjpeg-turbo to speed up JPEG decoding. See https://docs.fast.ai/performance.html#libjpeg-turbo")
    else:
        print(f"libjpeg-turbo's status can't be derived - need Pillow(-SIMD)? >= 5.4.0 to tell, current version {pil_version}")

    # Pillow-SIMD releases carry a ".postN" suffix in their version string,
    # which is how we tell them apart from stock Pillow.
    print("\n*** Pillow-SIMD status")
    if re.search(r'\.post\d+', pil_version):
        print("Running Pillow-SIMD")
    else:
        print("Running Pillow; It's recommended you install Pillow-SIMD to speed up image resizing and other operations. See https://docs.fast.ai/performance.html#pillow-simd")
def test_check():
    # The generic `features.check` dispatcher must agree with every
    # type-specific checker (module / codec / feature).
    cases = (
        (features.modules, features.check_module),
        (features.codecs, features.check_codec),
        (features.features, features.check_feature),
    )
    for names, specific_check in cases:
        for name in names:
            assert specific_check(name) == features.check(name)
def check_perf():
    "Suggest how to improve the setup to speed things up"
    from PIL import features, Image
    from packaging import version

    print("Running performance checks.")

    # libjpeg_turbo check
    print("\n*** libjpeg-turbo status")
    #TODO: do some workaround for later versions of Pillow
    pil_version = pillow_version()
    if version.parse(pil_version) >= version.parse("5.3.9"):
        if features.check_feature('libjpeg_turbo'):
            print("✔ libjpeg-turbo is on")
        else:
            print("✘ libjpeg-turbo is not on. It's recommended you install libjpeg-turbo to speed up JPEG decoding. See https://docs.fast.ai/performance.html#libjpeg-turbo")
    else:
        print(f"❓ libjpeg-turbo's status can't be derived - need Pillow(-SIMD)? >= 5.4.0 to tell, current version {pil_version}")
        # XXX: remove this check/note once Pillow and Pillow-SIMD 5.4.0 is available
        pillow_ver_5_4_is_avail = pypi_module_version_is_available("Pillow", "5.4.0")
        if pillow_ver_5_4_is_avail is False:
            print("5.4.0 is not yet available, other than the dev version on github, which can be installed via pip from git+https://github.com/python-pillow/Pillow. See https://docs.fast.ai/performance.html#libjpeg-turbo")

    # Pillow-SIMD check — its releases carry a ".postN" version suffix.
    #TODO: same as above
    print("\n*** Pillow-SIMD status")
    if re.search(r'\.post\d+', pil_version):
        print(f"✔ Running Pillow-SIMD {pil_version}")
    else:
        # NOTE: this message was previously broken across a raw newline inside
        # the string literal (a syntax error); it is now a single literal.
        print(f"✘ Running Pillow {pil_version}; It's recommended you install Pillow-SIMD to speed up image resizing and other operations. See https://docs.fast.ai/performance.html#pillow-simd")

    # CUDA version check
    # compatibility table: k: min nvidia ver is required for v: cuda ver
    # note: windows nvidia driver version is slightly higher, see:
    # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
    # note: add new entries if pytorch starts supporting new cudaXX
    nvidia2cuda = {
        "410.00": "10.0",
        "384.81":  "9.0",
        "367.48":  "8.0",
    }
    print("\n*** CUDA status")
    if torch.cuda.is_available():
        pynvml = load_pynvml_env()
        # NOTE(review): on OSX this placeholder string is fed to version.parse
        # below, which will likely fail — confirm intended behavior on Darwin.
        nvidia_ver = (pynvml.nvmlSystemGetDriverVersion().decode('utf-8')
                      if platform.system() != "Darwin" else "Cannot be determined on OSX yet")
        cuda_ver = torch.version.cuda
        max_cuda = "8.0"
        for k in sorted(nvidia2cuda.keys()):
            if version.parse(nvidia_ver) > version.parse(k):
                max_cuda = nvidia2cuda[k]
        if version.parse(str(max_cuda)) <= version.parse(cuda_ver):
            print(f"✔ Running the latest CUDA {cuda_ver} with NVIDIA driver {nvidia_ver}")
        else:
            # fixed: previously hardcoded "cuda10" regardless of the computed maximum
            print(f"✘ You are running pytorch built against cuda {cuda_ver}, your NVIDIA driver {nvidia_ver} supports cuda{max_cuda}. See https://pytorch.org/get-started/locally/ to install pytorch built against the faster CUDA version.")
    else:
        print(f"❓ Running cpu-only torch version, CUDA check is not relevant")

    print("\nRefer to https://docs.fast.ai/performance.html to make sense out of these checks and suggestions.")
def check_perf():
    "Suggest how to improve the setup to speed things up"
    from PIL import features, Image
    from packaging import version

    print("Running performance checks.")

    # `Image.PILLOW_VERSION` was removed from modern Pillow; fall back to it
    # only when `__version__` (Pillow >= 5.2) is missing, so this check does
    # not itself raise AttributeError.
    pil_version = getattr(Image, "__version__", None) or Image.PILLOW_VERSION

    # libjpeg_turbo check
    print("\n*** libjpeg-turbo status")
    if version.parse(pil_version) >= version.parse("5.3.9"):
        if features.check_feature('libjpeg_turbo'):
            print("✔ libjpeg-turbo is on")
        else:
            print("✘ libjpeg-turbo is not on. It's recommended you install libjpeg-turbo to speed up JPEG decoding. See https://docs.fast.ai/performance.html#libjpeg-turbo")
    else:
        print(f"❓ libjpeg-turbo's status can't be derived - need Pillow(-SIMD)? >= 5.4.0 to tell, current version {pil_version}")
        # XXX: remove this check/note once Pillow and Pillow-SIMD 5.4.0 is available
        pillow_ver_5_4_is_avail = pypi_module_version_is_available("Pillow", "5.4.0")
        if pillow_ver_5_4_is_avail is False:
            print("5.4.0 is not yet available, other than the dev version on github, which can be installed via pip from git+https://github.com/python-pillow/Pillow. See https://docs.fast.ai/performance.html#libjpeg-turbo")

    # Pillow-SIMD check — its releases carry a ".postN" version suffix.
    print("\n*** Pillow-SIMD status")
    if re.search(r'\.post\d+', pil_version):
        print(f"✔ Running Pillow-SIMD {pil_version}")
    else:
        # NOTE: this message was previously broken across a raw newline inside
        # the string literal (a syntax error); it is now a single literal.
        print(f"✘ Running Pillow {pil_version}; It's recommended you install Pillow-SIMD to speed up image resizing and other operations. See https://docs.fast.ai/performance.html#pillow-simd")

    # CUDA version check
    # compatibility table: k: min nvidia ver is required for v: cuda ver
    # note: windows nvidia driver version is slightly higher, see:
    # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
    # note: add new entries if pytorch starts supporting new cudaXX
    nvidia2cuda = {
        "410.00": "10.0",
        "384.81":  "9.0",
        "367.48":  "8.0",
    }
    print("\n*** CUDA status")
    if torch.cuda.is_available():
        pynvml = load_pynvml_env()
        # NOTE(review): on OSX this placeholder string is fed to version.parse
        # below, which will likely fail — confirm intended behavior on Darwin.
        nvidia_ver = (pynvml.nvmlSystemGetDriverVersion().decode('utf-8')
                      if platform.system() != "Darwin" else "Cannot be determined on OSX yet")
        cuda_ver = torch.version.cuda
        max_cuda = "8.0"
        for k in sorted(nvidia2cuda.keys()):
            if version.parse(nvidia_ver) > version.parse(k):
                max_cuda = nvidia2cuda[k]
        if version.parse(str(max_cuda)) <= version.parse(cuda_ver):
            print(f"✔ Running the latest CUDA {cuda_ver} with NVIDIA driver {nvidia_ver}")
        else:
            # fixed: previously hardcoded "cuda10" regardless of the computed maximum
            print(f"✘ You are running pytorch built against cuda {cuda_ver}, your NVIDIA driver {nvidia_ver} supports cuda{max_cuda}. See https://pytorch.org/get-started/locally/ to install pytorch built against the faster CUDA version.")
    else:
        print(f"❓ Running cpu-only torch version, CUDA check is not relevant")

    print("\nRefer to https://docs.fast.ai/performance.html to make sense out of these checks and suggestions.")
def test_check(self):
    """Verify the `features.check` convenience wrapper matches each
    type-specific checker across all known modules, codecs and features."""
    pairs = [
        (features.check_module, features.modules),
        (features.check_codec, features.codecs),
        (features.check_feature, features.features),
    ]
    for specific, names in pairs:
        for name in names:
            self.assertEqual(specific(name), features.check(name))
def test_check(self):
    # `features.check` should dispatch to the matching specific checker;
    # exercise every registered module, codec, and feature name.
    for mod_name in features.modules:
        self.assertEqual(features.check_module(mod_name), features.check(mod_name))
    for codec_name in features.codecs:
        self.assertEqual(features.check_codec(codec_name), features.check(codec_name))
    for feat_name in features.features:
        self.assertEqual(features.check_feature(feat_name), features.check(feat_name))
def get_quantization_method(self, value):
    """Translate a quantization-method name into the matching PIL constant.

    Returns None (PIL's default) for unrecognised names, or when
    'libimagequant' is requested but not supported on this platform.
    """
    named_methods = {
        "FastOctree": Image.FASTOCTREE,
        "MedianCut": Image.MEDIANCUT,
        "MaxCoverage": Image.MAXCOVERAGE,
    }
    method = named_methods.get(value)
    if value == "libimagequant":
        # libimagequant is optional — only usable if Pillow was built with it.
        if features.check_feature(feature="libimagequant"):
            method = Image.LIBIMAGEQUANT
        else:
            self.log("Quantization method 'libimagequant' is unsupported by your platform.")
    self.log("Using {method} quantization method".format(method="default" if method is None else value))
    return method
def check_perf():
    "Suggest how to improve the setup to speed things up"
    from PIL import features, Image

    print("Running performance checks:")

    # Pillow-SIMD check — its releases carry a ".postN" version suffix.
    # `Image.__version__` exists on Pillow >= 5.2; the legacy constant
    # `PILLOW_VERSION` is the fallback for older installs. Catch only
    # AttributeError (was a bare `except:`, which hid unrelated failures).
    try:
        pil_version = Image.__version__    # PIL >= 7
    except AttributeError:
        pil_version = Image.PILLOW_VERSION # PIL < 7
    if re.search(r'\.post\d+', pil_version):
        print(f"✔ Running Pillow-SIMD {pil_version}")
    else:
        print(f"✘ Running Pillow {pil_version}")

    # libjpeg_turbo check
    if features.check_feature('libjpeg_turbo'):
        print("✔ libjpeg-turbo is on")
    else:
        print("✘ libjpeg-turbo is not on.")
import json
import pickle

from PIL import Image, features

# from torchpie.logging import logger
import logging

logger = logging.getLogger(__name__)

# `Image.PILLOW_VERSION` was removed in modern Pillow; prefer `__version__`
# (available since Pillow 5.2) and fall back only when it is missing, so
# importing this module does not raise AttributeError.
_pil_version = getattr(Image, "__version__", None) or Image.PILLOW_VERSION
logger.info(f'pillow version: {_pil_version}')
logger.info(
    'Using jpeg-turbo: {}'.format(features.check_feature('libjpeg_turbo')))


def load_json(filename: str):
    """Read and deserialize a JSON file."""
    with open(filename, 'r') as f:
        return json.load(f)


def dump_json(obj, filename: str):
    """Serialize `obj` to `filename` as JSON."""
    with open(filename, 'w') as f:
        json.dump(obj, f)


def load_pickle(filename: str):
    """Deserialize a pickle file.

    WARNING: only call this on trusted files — unpickling can execute
    arbitrary code.
    """
    with open(filename, 'rb') as f:
        return pickle.load(f)


def dump_pickle(obj, filename: str):
    """Serialize `obj` to `filename` with pickle."""
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)
import PIL
from PIL import features

# Report whether libjpeg-turbo is compiled into this Pillow build,
# followed by the installed Pillow version.
turbo_enabled = features.check_feature('libjpeg_turbo')
print(turbo_enabled)
print(PIL.__version__)