Code example #1
File: im_io.py  Project: fluentgcc/kwimage
def _imread_turbojpeg(fpath):
    """
    See: https://www.learnopencv.com/efficient-image-loading/

    References:
        https://pypi.org/project/PyTurboJPEG/

    Bash:
        pip install PyTurboJPEG
        sudo apt install libturbojpeg -y

    Ignore:
        >>> # xdoctest: +REQUIRES(--network)
        >>> # xdoctest: +REQUIRES(turbojpeg)
        >>> import kwimage
        >>> rgb_fpath = kwimage.grab_test_image_fpath('amazon')
        >>> assert rgb_fpath.endswith('.jpg')
        >>> #
        >>> rgb = kwimage.imread(rgb_fpath)
        >>> gray = kwimage.convert_colorspace(rgb, 'rgb', 'gray')
        >>> gray_fpath = rgb_fpath + '.gray.jpg'
        >>> kwimage.imwrite(gray_fpath, gray)
        >>> #
        >>> fpath = gray_fpath
        >>> #
        >>> from kwimage.im_io import _imread_turbojpeg, _imread_skimage, _imread_cv2
        >>> import timerit
        >>> ti = timerit.Timerit(50, bestof=10, verbose=2)
        >>> #
        >>> for timer in ti.reset('turbojpeg'):
        >>>     with timer:
        >>>         im_turbo = _imread_turbojpeg(fpath)
        >>> #
        >>> for timer in ti.reset('cv2'):
        >>>     with timer:
        >>>         im_cv2 = _imread_cv2(fpath)
    """
    import turbojpeg
    jpeg = turbojpeg.TurboJPEG()
    with open(fpath, 'rb') as file:
        data = file.read()
        (width, height, jpeg_subsample,
         jpeg_colorspace) = jpeg.decode_header(data)
        # print('width = {!r}'.format(width))
        # print('height = {!r}'.format(height))
        # print('jpeg_subsample = {!r}'.format(jpeg_subsample))
        # print('jpeg_colorspace = {!r}'.format(jpeg_colorspace))
        if jpeg_colorspace == turbojpeg.TJCS_GRAY:
            pixel_format = turbojpeg.TJPF_GRAY
            src_space = 'gray'
            auto_dst_space = 'gray'
        else:
            pixel_format = turbojpeg.TJPF_RGB
            src_space = 'rgb'
            auto_dst_space = 'rgb'
        image = jpeg.decode(data, pixel_format=pixel_format)
    return image, src_space, auto_dst_space
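A minimal usage sketch of the reader above, assuming the kwimage test-image helper mentioned in the docstring; the function returns the decoded array together with the detected source and suggested destination colorspaces:

import kwimage
from kwimage.im_io import _imread_turbojpeg

# grab a JPEG test image and decode it with the turbojpeg-backed reader
fpath = kwimage.grab_test_image_fpath('amazon')
image, src_space, auto_dst_space = _imread_turbojpeg(fpath)
print(image.shape, src_space, auto_dst_space)  # e.g. an (H, W, 3) array plus 'rgb', 'rgb'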
Code example #2
def __init__(self, server_ip, port, parent=None):
    threading.Thread.__init__(self)
    self.parent = parent
    # UDP socket (SOCK_DGRAM) used to talk to the server
    self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.stop = False
    self.seq = 0
    self.server_ip = server_ip
    self.port = port
    # one reusable JPEG codec instance for the thread
    self.jpeg = turbojpeg.TurboJPEG()
    self.geo = None
    self.show_img = ShowImage(10)
Code example #3
File: im_io.py  Project: fluentgcc/kwimage
def _have_turbojpg():
    """
    pip install PyTurboJPEG

    """
    try:
        import turbojpeg  # NOQA
        turbojpeg.TurboJPEG()
    except Exception:
        return False
    else:
        return True
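A hedged sketch of how a probe like this can gate backend selection: use the turbojpeg reader from example #1 when the probe succeeds, otherwise fall back to OpenCV. The dispatch function below is an illustration, not kwimage's actual imread logic:

import cv2

def read_jpeg(fpath):
    # fast path via PyTurboJPEG when the shared library is importable and loadable
    if _have_turbojpg():
        image, src_space, _ = _imread_turbojpeg(fpath)
        return image
    # OpenCV fallback; note cv2.imread returns BGR for color JPEGs
    return cv2.imread(fpath, cv2.IMREAD_UNCHANGED)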
Code example #4
File: crop.py  Project: christianbrugger/pagecrop
def __init__(self):
    self.turbo = None
    try:
        import turbojpeg
    except ImportError:
        print(
            "INFO: libjpeg-turbo is not installed. Install with pip install PyTurboJPEG"
        )
    else:
        try:
            self.turbo = turbojpeg.TurboJPEG()
        except RuntimeError as exp:
            print("WARNING:", exp)
Code example #5
File: to_jpeg.py  Project: lopuhin/kaggle-panda-2020
def image_to_jpeg(path: Path, suffix: str, image: np.ndarray):
    global _jpeg
    # reuse a single TurboJPEG instance per process
    if _jpeg is None:
        _jpeg = turbojpeg.TurboJPEG()
    jpeg_path = path.parent / f'{path.stem}{suffix}.jpeg'
    if jpeg_path.exists():
        # skip the work if a decodable JPEG is already on disk
        try:
            _jpeg.decode(jpeg_path.read_bytes())
        except Exception as e:
            print(e)
        else:
            return
    image = crop_white(image)
    jpeg_path.write_bytes(
        _jpeg.encode(image, quality=90, pixel_format=turbojpeg.TJPF_RGB))
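To see the encode call above in isolation, here is a self-contained sketch that round-trips a synthetic RGB array through PyTurboJPEG with the same quality and pixel_format arguments (the array is made up for illustration; crop_white and the caching logic are left out):

import numpy as np
import turbojpeg

jpeg = turbojpeg.TurboJPEG()
rgb = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)    # fake RGB tile
buf = jpeg.encode(rgb, quality=90, pixel_format=turbojpeg.TJPF_RGB)  # JPEG bytes
back = jpeg.decode(buf, pixel_format=turbojpeg.TJPF_RGB)             # decode back to RGB
assert back.shape == rgb.shape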
Code example #6
    def encode(cls, image: np.ndarray, config=None):
        # lazily build the encoder from the global configuration on first use
        if cls._encoder is None:
            config = config or GlobalConfig.get_global_config()
            path = config["turbojpeg"]["path"]
            quality = config["turbojpeg"]["quality"]

            cls._encoder = tj.TurboJPEG(path)
            cls._quality = quality

        if len(image.shape) == 2:
            # grayscale input: add a channel axis and encode with gray subsampling
            return cls._encoder.encode(image[:, :, np.newaxis],
                                       quality=cls._quality,
                                       jpeg_subsample=tj.TJSAMP_GRAY,
                                       pixel_format=tj.TJPF_GRAY)
        else:
            raise NotImplementedError(
                "Multi-channel image encoding is not supported.")
Code example #7
from turbojpeg import TJPF_BGR  # note: imported but not used below
import turbojpeg as JPEG
import cv2
import screenshot as sc
import numpy as np

jpeg = JPEG.TurboJPEG()
scn = sc.gpu_screenshots()

# grab one frame from the GPU screenshot capture
scn.start()
shot = scn.get_latest_frame()[1]
while shot is None:
    shot = scn.get_latest_frame()[1]
scn.stop()

shot = np.asarray(shot, dtype=np.uint8)
#print(shot)
# encode the same frame with TurboJPEG and with OpenCV for comparison
jpg_file = jpeg.encode(shot)
with open('output1.jpg', 'wb') as f:
    f.write(jpg_file)
jpg_file = cv2.imencode('.jpg', shot)[1]
with open('output2.jpg', 'wb') as f:
    f.write(jpg_file)

# round trip: decode output1.jpg with TurboJPEG and re-encode it
with open('output1.jpg', 'rb') as f:
    jpg_file = jpeg.decode(f.read())
    jpg_file = jpeg.encode(jpg_file)
    with open('output3.jpg', 'wb') as fw:
        fw.write(jpg_file)
Code example #8
import typing
import time

import cv2 as cv
import h5py
import numpy as np
import pickle
import pytorch_lightning
from torch import tensor
import torch.utils.data
import tqdm
import random

try:
    import turbojpeg
    # rebind the module name to a shared decoder instance (left as None if unavailable)
    turbojpeg = turbojpeg.TurboJPEG()
except ImportError:
    turbojpeg = None

import selfsupmotion.data.objectron.data_transforms
import selfsupmotion.data.objectron.sequence_parser
import selfsupmotion.data.objectron.utils
import selfsupmotion.data.utils


class ObjectronHDF5SequenceParser(torch.utils.data.Dataset):
    """Objectron HDF5 dataset parser.

    This class can be used to parse the (non-official) Objectron HDF5. More specifically, it allows
    random access over all sequences of the original dataset. See the HDF5 extractor's module for
    information on what the HDF5 files contain.
Code example #9
def __init__(self):
    self.config = config.SettingAccessor(self.config_prefix)
    self.encoder = tj.TurboJPEG(self.config["turbo_jpeg_library"])
Code example #10
'''A self-contained image.'''

import aiofiles
import base64
import json
import turbojpeg as tj
_tj = tj.TurboJPEG()
import cv2

from mt import np
from mt.base import path, aio

__all__ = [
    'PixelFormat', 'Image', 'immload_asyn', 'immload', 'immsave_asyn',
    'immsave', 'imload', 'imsave', 'im_float2ubyte', 'im_ubyte2float'
]

PixelFormat = {
    'rgb': (tj.TJPF_RGB, 3, tj.TJSAMP_422),
    'bgr': (tj.TJPF_BGR, 3, tj.TJSAMP_422),
    'rgba': (tj.TJPF_RGBA, 4, tj.TJSAMP_422),
    'bgra': (tj.TJPF_BGRA, 4, tj.TJSAMP_422),
    'argb': (tj.TJPF_ARGB, 4, tj.TJSAMP_422),
    'abgr': (tj.TJPF_ABGR, 4, tj.TJSAMP_422),
    'gray': (tj.TJPF_GRAY, 1, tj.TJSAMP_GRAY),
}


class Image(object):
    '''A self-contained image, where the meta-data associated with the image are kept together with the image itself.
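The excerpt stops mid-class, but the PixelFormat table already shows how encode parameters are grouped per format. A minimal sketch of consuming that table with the module-level _tj instance (this helper is an illustration, not the mt library's actual code):

import numpy as np

def encode_with_format(image: np.ndarray, fmt: str = 'bgr', quality: int = 90) -> bytes:
    # look up the TurboJPEG pixel format, channel count and chroma subsampling for fmt
    tj_pixel_format, channels, tj_subsample = PixelFormat[fmt]
    assert image.ndim == 3 and image.shape[2] == channels
    return _tj.encode(image,
                      quality=quality,
                      pixel_format=tj_pixel_format,
                      jpeg_subsample=tj_subsample)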
Code example #11
def main():
    def id_by_path(path):
        return path.name.split('_')[0]

    paths = list(sorted(Path('data/train_images/').glob('*_2.jpeg')))
    path_by_id = {id_by_path(p): p for p in paths}
    index_by_id = {id_by_path(p): i for i, p in enumerate(paths)}

    jpeg = turbojpeg.TurboJPEG()
    orb = cv2.ORB_create()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    def read_image(path):
        image = jpeg.decode(path.read_bytes())
        # downscale so the longest side is at most 1024 px
        ratio = 1024 / max(image.shape[:2])
        if ratio < 1:
            image = cv2.resize(image,
                               None,
                               fx=ratio,
                               fy=ratio,
                               interpolation=cv2.INTER_AREA)
        return image

    def descriptor_worker(path):
        image = read_image(path)
        return orb.detectAndCompute(image, None)

    with ThreadPool(processes=16) as pool:
        all_descriptors = list(
            tqdm.tqdm(pool.imap(descriptor_worker, paths, chunksize=10),
                      total=len(paths),
                      desc='descriptors'))

    df_train = pd.read_csv('data/train.csv')
    all_pairs = []
    for data_provider in ['radboud']:
        for grade in range(6):
            sim_paths = [
                path_by_id[image_id] for image_id in df_train.query(
                    f'data_provider == "{data_provider}" and '
                    f'isup_grade == {grade}')['image_id']
            ]
            print(f'provider {data_provider} grade {grade}: '
                  f'{len(sim_paths)} paths')

            def match_worker(p1p2):
                p1, p2 = p1p2
                (kp1, d1), (kp2, d2) = [
                    all_descriptors[index_by_id[id_by_path(p)]]
                    for p in [p1, p2]
                ]
                try:
                    matches = sorted(bf.match(d1, d2),
                                     key=lambda x: x.distance)
                except cv2.error as e:
                    print(e)
                    matches = []
                return p1, p2, kp1, kp2, matches

            pairs_to_match = [(p1, p2) for p1 in sim_paths for p2 in sim_paths
                              if p1 < p2]

            all_matches = list(
                tqdm.tqdm(map(match_worker, pairs_to_match),
                          total=len(pairs_to_match),
                          desc='matching'))

            # keep pairs with more than 10 ORB matches whose Hamming distance is below 25
            good_matches = [
                x for x in all_matches
                if sum(x.distance < 25 for x in x[-1]) > 10
            ]
            print(f'found {len(good_matches)} matches')
            all_pairs.extend((p1, p2) for p1, p2, _, _, _ in good_matches)

    pairs_df = pd.DataFrame([{
        'a': id_by_path(p1),
        'b': id_by_path(p2)
    } for p1, p2 in all_pairs])
    pairs_df.to_csv('orb_duplicates.csv')
Code example #12
File: dataset.py  Project: lopuhin/kaggle-panda-2020
def jpeg(self):
    # lazily construct the decoder the first time it is needed
    if self._jpeg is None:
        self._jpeg = turbojpeg.TurboJPEG()
    return self._jpeg