Example #1
def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.dataloaders import *; extract_boxes()
    # Convert detection dataset into classification dataset, with one directory per class
    path = Path(path)  # images dir
    if (path / 'classifier').is_dir():
        shutil.rmtree(path / 'classifier')  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))  # BGR (kept BGR so cv2.imwrite below saves correct colors)
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file) as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # np.int was removed in NumPy 1.24

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
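
The helper xywh2xyxy used above converts center-format boxes to corner format. A minimal NumPy-only sketch of that conversion (the real helper in utils/general.py also accepts torch tensors):

import numpy as np

def xywh2xyxy(x):
    # Convert nx4 boxes from [x_center, y_center, w, h] to [x1, y1, x2, y2]
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top-left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top-left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom-right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom-right y
    return y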
Example #2
    def on_train_end(self, last, best, plots, epoch, results):
        # Callback runs on training end
        if plots:
            plot_results(file=self.save_dir / 'results.csv')  # save results.png
        files = [
            'results.png', 'confusion_matrix.png',
            *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))
        ]
        files = [(self.save_dir / f) for f in files
                 if (self.save_dir / f).exists()]  # filter

        if self.tb:
            for f in files:
                self.tb.add_image(f.stem,
                                  cv2.imread(str(f))[..., ::-1],
                                  epoch,
                                  dataformats='HWC')

        if self.wandb:
            self.wandb.log(dict(zip(self.keys[3:10], results)))  # log best.pt val results
            self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
            # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
            if not self.opt.evolve:
                wandb.log_artifact(str(best if best.exists() else last),
                                   type='model',
                                   name=f'run_{self.wandb.wandb_run.id}_model',
                                   aliases=['latest', 'best', 'stripped'])
            self.wandb.finish_run()
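
For reference, the TensorBoard branch above reduces to this pattern; a minimal sketch with hypothetical paths (cv2 decodes BGR, so the channels are reversed before logging):

import cv2
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/exp')  # hypothetical log directory
im = cv2.imread('runs/exp/results.png')[..., ::-1]  # BGR -> RGB, HWC uint8
writer.add_image('results', im, 0, dataformats='HWC')
writer.close()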
Example #3
    def load_image(self, i):
        # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                assert im is not None, f'Image Not Found {f}'
            h0, w0 = im.shape[:2]  # orig hw
            r = self.img_size / max(h0, w0)  # ratio
            if r != 1:  # if sizes are not equal
                interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
                im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
            return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
        return self.ims[i], self.im_hw0[i], self.im_hw[i]  # im, hw_original, hw_resized
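
A hedged usage sketch: inside a YOLOv5-style __getitem__, the two shape tuples returned above are what allow label coordinates to be rescaled to the resized image (index variable assumed):

im, (h0, w0), (h, w) = self.load_image(index)
ratio = (h / h0, w / w0)  # per-axis scale for mapping label pixels onto the resized image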
Example #4
def _hub_ops(f, max_dim=1920):
    # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
    f_new = im_dir / Path(f).name  # dataset-hub image filename
    try:  # use PIL
        im = Image.open(f)
        r = max_dim / max(im.height, im.width)  # ratio
        if r < 1.0:  # image too large
            im = im.resize((int(im.width * r), int(im.height * r)))
        im.save(f_new, 'JPEG', quality=75, optimize=True)  # save
    except Exception as e:  # use OpenCV
        print(f'WARNING: HUB ops PIL failure {f}: {e}')
        im = cv2.imread(f)
        im_height, im_width = im.shape[:2]
        r = max_dim / max(im_height, im_width)  # ratio
        if r < 1.0:  # image too large
            im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(str(f_new), im)
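
_hub_ops reads the global im_dir for its output location. A hypothetical driver (all paths assumed) that creates that directory and fans the op across a dataset with a thread pool:

from multiprocessing.pool import ThreadPool
from pathlib import Path

im_dir = Path('coco128-hub/images')  # assumed output dir read by _hub_ops
im_dir.mkdir(parents=True, exist_ok=True)
files = [str(p) for p in Path('datasets/coco128/images').rglob('*.jpg')]  # assumed sources
with ThreadPool(8) as pool:
    pool.map(_hub_ops, files)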
Example #5
    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            while not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, f'Image Not Found {path}'
            s = f'image {self.count}/{self.nf} {path}: '

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap, s
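
Hypothetical usage of the LoadImages-style iterator this __next__ belongs to (constructor signature assumed): each iteration yields the letterboxed CHW/RGB array alongside the original BGR frame.

dataset = LoadImages('data/images', img_size=640, stride=32, auto=True)
for path, img, img0, cap, s in dataset:
    print(s, img.shape, img0.shape)  # e.g. (3, 384, 640) letterboxed vs (1080, 810, 3) original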
Example #6
    def cache_images_to_disk(self, i):
        # Saves an image as an *.npy file for faster loading
        f = self.npy_files[i]
        if not f.exists():
            np.save(f.as_posix(), cv2.imread(self.im_files[i]))
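
The payoff comes on reload: np.load returns the already-decoded BGR pixels, skipping JPEG decoding entirely. A standalone sketch of the same round trip (file names assumed):

import cv2
import numpy as np
from pathlib import Path

im_file, npy_file = 'data/images/bus.jpg', Path('bus.npy')  # assumed pair
if not npy_file.exists():
    np.save(npy_file.as_posix(), cv2.imread(im_file))  # decode once, persist raw pixels
im = np.load(npy_file)  # fast reload, no image decode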
Example #7
if __name__ == '__main__':
    model = _create(name='yolov5s',
                    pretrained=True,
                    channels=3,
                    classes=80,
                    autoshape=True,
                    verbose=True)
    # model = custom(path='path/to/model.pt')  # custom

    # Verify inference
    from pathlib import Path

    import numpy as np
    from PIL import Image

    from utils.general import cv2

    imgs = [
        'data/images/zidane.jpg',  # filename
        Path('data/images/zidane.jpg'),  # Path
        'https://ultralytics.com/images/zidane.jpg',  # URI
        cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
        Image.open('data/images/bus.jpg'),  # PIL
        np.zeros((320, 640, 3))  # numpy
    ]

    results = model(imgs, size=320)  # batched inference
    results.print()
    results.save()
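
Beyond print() and save(), the returned Detections object exposes the raw boxes; a short sketch of common accessors:

results.xyxy[0]  # tensor of [x1, y1, x2, y2, confidence, class] for the first image
df = results.pandas().xyxy[0]  # same detections as a pandas DataFrame with class names
print(df)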