Code Example #1
    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img = self.cap.read()

            self.frame += 1
            # print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img = cv2.imread(path)  # BGR
            assert img is not None, 'Image Not Found ' + path
            # print(f'image {self.count}/{self.nf} {path}', end='\n')

        lb_path = ds.img2label_paths([path])
        return lb_path[0], img
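For reference, a minimal sketch of how this iterator might be driven, assuming the method above belongs to a YOLOv5 LoadImages-style dataset class; the class name, import path, and source directory below are assumptions for illustration, not part of the snippet:

# Minimal usage sketch; LoadImages, utils.datasets and 'data/images' are assumed.
from utils.datasets import LoadImages

dataset = LoadImages('data/images')   # directory of images and/or videos
for lb_path, img in dataset:          # this variant yields (label path, BGR image)
    print(lb_path, img.shape)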
Code Example #2
    def create_dataset_table(self, dataset, class_to_id, name='dataset'):
        """
        Create and return W&B artifact containing W&B Table of the dataset.

        arguments:
        dataset (LoadImagesAndLabels) -- instance of the LoadImagesAndLabels class, iterated over to build the Table
        class_to_id (dict(int, str)) -- hash map that maps class ids to labels
        name (str) -- name of the artifact

        returns:
        dataset artifact to be logged or used
        """
        # TODO: Explore multiprocessing to split this loop in parallel. This is essential for speeding up the logging
        artifact = wandb.Artifact(name=name, type="dataset")
        img_files = tqdm([dataset.path]) if isinstance(
            dataset.path, str) and Path(dataset.path).is_dir() else None
        img_files = tqdm(dataset.img_files) if not img_files else img_files
        for img_file in img_files:
            if Path(img_file).is_dir():
                artifact.add_dir(img_file, name='data/images')
                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
                artifact.add_dir(labels_path, name='data/labels')
            else:
                artifact.add_file(img_file,
                                  name='data/images/' + Path(img_file).name)
                label_file = Path(img2label_paths([img_file])[0])
                artifact.add_file(
                    str(label_file), name='data/labels/' +
                    label_file.name) if label_file.exists() else None
        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
        class_set = wandb.Classes([{
            'id': id,
            'name': name
        } for id, name in class_to_id.items()])
        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
            box_data, img_classes = [], {}
            for cls, *xywh in labels[:, 1:].tolist():
                cls = int(cls)
                box_data.append({
                    "position": {
                        "middle": [xywh[0], xywh[1]],
                        "width": xywh[2],
                        "height": xywh[3]
                    },
                    "class_id": cls,
                    "box_caption": "%s" % (class_to_id[cls])
                })
                img_classes[cls] = class_to_id[cls]
            boxes = {
                "ground_truth": {
                    "box_data": box_data,
                    "class_labels": class_to_id
                }
            }  # inference-space
            table.add_data(si,
                           wandb.Image(paths, classes=class_set, boxes=boxes),
                           list(img_classes.values()),
                           Path(paths).name)
        artifact.add(table, name)
        return artifact
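A minimal sketch of how this method might be invoked, assuming an active wandb run and that the method lives on YOLOv5's WandbLogger; `wandb_logger`, `train_dataset`, and `names` are assumed names, not part of the snippet above:

# Minimal usage sketch; wandb_logger, train_dataset and names are assumed names.
import wandb

wandb.init(project='yolov5-dataset-demo')   # an active run is required for logging
train_artifact = wandb_logger.create_dataset_table(
    train_dataset,            # LoadImagesAndLabels instance
    names,                    # {class_id: class_name} mapping, e.g. from data.yaml
    name='train_dataset')
wandb.run.log_artifact(train_artifact)      # upload and version the dataset artifact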
Code Example #3
 def create_dataset_table(self, dataset, class_to_id, name='dataset'):
     # TODO: Explore multiprocessing to split this loop in parallel. This is essential for speeding up the logging
     artifact = wandb.Artifact(name=name, type="dataset")
     for img_file in tqdm([dataset.path]) if Path(dataset.path).is_dir() else tqdm(dataset.img_files):
         if Path(img_file).is_dir():
             artifact.add_dir(img_file, name='data/images')
             labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
             artifact.add_dir(labels_path, name='data/labels')
         else:
             artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
             label_file = Path(img2label_paths([img_file])[0])
             artifact.add_file(str(label_file),
                               name='data/labels/' + label_file.name) if label_file.exists() else None
     table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
     class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
     for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
         # scale normalized xywh labels to pixel-space xyxy using the original image (height, width); see the sketch after this example
         height, width = shapes[0]
         labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height])
         box_data, img_classes = [], {}
         for cls, *xyxy in labels[:, 1:].tolist():
             cls = int(cls)
             box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                              "class_id": cls,
                              "box_caption": "%s" % (class_to_id[cls]),
                              "scores": {"acc": 1},
                              "domain": "pixel"})
             img_classes[cls] = class_to_id[cls]
         boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
         table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes),
                        Path(paths).name)
     artifact.add(table, name)
     return artifact
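The conversion at the top of the dataset loop above maps normalized xywh labels to pixel-space xyxy boxes using the original image size from `shapes[0]`. A small self-contained sketch of just that step, with made-up sample values (`xywh2xyxy` is the YOLOv5 helper from utils.general):

# Isolated sketch of the label scaling step; the sizes and label values are made up.
import torch
from utils.general import xywh2xyxy   # YOLOv5 helper: center-xywh -> corner-xyxy

height, width = 480, 640                                  # original image size (assumed values)
labels = torch.tensor([[0., 2., 0.5, 0.5, 0.25, 0.5]])    # [batch_idx, class, x, y, w, h], normalized
labels[:, 2:] = xywh2xyxy(labels[:, 2:].view(-1, 4)) * torch.Tensor([width, height, width, height])
print(labels[:, 2:])                                      # tensor([[240., 120., 400., 360.]]) in pixels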