Example #1
    def _start_dataset_transform(self,
                                 dataset_dicts,
                                 dataset_transforms,
                                 pre_context_dict=None):
        """ Overall logic; for every transform, in order:
                pre_process all of the data
                process each record and filter it
                then feed the resulting dataset dicts into the next transform
            if _runtime == True : the transform runs in __getitem__() and can't return None
            if _runtime == False: the transform runs in _load_annotations()
        """
        timer = Timer()
        context = DatasetTransformContext()
        if pre_context_dict:
            for k, v in pre_context_dict.items():
                context[k] = v

        for transform in dataset_transforms:
            transform.pre_process(dataset_dicts, context)

            new_dataset_dicts = []
            for dd in dataset_dicts:
                if dd and not transform._runtime:
                    dd = transform.process(dd, context)
                if dd:
                    new_dataset_dicts.append(dd)
            dataset_dicts = new_dataset_dicts

        logging.info("DatasetTransform {} takes {:.2f} seconds.".format(
            self.raw_name, timer.seconds()))
        return dataset_dicts, context
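A minimal sketch of a transform that could plug into this pipeline; `DropNegatives` is a hypothetical stand-in for a `DatasetTransform` subclass from the surrounding codebase:

class DropNegatives:
    """Hypothetical load-time transform: filters records with a negative label."""
    _runtime = False  # load-time, so process() may return None to drop a record

    def pre_process(self, dataset_dicts, context):
        context["num_input_records"] = len(dataset_dicts)

    def process(self, dd, context):
        return None if dd.get("category_id", 0) < 0 else dd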
Example #2
    def _load_annotations(self):
        """Constructs the imdb."""
        timer = Timer()
        # Compile the split data path
        logger.info('{} data path: {}'.format(self.name, self.label_file))

        # Construct the image db
        imdb = []
        with open(self.label_file, "r") as f:
            for line in f:
                img_path, label = line.strip().split(" ")
                imdb.append({
                    "im_path": os.path.join(self.image_root, img_path),
                    "class": int(label),
                })

        logging.info("Loading {} takes {:.2f} seconds.".format(
            self.label_file, timer.seconds()))

        dataset_dicts = []
        for i, item in enumerate(imdb):
            dataset_dicts.append({
                "image_id": i,
                "category_id": item["class"],
                "file_name": item["im_path"],
            })

        return dataset_dicts
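The loader above assumes a plain-text label file with one `<relative_image_path> <integer_label>` pair per line; a toy file (file names invented) could be produced like this:

with open("labels.txt", "w") as fp:
    fp.write("cat/0001.jpg 0\n")
    fp.write("dog/0042.jpg 1\n")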
Example #3
    def _load_clean(self):
        timer = Timer()

        def sample_clean(matrix, clean_num):
            import random
            mat = matrix.reshape([-1])
            clean_mat = np.zeros_like(mat)
            random.seed(self.seed)
            pool = set()
            while len(pool) != clean_num:
                ind = random.randrange(mat.size)
                if ind not in pool:
                    clean_mat[ind] = mat[ind]
                    pool.add(ind)
            assert (clean_mat != 0).sum() == clean_num, "sampled entries must be nonzero"
            return clean_mat

        if self.dataset_type == "train":
            path = osp.join(self.dataset_root, "select_bias",
                            "matrix_after.npy")
            matrix = np.load(path)
            matrix = sample_clean(matrix, self.clean_num)
            matrix = matrix.reshape([self.user_num, self.item_num])
            dataset_dicts = self._matrix2datasets(matrix)
        else:
            path = osp.join(self.dataset_root, "select_bias",
                            "clean_matrix.npy")
            dataset_dicts = self._load_matrix(path)

        logging.info("Loading MovieLen ::{} takes {:.2f} seconds.".format(
            "movie_len_noise", timer.seconds()))
        return dataset_dicts
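The same sampling idea as a standalone, runnable sketch on a toy matrix; like the assert above, it presumes every sampled entry is nonzero:

import random
import numpy as np

def sample_clean_demo(matrix, clean_num, seed=0):
    mat = matrix.reshape([-1])
    clean_mat = np.zeros_like(mat)
    rng = random.Random(seed)
    pool = set()
    while len(pool) != clean_num:
        ind = rng.randrange(mat.size)
        if ind not in pool:
            clean_mat[ind] = mat[ind]   # keep this entry, zero the rest
            pool.add(ind)
    return clean_mat.reshape(matrix.shape)

kept = sample_clean_demo(np.arange(1, 13).reshape(3, 4), clean_num=5)
assert (kept != 0).sum() == 5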
Example #4
def benchmark_eval(args):
    cfg = setup(args)
    model = build_model(cfg)
    model.eval()
    logger.info("Model:\n{}".format(model))
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 0
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    dummy_data = list(itertools.islice(data_loader, 100))

    def f():
        while True:
            yield from DatasetFromList(dummy_data, copy=False)

    for _ in range(5):  # warmup
        model(dummy_data[0])

    max_iter = 400
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as pbar:
        for idx, d in enumerate(f()):
            if idx == max_iter:
                break
            model(d)
            pbar.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
Example #5
 def __init__(self, warmup_iter=3):
     """
     Args:
         warmup_iter (int): the number of iterations at the beginning to exclude
             from timing.
     """
     self._warmup_iter = warmup_iter
     self._step_timer = Timer()
Example #6
 def _load_true(self):
     timer = Timer()
     path = osp.join(self.dataset_root, "select_bias", "matrix_after.npy")
     dataset_dicts = self._load_matrix(path)
     logging.info("Loading MovieLen ::{} takes {:.2f} seconds.".format(
         "movie_len_true", timer.seconds()))
     return dataset_dicts
Example #7
    def _load_annotations(self):
        timer = Timer()
        dataloader = self.dataloader = DataLoader(
            osp.join(self.dataset_root, self.dataset_name), self.with_feature)
        dataloader.generate_pair_wise_training_corpus()
        logging.info("Loading AmazonDataset::{} takes {:.2f} seconds.".format(
            self.dataset_name, timer.seconds()))
        dataset_dicts = []
        dataset_statistic = dataloader.statistics

        if self.is_train:  # fill the dataset_dicts
            if not self.with_feature:
                for user, pos_item, neg_item in zip(
                        dataloader.user_all,
                        dataloader.pos_item_all,
                        dataloader.neg_item_all,
                ):
                    dataset_dicts.append({
                        "user": user,
                        "pos_item": pos_item,
                        "neg_item": neg_item,
                    })
            else:
                for user, pos_item, neg_item, feat, x_uf, x_if in zip(
                        dataloader.user_all, dataloader.pos_item_all,
                        dataloader.neg_item_all, dataloader.pos_feature_all,
                        dataloader.x_uf, dataloader.x_if):
                    dataset_dicts.append({
                        "user": user,
                        "feat": feat,
                        "pos_item": pos_item,
                        "neg_item": neg_item,
                        "x_uf": x_uf,
                        "x_if": x_if,
                    })

        else:  # fill the dataset_dicts with different logic
            # the ground-truth and candidate user sets may differ in this
            # dataset, so evaluate only on their intersection
            validate_user = set(
                dataloader.ground_truth_user_items_dict.keys()) & set(
                    dataloader.compute_user_items_dict.keys())
            for user in validate_user:
                gt = dataloader.ground_truth_user_items_dict[user]
                candidate = dataloader.compute_user_items_dict[user]
                dataset_dicts.append({
                    "user": user,
                    "gt": gt,
                    "candidate": candidate,
                })

        return dataset_dicts, dataset_statistic
Example #8
 def _load_noise(self):
     timer = Timer()
     if self.dataset_type == "train":
         path = osp.join(self.dataset_root, "select_bias",
                         "observe_matrix_" + str(self.alpha) + ".npy")
     else:
         path = osp.join(self.dataset_root, "select_bias",
                         "clean_matrix.npy")
     dataset_dicts = self._load_matrix(path)
     logging.info("Loading MovieLen ::{} takes {:.2f} seconds.".format(
         "movie_len_noise", timer.seconds()))
     return dataset_dicts
Example #9
 def _runtime_dataset_transform(self, dd, dataset_transforms):
     """ Runtime-stage transform procedure.
     """
     timer = Timer()
     context = self.context  # reuse the global information
     assert dd, ("input dd is None; something went wrong in your dataset, "
                 "or a None object was stored in self.dataset_dict")
     for transform in dataset_transforms:
         if transform._runtime:
             dd = transform.process(dd, context)
             assert dd, "return value can't be None in runtime mode"
     logging.info(
         "Runtime DatasetTransform {} takes {:.2f} seconds.".format(
             self.raw_name, timer.seconds()))
     self.context = context  # write the (possibly updated) context back
     return dd
Example #10
 def _load_annotations(self):
     timer = Timer()
     self.noise_train, self.clean, self.noise_val, self.test, num_classes = generate_noisy_cifar(
         self.dataset_name, self.dataset_root, self.validate_number,
         self.noise_ratio, self.num_clean, self.seed, self.with_background)
     dataset_statistic = {
         'num_classes': num_classes,
         'format': 'HWC',
     }
     self.type2data = {
         'noise-train': self.noise_train,
         'clean': self.clean,
         'noise-val': self.noise_val,
         'test': self.test,
     }
     dataset_dicts = []
     current_img, current_label, current_mask = self.type2data[
         self.data_type]
     if self.dataset_name == 'cifar-10':
         dataset_statistic['mean'] = (0.4914, 0.4822, 0.4465)
         dataset_statistic['std'] = (0.2023, 0.1994, 0.2010)
     elif self.dataset_name == 'cifar-100':
         dataset_statistic['mean'] = (0.5071, 0.4867, 0.4408)
         dataset_statistic['std'] = (0.2675, 0.2565, 0.2761)
     else:
         raise RuntimeError("name must be cifar-10 / cifar-100")
     assert (len(current_img) == len(current_label))
     assert (current_mask is None or len(current_img) == len(current_mask))
     for i in range(len(current_img)):
         item_dict = {}
         item_dict['image'] = current_img[i]  # H x W x C
         item_dict['image_id'] = i  # int
         item_dict['category_id'] = current_label[i]  # int
         if current_mask is not None:
             item_dict['is_clean'] = current_mask[i]
         else:
             item_dict['is_clean'] = 1
         item_dict['width'] = 32
         item_dict['height'] = 32
         item_dict['channel'] = 3
         dataset_dicts.append(item_dict)
     tot_len = len(dataset_dicts)
     dataset_dicts = dataset_dicts[:int(tot_len * self.discount)]
     print("dataset length: ", len(dataset_dicts))
     logging.info("Loading CIFAR Images::{} takes {:.2f} seconds.".format(
         self.dataset_name, timer.seconds()))
     return dataset_dicts, dataset_statistic
Example #11
    def _load_annotations(self):
        timer = Timer()
        raw_trainset, raw_testset = get_torch_mnist_dataset()
        # every set is a tuple of (datas, labels)
        train_set, val_set, test_set, train_pos_set, train_neg_set = get_imbalance_dataset(
            raw_trainset, raw_testset)

        dataset_statistic = {
            'num_classes': 10,
            'format': 'HWC',
        }
        self.type2data = {
            'train': train_set,
            'val': val_set,
            'test': test_set,
        }

        dataset_dicts = []
        current_img, current_label, current_mask = self.type2data[
            self.data_type]
        assert (len(current_img) == len(current_label))
        assert (current_mask is None or len(current_img) == len(current_mask))
        for i in range(len(current_img)):
            item_dict = {}
            item_dict['image'] = current_img[i]  # H x W x C
            item_dict['image_id'] = i  # int
            item_dict['category_id'] = current_label[i]  # int
            if current_mask is not None:
                item_dict['is_clean'] = current_mask[i]
            else:
                item_dict['is_clean'] = 1
            item_dict['width'] = 28
            item_dict['height'] = 28
            item_dict['channel'] = 1
            dataset_dicts.append(item_dict)
        tot_len = len(dataset_dicts)
        dataset_dicts = dataset_dicts[:int(tot_len * self.discount)]
        print("dataset length: ", len(dataset_dicts))
        logging.info("Loading CIFAR Images::{} takes {:.2f} seconds.".format(
            self.dataset_name, timer.seconds()))
        return dataset_dicts, dataset_statistic
Example #12
    def _load_dataset(self):
        timer = Timer()
        if self.with_feature:
            self._load_feature()
        meta_path = osp.join(self.dataset_root, self.dataset_name, 'meta.txt')
        with open(meta_path, "r") as fp:
            lines = fp.readlines()
        self.user_num = int(lines[0].strip())
        self.item_num = int(lines[1].strip())

        data_path = osp.join(self.dataset_root, self.dataset_name,
                             self.dataset_type + ".ascii")
        prop_path = osp.join(self.dataset_root, self.dataset_name,
                             "propensities.ascii")
        self.mat_prop = None
        if osp.exists(prop_path):
            self.mat_prop = np.loadtxt(prop_path)
        dataset_dicts = self._load_matrix(data_path)
        logging.info("Loading dataset ::{} takes {:.2f} seconds.".format(
            self.dataset_name, timer.seconds()))
        return dataset_dicts
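`meta.txt` is expected to hold the user count on its first line and the item count on its second; a toy file (the counts here are purely illustrative):

with open("meta.txt", "w") as fp:
    fp.write("943\n")   # user_num
    fp.write("1682\n")  # item_num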
Example #13
    def _load_annotations(self):
        """Constructs the imdb."""
        timer = Timer()
        # Compile the split data path
        logger.info('{} data path: {}'.format(self.name, self.label_file))
        # Images are stored per class in subdirs (format: n<number>)
        class_ids = list(IMAGENET_CATEGORIES.keys())
        # Map ImageNet class ids to contiguous ids
        class_id_cont_id = {
            k: v[0] - 1
            for k, v in IMAGENET_CATEGORIES.items()
        }
        # Construct the image db
        imdb = []
        if "://" not in self.image_root:
            for class_id in class_ids:
                cont_id = class_id_cont_id[class_id]
                im_dir = os.path.join(self.label_file, class_id)
                for im_name in os.listdir(im_dir):
                    imdb.append({
                        'im_path': os.path.join(im_dir, im_name),
                        'class': cont_id,
                    })

        logging.info("Loading {} takes {:.2f} seconds.".format(
            self.label_file, timer.seconds()))

        dataset_dicts = []
        for i, item in enumerate(imdb):
            dataset_dicts.append({
                "image_id": i,
                "category_id": item["class"],
                "file_name": item["im_path"],
            })

        return dataset_dicts
Example #14
    def _load_raw(self):
        timer = Timer()
        mmap = {
            "train": "base",
            "test": "test",
        }
        path = osp.join(
            self.dataset_root,
            "u" + str(self.num_fold) + "." + mmap[self.dataset_type])
        with open(path, "r") as fp:
            lines = fp.readlines()

        dataset_dicts = []

        for line in lines:
            fields = line.strip().split("\t")
            user = int(fields[0])
            item = int(fields[1])
            score = int(fields[2])
            dataset_dicts.append((user, item, score))

        logging.info("Loading MovieLen ::{} takes {:.2f} seconds.".format(
            self.dataset_name, timer.seconds()))
        return dataset_dicts
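The `u<fold>.base` / `u<fold>.test` files are read as tab-separated `user item score` triples; a toy file for illustration (extra trailing fields, e.g. a timestamp, would simply be ignored by the indexing above):

with open("u1.base", "w") as fp:
    fp.write("1\t31\t4\n")
    fp.write("2\t17\t5\n")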
Example #15
def benchmark_data(args):
    cfg = setup(args)

    dataloader = build_detection_train_loader(cfg)

    timer = Timer()
    itr = iter(dataloader)
    for i in range(10):  # warmup
        next(itr)
        if i == 0:
            startup_time = timer.seconds()
    timer = Timer()
    max_iter = 1000
    for _ in tqdm.trange(max_iter):
        next(itr)
    logger.info("{} iters ({} images) in {} seconds.".format(
        max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()))
    logger.info("Startup time: {} seconds".format(startup_time))
    ram = psutil.virtual_memory()
    logger.info("RAM Usage: {:.2f}/{:.2f} GB".format(
        (ram.total - ram.available) / 1024**3, ram.total / 1024**3))
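The RAM figure comes from `psutil.virtual_memory()`; the same computation in isolation:

import psutil

vm = psutil.virtual_memory()
used_gb = (vm.total - vm.available) / 1024**3
print("RAM Usage: {:.2f}/{:.2f} GB".format(used_gb, vm.total / 1024**3))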
Example #16
    def _load_annotations(self, json_file, image_root):
        """
        Load a json file in LVIS's annotation format.
        Args:
            json_file (str): full path to the LVIS json annotation file.
            image_root (str): the directory where the images in this json file exist.
        Returns:
            list[dict]: a list of dicts in cvpods standard format. (See
            `Using Custom Datasets </tutorials/datasets.html>`_ )
        Notes:
            1. This function does not read the image files.
            The results do not have the "image" field.
        """
        from lvis import LVIS

        json_file = PathManager.get_local_path(json_file)

        timer = Timer()
        lvis_api = LVIS(json_file)
        if timer.seconds() > 1:
            logger.info("Loading {} takes {:.2f} seconds.".format(
                json_file, timer.seconds()))

        # sort indices for reproducible results
        img_ids = sorted(lvis_api.imgs.keys())
        # imgs is a list of dicts, each looks something like:
        # {'license': 4,
        #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
        #  'file_name': 'COCO_val2014_000000001268.jpg',
        #  'height': 427,
        #  'width': 640,
        #  'date_captured': '2013-11-17 05:57:24',
        #  'id': 1268}
        imgs = lvis_api.load_imgs(img_ids)
        # anns is a list[list[dict]], where each dict is an annotation
        # record for an object. The inner list enumerates the objects in an image
        # and the outer list enumerates over images. Example of anns[0]:
        # [{'segmentation': [[192.81,
        #     247.09,
        #     ...
        #     219.03,
        #     249.06]],
        #   'area': 1035.749,
        #   'image_id': 1268,
        #   'bbox': [192.81, 224.8, 74.73, 33.43],
        #   'category_id': 16,
        #   'id': 42986},
        #  ...]
        anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]

        # Sanity check that each annotation has a unique id
        ann_ids = [
            ann["id"] for anns_per_image in anns for ann in anns_per_image
        ]
        assert len(set(ann_ids)) == len(
            ann_ids), "Annotation ids in '{}' are not unique".format(json_file)

        imgs_anns = list(zip(imgs, anns))

        logger.info("Loaded {} images in the LVIS format from {}".format(
            len(imgs_anns), json_file))

        dataset_dicts = []
        for (img_dict, anno_dict_list) in imgs_anns:
            record = {}
            file_name = img_dict["file_name"]
            if img_dict["file_name"].startswith("COCO"):
                # Convert from the COCO 2014 file naming convention of
                # COCO_[train/val/test]2014_000000000000.jpg to the 2017 naming convention of
                # 000000000000.jpg (LVIS v1 will fix this naming issue)
                file_name = file_name[-16:]
            record["file_name"] = os.path.join(image_root, file_name)
            record["height"] = img_dict["height"]
            record["width"] = img_dict["width"]
            record["not_exhaustive_category_ids"] = img_dict.get(
                "not_exhaustive_category_ids", [])
            record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
            image_id = record["image_id"] = img_dict["id"]

            objs = []
            for anno in anno_dict_list:
                # Check that the image_id in this annotation is the same as
                # the image_id we're looking at.
                # This fails only when the data parsing logic or the annotation file is buggy.
                assert anno["image_id"] == image_id
                obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
                obj["category_id"] = anno[
                    "category_id"] - 1  # Convert 1-indexed to 0-indexed
                segm = anno["segmentation"]  # list[list[float]]
                # filter out invalid polygons (< 3 points)
                valid_segm = [
                    poly for poly in segm
                    if len(poly) % 2 == 0 and len(poly) >= 6
                ]
                assert len(segm) == len(
                    valid_segm
                ), "Annotation contains an invalid polygon with < 3 points"
                assert len(segm) > 0
                obj["segmentation"] = segm
                objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)

        return dataset_dicts
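A small sketch of consuming the returned records, e.g. counting annotated instances per image (field names follow the cvpods standard format produced above):

def instances_per_image(dataset_dicts):
    return {d["file_name"]: len(d["annotations"]) for d in dataset_dicts}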
Example #17
    def _load_annotations(self, json_file, image_root):
        """
        Load a json file with CrowdHuman's instances annotation format.
        Currently supports instance detection, instance segmentation,
        and person keypoints annotations.

        Args:
            json_file (str): full path to the json file in CrowdHuman instances annotation format.
            image_root (str): the directory where the images in this json file exist.

        Returns:
            list[dict]: a list of dicts in cvpods standard format. (See
            `Using Custom Datasets </tutorials/datasets.html>`_ )

        Notes:
            1. This function does not read the image files.
               The results do not have the "image" field.
        """
        timer = Timer()
        json_file = PathManager.get_local_path(json_file)
        with open(json_file, 'r') as file:
            gt_records = file.readlines()
        if timer.seconds() > 1:
            logger.info("Loading {} takes {:.2f} seconds.".format(
                json_file, timer.seconds()))

        logger.info("Loaded {} images in CrowdHuman format from {}".format(
            len(gt_records), json_file))

        dataset_dicts = []

        ann_keys = ["tag", "hbox", "vbox", "head_attr", "extra"]
        for anno_str in gt_records:
            anno_dict = json.loads(anno_str)

            record = {}
            record["file_name"] = os.path.join(
                image_root, "{}.jpg".format(anno_dict["ID"]))
            record["image_id"] = anno_dict["ID"]

            objs = []
            for anno in anno_dict['gtboxes']:
                obj = {key: anno[key] for key in ann_keys if key in anno}
                obj["bbox"] = anno["fbox"]
                obj["category_id"] = 0

                extra = anno.get('extra', {})
                if extra.get('ignore', 0) != 0:
                    obj["category_id"] = -1

                obj["bbox_mode"] = BoxMode.XYWH_ABS
                objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)

        return dataset_dicts
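Each line of the CrowdHuman ground-truth file is a standalone JSON record; a minimal line consistent with the keys read above (all values invented):

import json

record = {
    "ID": "example_0001",
    "gtboxes": [
        {"tag": "person",
         "fbox": [72, 202, 163, 503],   # full-body box, XYWH
         "hbox": [171, 208, 62, 83],    # head box
         "vbox": [72, 202, 163, 398],   # visible box
         "extra": {"ignore": 0}},
    ],
}
line = json.dumps(record)  # one such line per image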
Example #18
    def _load_annotations(self,
                          json_file,
                          image_root,
                          dataset_name=None,
                          extra_annotation_keys=None):
        """
        Load a json file with COCO's instances annotation format.
        Currently supports instance detection, instance segmentation,
        and person keypoints annotations.

        Args:
            json_file (str): full path to the json file in COCO instances annotation format.
            image_root (str): the directory where the images in this json file exist.
            dataset_name (str): the name of the dataset (e.g., coco_2017_train).
                If provided, this function will also put "thing_classes" into
                the metadata associated with this dataset.
            extra_annotation_keys (list[str]): list of per-annotation keys that should also be
                loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
                "category_id", "segmentation"). The values for these keys will be returned as-is.
                For example, the densepose annotations are loaded in this way.

        Returns:
            list[dict]: a list of dicts in cvpods standard format. (See
            `Using Custom Datasets </tutorials/datasets.html>`_ )

        Notes:
            1. This function does not read the image files.
            The results do not have the "image" field.
        """
        from pycocotools.coco import COCO

        timer = Timer()
        json_file = PathManager.get_local_path(json_file)
        with contextlib.redirect_stdout(io.StringIO()):
            coco_api = COCO(json_file)
        if timer.seconds() > 1:
            logger.info("Loading {} takes {:.2f} seconds.".format(
                json_file, timer.seconds()))

        id_map = None
        if dataset_name is not None:
            cat_ids = sorted(coco_api.getCatIds())
            cats = coco_api.loadCats(cat_ids)
            # The categories in a custom json file may not be sorted.
            thing_classes = [
                c["name"] for c in sorted(cats, key=lambda x: x["id"])
            ]
            self.meta["thing_classes"] = thing_classes

            # In COCO, certain category ids are artificially removed,
            # and by convention they are always ignored.
            # We deal with COCO's id issue and translate
            # the category ids to contiguous ids in [0, 80).

            # It works by looking at the "categories" field in the json, therefore
            # if users' own json also have incontiguous ids, we'll
            # apply this mapping as well but print a warning.
            if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
                if "coco" not in dataset_name:
                    logger.warning("""
    Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
    """)
            id_map = {v: i for i, v in enumerate(cat_ids)}
            self.meta["thing_dataset_id_to_contiguous_id"] = id_map

        # sort indices for reproducible results
        img_ids = sorted(coco_api.imgs.keys())
        # imgs is a list of dicts, each looks something like:
        # {'license': 4,
        #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
        #  'file_name': 'COCO_val2014_000000001268.jpg',
        #  'height': 427,
        #  'width': 640,
        #  'date_captured': '2013-11-17 05:57:24',
        #  'id': 1268}
        imgs = coco_api.loadImgs(img_ids)
        # anns is a list[list[dict]], where each dict is an annotation
        # record for an object. The inner list enumerates the objects in an image
        # and the outer list enumerates over images. Example of anns[0]:
        # [{'segmentation': [[192.81,
        #     247.09,
        #     ...
        #     219.03,
        #     249.06]],
        #   'area': 1035.749,
        #   'iscrowd': 0,
        #   'image_id': 1268,
        #   'bbox': [192.81, 224.8, 74.73, 33.43],
        #   'category_id': 16,
        #   'id': 42986},
        #  ...]
        anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]

        if "minival" not in json_file:
            # The popular valminusminival & minival annotations for COCO2014 contain this bug.
            # However the ratio of buggy annotations there is tiny and does not affect accuracy.
            # Therefore we explicitly white-list them.
            ann_ids = [
                ann["id"] for anns_per_image in anns for ann in anns_per_image
            ]
            assert len(set(ann_ids)) == len(
                ann_ids), "Annotation ids in '{}' are not unique!".format(
                    json_file)

        imgs_anns = list(zip(imgs, anns))

        logger.info("Loaded {} images in COCO format from {}".format(
            len(imgs_anns), json_file))

        dataset_dicts = []

        ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"
                    ] + (extra_annotation_keys or [])

        num_instances_without_valid_segmentation = 0

        for (img_dict, anno_dict_list) in imgs_anns:
            record = {}
            record["file_name"] = os.path.join(image_root,
                                               img_dict["file_name"])
            record["height"] = img_dict["height"]
            record["width"] = img_dict["width"]
            image_id = record["image_id"] = img_dict["id"]

            objs = []
            for anno in anno_dict_list:
                # Check that the image_id in this annotation is the same as
                # the image_id we're looking at.
                # This fails only when the data parsing logic or the annotation file is buggy.

                # The original COCO valminusminival2014 & minival2014 annotation files
                # actually contains bugs that, together with certain ways of using COCO API,
                # can trigger this assertion.
                assert anno["image_id"] == image_id
                assert anno.get("ignore", 0) == 0

                obj = {key: anno[key] for key in ann_keys if key in anno}

                segm = anno.get("segmentation", None)
                if segm:  # either list[list[float]] or dict(RLE)
                    if not isinstance(segm, dict):
                        # filter out invalid polygons (< 3 points)
                        segm = [
                            poly for poly in segm
                            if len(poly) % 2 == 0 and len(poly) >= 6
                        ]
                        if len(segm) == 0:
                            num_instances_without_valid_segmentation += 1
                            continue  # ignore this instance
                    obj["segmentation"] = segm

                keypts = anno.get("keypoints", None)
                if keypts:  # list[int]
                    for idx, v in enumerate(keypts):
                        if idx % 3 != 2:
                            # COCO's segmentation coordinates are floating points in [0, H or W],
                            # but keypoint coordinates are integers in [0, H-1 or W-1]
                            # Therefore we assume the coordinates are "pixel indices" and
                            # add 0.5 to convert to floating point coordinates.
                            keypts[idx] = v + 0.5
                    obj["keypoints"] = keypts

                obj["bbox_mode"] = BoxMode.XYWH_ABS
                if id_map:
                    obj["category_id"] = id_map[obj["category_id"]]
                objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)

        if num_instances_without_valid_segmentation > 0:
            logger.warning(
                "Filtered out {} instances without valid segmentation. "
                "There might be issues in your dataset generation process.".
                format(num_instances_without_valid_segmentation))
        return dataset_dicts
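The contiguous-id mapping is a plain enumeration over the sorted category ids; on a toy id list with gaps:

cat_ids = [1, 2, 5, 9]  # non-contiguous, as in COCO
id_map = {v: i for i, v in enumerate(cat_ids)}
assert id_map == {1: 0, 2: 1, 5: 2, 9: 3}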
Example #19
 def before_train(self):
     self._start_time = time.perf_counter()
     self._total_timer = Timer()
     self._total_timer.pause()
Example #20
    def _load_annotations(self):
        """ The ReferIt data provides several kinds of information:
                sentences / tokens / raw text
                annId -> bbox + mask info
                imgId -> file name
        """
        timer = Timer()
        self.REFER = self._REFER(data_root=osp.join(self.refer_tool_root,
                                                    "data/"),
                                 dataset=self.dataset_name,
                                 splitBy=self.dataset_splitby)

        dataset_dicts = []
        split = self.dataset_split
        ref_ids = self.REFER.getRefIds(split=split)
        self.proposal_dict = None
        if self._proposal_root:
            with open(osp.join(self._proposal_root, 'proposals.pkl'),
                      'rb') as fp:
                self.proposal_dict = pickle.load(fp)

        for ref_id in tqdm.tqdm(ref_ids):
            ref_dict = self.REFER.loadRefs(ref_id)[0]
            ann_dict = self.REFER.loadAnns(ref_dict['ann_id'])[0]
            img_dict = self.REFER.loadImgs(ref_dict['image_id'])[0]
            cat_dict = self.REFER.loadCats(ref_dict['category_id'])[0]

            for sentence in ref_dict['sentences']:
                dd = {}
                dd['sent_id'] = sentence['sent_id']
                if self._verbose:
                    dd['img_dict'] = img_dict
                    dd['ref_dict'] = ref_dict
                    dd['cat_dict'] = cat_dict
                    dd['ann_dict'] = ann_dict

                dd['file_name'] = img_dict['file_name']
                dd['image_size'] = [img_dict['height'], img_dict['width']]
                dd['tokens'] = sentence['tokens']
                dd['raw'] = sentence['raw']
                dd['category_id'] = None  # cat_dict[]  # FIXME: add category logic
                dd['bbox'] = self.REFER.getRefBox(ref_id)
                if len(dd['bbox']) != 4:
                    print('invalid bbox')
                    continue
                dd['height'] = img_dict['height']
                dd['width'] = img_dict['width']
                # FIXME: add gt_classes information
                dataset_dicts.append(dd)

        logging.info("Loading {} takes {:.2f} seconds.".format(
            self.raw_name, timer.seconds()))
        dataset_transforms = []

        if not self._image_only:
            dataset_transforms.append(DatasetTransform_ExcludeEmptyProposal())

        # Check whether small gt boxes would leave no valid proposals
        self._exclude_small_gtboxes = not self._is_train and not self._image_only
        if self._exclude_small_gtboxes:
            dataset_transforms.append(DatasetTransform_ExcludeSmallGt(20000))
        if self._add_attribute:
            atts_json_file = osp.join(
                self.refer_tool_root, "data/", "parsed_atts",
                self.dataset_name + '_' + self.dataset_splitby, "sents.json")
            dataset_transforms.append(
                DatasetTransform_AddAttribute(
                    sent_json_filepath=atts_json_file))

        dataset_dicts, context = self._start_dataset_transform(
            dataset_dicts, dataset_transforms,
            {'proposal_dict': self.proposal_dict})
        self.context = context

        # both train and eval currently keep the full list
        self.dataset_dicts = dataset_dicts

        # In image-only mode, keep a single record per image file name
        if self._image_only:
            print('dataset_len:', len(dataset_dicts))
            print('Image Only Mode!!')
            image_pool = {}
            new_dicts = []
            for item in self.dataset_dicts:
                if item['file_name'] not in image_pool:
                    new_dicts.append(item)
                    image_pool[item['file_name']] = 1
            print('dataset_len:', len(new_dicts))
            self.dataset_dicts = new_dicts  # write the deduplicated list back
Example #21
class IterationTimer(HookBase):
    """
    Track the time spent for each iteration (each run_step call in the trainer).
    Print a summary at the end of training.

    This hook uses the time between the call to its :meth:`before_step`
    and :meth:`after_step` methods.
    Under the convention that :meth:`before_step` of all hooks should only
    take negligible amount of time, the :class:`IterationTimer` hook should be
    placed at the beginning of the list of hooks to obtain accurate timing.
    """
    def __init__(self, warmup_iter=3):
        """
        Args:
            warmup_iter (int): the number of iterations at the beginning to exclude
                from timing.
        """
        self._warmup_iter = warmup_iter
        self._step_timer = Timer()

    def before_train(self):
        self._start_time = time.perf_counter()
        self._total_timer = Timer()
        self._total_timer.pause()

    def after_train(self):
        logger = logging.getLogger(__name__)
        total_time = time.perf_counter() - self._start_time
        total_time_minus_hooks = self._total_timer.seconds()
        hook_time = total_time - total_time_minus_hooks

        num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter

        if num_iter > 0 and total_time_minus_hooks > 0:
            # Speed is meaningful only after warmup
            # NOTE this format is parsed by grep in some scripts
            logger.info(
                "Overall training speed: {} iterations in {} ({:.4f} s / it)".
                format(
                    num_iter,
                    str(datetime.timedelta(
                        seconds=int(total_time_minus_hooks))),
                    total_time_minus_hooks / num_iter,
                ))

        logger.info("Total training time: {} ({} on hooks)".format(
            str(datetime.timedelta(seconds=int(total_time))),
            str(datetime.timedelta(seconds=int(hook_time))),
        ))

    def before_step(self):
        self._step_timer.reset()
        self._total_timer.resume()

    def after_step(self):
        # +1 because we're in after_step
        iter_done = self.trainer.iter - self.trainer.start_iter + 1
        if iter_done >= self._warmup_iter:
            sec = self._step_timer.seconds()
            self.trainer.storage.put_scalars(time=sec)
        else:
            self._start_time = time.perf_counter()
            self._total_timer.reset()

        self._total_timer.pause()
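The hook's bookkeeping, replayed outside a trainer as a runnable sketch (fvcore-style `Timer` assumed; `time.sleep` stands in for `run_step`):

import time
from fvcore.common.timer import Timer

total_timer = Timer()
total_timer.pause()                 # as in before_train
step_timer = Timer()
warmup_iter = 3
for it in range(5):
    step_timer.reset()              # before_step
    total_timer.resume()
    time.sleep(0.01)                # the timed step
    if it + 1 >= warmup_iter:       # after_step: record only past warmup
        print("iter {}: {:.3f} s".format(it, step_timer.seconds()))
    else:
        total_timer.reset()         # still warming up: discard accumulated time
    total_timer.pause()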
Example #22
    def _load_annotations(self,
                          json_file,
                          image_root,
                          dataset_name=None,
                          extra_annotation_keys=None):
        """
        Load a json file with WiderFace's instances annotation format.
        Currently supports instance detection, instance segmentation,
        and person keypoints annotations.

        Args:
            json_file (str): full path to the json file in WiderFace instances annotation format.
            image_root (str): the directory where the images in this json file exist.
            dataset_name (str): the name of the dataset (e.g., widerface_2019_train).
                If provided, this function will also put "thing_classes" into
                the metadata associated with this dataset.
            extra_annotation_keys (list[str]): list of per-annotation keys that should also be
                loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
                "category_id", "segmentation"). The values for these keys will be returned as-is.
                For example, the densepose annotations are loaded in this way.

        Returns:
            list[dict]: a list of dicts in cvpods standard format. (See
            `Using Custom Datasets </tutorials/datasets.html>`_ )

        Notes:
            1. This function does not read the image files.
               The results do not have the "image" field.
        """
        from pycocotools.coco import COCO

        timer = Timer()
        json_file = PathManager.get_local_path(json_file)
        with contextlib.redirect_stdout(io.StringIO()):
            coco_api = COCO(json_file)
        if timer.seconds() > 1:
            logger.info("Loading {} takes {:.2f} seconds.".format(
                json_file, timer.seconds()))

        id_map = None
        if dataset_name is not None:
            meta = self.meta
            cat_ids = sorted(coco_api.getCatIds())

            id_map = {v: i for i, v in enumerate(cat_ids)}
            meta["thing_dataset_id_to_contiguous_id"] = id_map

        # sort indices for reproducible results
        img_ids = sorted(coco_api.imgs.keys())
        # imgs is a list of dicts, each looks something like:
        # {'license': 4,
        #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
        #  'file_name': 'COCO_val2014_000000001268.jpg',
        #  'height': 427,
        #  'width': 640,
        #  'date_captured': '2013-11-17 05:57:24',
        #  'id': 1268}
        imgs = coco_api.loadImgs(img_ids)
        # anns is a list[list[dict]], where each dict is an annotation
        # record for an object. The inner list enumerates the objects in an image
        # and the outer list enumerates over images. Example of anns[0]:
        # [{'segmentation': [[192.81,
        #     247.09,
        #     ...
        #     219.03,
        #     249.06]],
        #   'area': 1035.749,
        #   'iscrowd': 0,
        #   'image_id': 1268,
        #   'bbox': [192.81, 224.8, 74.73, 33.43],
        #   'category_id': 16,
        #   'id': 42986},
        #  ...]
        anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]

        if "minival" not in json_file:
            # The popular valminusminival & minival annotations for COCO2014 contain this bug.
            # However the ratio of buggy annotations there is tiny and does not affect accuracy.
            # Therefore we explicitly white-list them.
            ann_ids = [
                ann["id"] for anns_per_image in anns for ann in anns_per_image
            ]
            assert len(set(ann_ids)) == len(ann_ids), \
                "Annotation ids in '{}' are not unique!".format(json_file)

        imgs_anns = list(zip(imgs, anns))

        logger.info("Loaded {} images in COCO format from {}".format(
            len(imgs_anns), json_file))

        dataset_dicts = []

        ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"
                    ] + (extra_annotation_keys or [])

        for (img_dict, anno_dict_list) in imgs_anns:
            record = {}
            record["file_name"] = os.path.join(image_root,
                                               img_dict["file_name"])
            record["height"] = img_dict["height"]
            record["width"] = img_dict["width"]
            image_id = record["image_id"] = img_dict["id"]

            objs = []
            for anno in anno_dict_list:
                # Check that the image_id in this annotation is the same as
                # the image_id we're looking at.
                # This fails only when the data parsing logic or the annotation file is buggy.

                # The original COCO valminusminival2014 & minival2014 annotation files
                # actually contains bugs that, together with certain ways of using COCO API,
                # can trigger this assertion.
                assert anno["image_id"] == image_id

                # ensure the width and height of bbox are greater than 0
                if anno["bbox"][2] <= 0 or anno["bbox"][3] <= 0:
                    continue

                if anno.get("ignore", 0) != 0:
                    continue

                obj = {key: anno[key] for key in ann_keys if key in anno}

                obj["bbox_mode"] = BoxMode.XYWH_ABS
                if id_map:
                    obj["category_id"] = id_map[obj["category_id"]]
                objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)
        return dataset_dicts
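The degenerate-box filter above, in isolation on toy XYWH boxes:

boxes = [[10, 10, 50, 80], [5, 5, 0, 20]]   # second box has zero width
valid = [b for b in boxes if b[2] > 0 and b[3] > 0]
assert valid == [[10, 10, 50, 80]]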