def save_checkpoint(path_to_checkpoint, model, optimizer, epoch, cfg):
    """
    save a checkpoint
    :param path_to_checkpoint:
    :param mdoel:
    :param optimizer:
    :param epoch:
    :param cfg:
    :return:
    """
    logger.info("save checkpoint in epoch {}".format(epoch))

    PathManager.mkdirs(get_checkpoint_dir(path_to_checkpoint))
    sd = model.state_dict()
    # Record the state.
    checkpoint = {
        "epoch": epoch,
        "model_state": sd,
        "optimizer_state": optimizer.state_dict(),
        "cfg": cfg.dump()
    }
    checkpoint_path = get_path_to_checkpoints(path_to_checkpoint, epoch + 1)
    # if (epoch+1)%10==0 or (epoch+1)==cfg.SOLVER.MAX_EPOCH:
    with PathManager.open(checkpoint_path, "wb") as f:
        torch.save(checkpoint, f)
    return checkpoint_path
def make_checkpoint_dir(path_to_checkpoint):
    """Create the checkpoint directory under `path_to_checkpoint` if needed and return its path."""
    checkpoint_dir = os.path.join(path_to_checkpoint, "checkpoints")

    if not PathManager.exists(checkpoint_dir):
        PathManager.mkdirs(checkpoint_dir)
    return checkpoint_dir
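
A minimal sketch of the PathManager.mkdirs call that all of these snippets rely on, assuming an fvcore release that still exposes fvcore.common.file_io.PathManager (newer code imports it from iopath instead); the path below is hypothetical:

from fvcore.common.file_io import PathManager

checkpoint_dir = "/tmp/example_job/checkpoints"
PathManager.mkdirs(checkpoint_dir)   # behaves like os.makedirs(..., exist_ok=True)
PathManager.mkdirs(checkpoint_dir)   # calling it again on an existing directory is a no-op
assert PathManager.exists(checkpoint_dir)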
Example #3
def setup_logger(
    output=None, distributed_rank=0, *, color=True, name="segmentation", abbrev_name=None
):
    """
    Initialize the segmentation logger and set its verbosity level to "INFO".
    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger
        abbrev_name (str): an abbreviation of the module, to avoid long names in logs.
            Set to "" to not log the root module in logs.
            By default, will abbreviate "segmentation" to "seg" and leave other
            modules unchanged.
    Returns:
        logging.Logger: a logger
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    if abbrev_name is None:
        abbrev_name = "seg" if name == "segmentation" else name

    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )
    # stdout logging: master only
    if distributed_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter(
                colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
                datefmt="%m/%d %H:%M:%S",
                root_name=name,
                abbrev_name=str(abbrev_name),
            )
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        if distributed_rank > 0:
            filename = filename + ".rank{}".format(distributed_rank)
        # os.makedirs(os.path.dirname(filename))
        PathManager.mkdirs(os.path.dirname(filename))

        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)

    return logger
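
A minimal usage sketch for the logger above; the output paths are hypothetical and, in a real distributed run, each rank calls this from its own process:

# Rank 0: logs to stdout and to /tmp/seg_run/log.txt
main_logger = setup_logger(output="/tmp/seg_run", distributed_rank=0)
main_logger.info("training started")

# Rank 1: no stdout handler; messages go to /tmp/seg_run/worker.log.rank1
worker_logger = setup_logger(
    output="/tmp/seg_run/worker.log", distributed_rank=1, name="segmentation.worker"
)
worker_logger.info("worker ready")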
Example #4
def do_feature_extraction(cfg, model, dataset_name):
    with inference_context(model):
        dump_folder = os.path.join(cfg.OUTPUT_DIR, "features",
                                   dataset_to_folder_mapper[dataset_name])
        PathManager.mkdirs(dump_folder)
        # data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name)
        extract_grid_feature_on_local(model, dump_folder, 'data/train_images')
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))

            if not comm.is_main_process():
                return {}

        if len(self._predictions) == 0:
            self._logger.warning(
                "[COCOEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(self._predictions, f)

        self._results = OrderedDict()
        if "instances" in self._predictions[0]:
            self._eval_predictions()
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
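
The distributed branch above follows a common gather-to-rank-0 pattern. A standalone sketch of that pattern, assuming detectron2.utils.comm and an already initialized process group (the helper name is made up):

import itertools

from detectron2.utils import comm

def gather_predictions(local_predictions):
    """Collect every worker's prediction list on the main process."""
    comm.synchronize()                                # wait for all workers
    gathered = comm.gather(local_predictions, dst=0)  # list of per-rank lists; empty off rank 0
    if not comm.is_main_process():
        return []
    return list(itertools.chain(*gathered))           # flatten into a single list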
Example #6
    def process(self, inputs, outputs):
        for input, output in zip(inputs, outputs):
            samples = output[
                'samples']  # list of size num_samples with el: # nc, t, h, w
            v_idx = input['video_idx']
            for sample_idx in range(len(samples)):
                sample = samples[sample_idx].squeeze(
                    0)  # t, h, w if nc == 1 or nc, t, h, w
                if sample.dim() == 4:
                    sample = sample.transpose(0, 1)  # t, nc, h, w
                self.vqvae.to(sample.device)
                code = sample.detach().cpu().numpy()
                sample = self.vqvae.decode(sample)  # T, 3, H, W
                sample = self.vqvae.back_normalizer(sample)  # T, 3, H, W
                if self.scale_to_zeroone:
                    sample = sample * 255
                sample.clamp_(0.0, 255.0)
                sample = sample.permute(0, 2, 3, 1).contiguous()  # T, H, W, 3
                sample = sample.detach().cpu().numpy().astype(np.uint8)

                video_dir = os.path.join(self._output_dir, "samples",
                                         self._dataset_name,
                                         f'video_{sample_idx}_{v_idx}')
                PathManager.mkdirs(video_dir)
                np.save(os.path.join(video_dir, f'codes.npy'), code)
                for frame_idx in range(len(sample)):
                    frame_path = os.path.join(video_dir, f'{frame_idx}.png')
                    for i in range(10):
                        try:
                            save_image(sample[frame_idx], frame_path)
                            break
                        except OSError:
                            print(f'sleep 3 sec and try again #{i}')
                            time.sleep(3)
                            continue
Example #7
def default_setup(cfg, args):
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config") and args.config != "":
        logger.info("Contents of args.config={}:\n{}".format(
            args.config,
            PathManager.open(args.config, "r").read()))

    logger.info("Running with full config:\n{}".format(cfg))

    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)

    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
    def evaluate(self):
        if self._distributed:
            synchronize()
            endpoint_errors = all_gather(self._endpoint_errors)
            endpoint_errors = [per_image for per_gpu in endpoint_errors for per_image in per_gpu]
            self._predictions = all_gather(self._predictions)
            if not is_main_process():
                return

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "flow_predictions.json")
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))

        ave_epe = sum(endpoint_errors) / len(endpoint_errors)
        res = {"ave_epe": ave_epe}

        if self._output_dir:
            file_path = os.path.join(self._output_dir, "flow_evaluation.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(res, f)

        results = OrderedDict({"flow": res})
        small_table = create_small_table(res)
        self._logger.info("Evaluation results for flow: \n" + small_table)
        dump_info_one_task = {
            "task": "flow",
            "tables": [small_table],
        }
        _dump_to_markdown([dump_info_one_task])
        return results
Example #9
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
    """
    Converts dataset into COCO format and saves it to a json file.
    dataset_name must be registered in DatasetCatalog and in detectron2's standard format.

    Args:
        dataset_name:
            reference from the config file to the catalogs
            must be registered in DatasetCatalog and in detectron2's standard format
        output_file: path of json file that will be saved to
        allow_cached: if json file is already present then skip conversion
    """

    # TODO: The dataset or the conversion script *may* change,
    # a checksum would be useful for validating the cached data

    PathManager.mkdirs(os.path.dirname(output_file))
    with file_lock(output_file):
        if os.path.exists(output_file) and allow_cached:
            logger.info(
                f"Cached annotations in COCO format already exist: {output_file}"
            )
        else:
            logger.info(
                f"Converting dataset annotations in '{dataset_name}' to COCO format ..."
            )
            coco_dict = convert_to_coco_dict(dataset_name)

            with PathManager.open(output_file, "w") as json_file:
                logger.info(
                    f"Caching annotations in COCO format: {output_file}")
                json.dump(coco_dict, json_file)
Example #10
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions
        # predictions: list of dicts [{'image_id', 'instances' (list of dicts [{'image_id', 'category_id', 'bbox', 'score'}])}]

        if len(predictions) == 0:
            self._logger.warning(
                "[SMDEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)

        self._results = OrderedDict()
        if "proposals" in predictions[0]:
            self._eval_box_proposals(predictions)
        if "instances" in predictions[0]:
            self._eval_predictions(set(self._tasks), predictions)
            self._eval_predictions_others(self._coco_api, predictions)
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
Example #11
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
    """
    Converts dataset into COCO format and saves it to a json file.
    dataset_name must be registered in DatasetCatalog and in detectron2's standard format.

    Args:
        dataset_name:
            reference from the config file to the catalogs
            must be registered in DatasetCatalog and in detectron2's standard format
        output_file: path of json file that will be saved to
        allow_cached: if json file is already present then skip conversion
    """

    # TODO: The dataset or the conversion script *may* change,
    # a checksum would be useful for validating the cached data

    PathManager.mkdirs(os.path.dirname(output_file))
    with file_lock(output_file):
        if PathManager.exists(output_file) and allow_cached:
            logger.warning(
                f"Using previously cached COCO format annotations at '{output_file}'. "
                "You need to clear the cache file if your dataset has been modified."
            )
        else:
            logger.info(
                f"Converting annotations of dataset '{dataset_name}' to COCO format ..."
            )
            coco_dict = convert_to_coco_dict(dataset_name)

            logger.info(
                f"Caching COCO format annotations at '{output_file}' ...")
            with PathManager.open(output_file, "w") as f:
                json.dump(coco_dict, f)
Example #12
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return
        else:
            predictions = self._predictions

        if len(predictions) == 0:
            self._logger.warning(
                "[LVISEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)

        self._results = OrderedDict()
        if "proposals" in predictions[0]:
            self._eval_box_proposals(predictions)
        if "instances" in predictions[0]:
            self._eval_predictions(set(self._tasks), predictions)
        return copy.deepcopy(self._results)
Example #13
def trace_and_save_torchscript(
    model: nn.Module,
    inputs: Tuple[torch.Tensor],
    output_path: str,
    _extra_files: Optional[Dict[str, bytes]] = None,
):
    logger.info("Tracing and saving TorchScript to {} ...".format(output_path))

    # TODO: patch_builtin_len depends on D2; we should either copy the function or
    # dynamically register D2's version.
    from detectron2.export.torchscript_patch import patch_builtin_len
    with torch.no_grad(), patch_builtin_len():
        script_model = torch.jit.trace(model, inputs)

    if _extra_files is None:
        _extra_files = {}
    model_file = os.path.join(output_path, "model.jit")

    PathManager.mkdirs(output_path)
    with PathManager.open(model_file, "wb") as f:
        torch.jit.save(script_model, f, _extra_files=_extra_files)

    data_file = os.path.join(output_path, "data.pth")
    with PathManager.open(data_file, "wb") as f:
        torch.save(inputs, f)

    # NOTE: new API doesn't require return
    return model_file
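
A hypothetical call sketch for the exporter above, using a toy CPU model (the helper itself still needs detectron2 installed for patch_builtin_len):

import torch
from torch import nn

toy_model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()).eval()
example_inputs = (torch.randn(1, 3, 64, 64),)
model_file = trace_and_save_torchscript(toy_model, example_inputs, "/tmp/ts_export")
reloaded = torch.jit.load(model_file)   # loads /tmp/ts_export/model.jit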
Example #14
def simple_default_setup(cfg):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the detectron2 logger
    2. Log basic information about environment, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Running with full config:\n{}".format(cfg))
    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(os.path.abspath(path)))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
    def evaluate(self, img_ids=None):
        """
        Args:
            img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
        """
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        if len(predictions) == 0:
            self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)

        self._results = OrderedDict()
        self._TTA_results = OrderedDict()
        self._score_df = OrderedDict()

        if "proposals" in predictions[0]:
            self._eval_box_proposals(predictions)
        if "instances" in predictions[0]:
            self._eval_predictions(set(self._tasks), predictions, img_ids=img_ids)
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
Example #16
def save_checkpoint(path_to_job, model, model_name, optimizer, epoch, cfg):
    """
    Save a checkpoint.
    Args:
        model (model): model to save the weight to the checkpoint.
        optimizer (optim): optimizer to save the historical state.
        epoch (int): current number of epoch of the model.
        cfg (CfgNode): configs to save.
    """
    # Save checkpoints only from the master process.

    # Ensure that the checkpoint dir exists.
    PathManager.mkdirs(get_checkpoint_dir(path_to_job))
    # Omit the DDP wrapper in the multi-gpu setting.
    sd = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict()
    # Record the state.
    checkpoint = {
        "epoch": epoch,
        "model_state": sd,
        "optimizer_state": optimizer.state_dict(),
        "cfg": cfg.dump(),
    }
    # Write the checkpoint.
    path_to_checkpoint = get_path_to_checkpoint(path_to_job, model_name,
                                                epoch + 1)
    # if (epoch+1)%10==0 or(epoch+1)==cfg.SOLVER.MAX_EPOCH :
    with PathManager.open(path_to_checkpoint, "wb") as f:
        torch.save(checkpoint, f)
    return path_to_checkpoint
Example #17
    def merge(self):
        """merge all clip features of a video into one/several 
           fix-size matrix(es)
        """
        if not PathManager.exists(self.merge_dir):
            PathManager.mkdirs(self.merge_dir)

        for video_name in PathManager.ls(self.save_dir):
            video_dir = os.path.join(self.save_dir, video_name)
            num_feats = len(PathManager.ls(video_dir))

            if self.min_length <= num_feats <= self.max_length:
                merged_feat = torch.zeros((num_feats, self.dim),
                                          dtype=torch.float32)

                for clip_idx in range(num_feats):
                    feat = torch.load(
                        os.path.join(video_dir, f'{clip_idx}.pth'))
                    merged_feat[clip_idx, :] = torch.from_numpy(feat)

                torch.save(merged_feat,
                           os.path.join(self.merge_dir, f'{video_name}.pth'))
            else:
                # TODO
                print(video_name)
Example #18
    def _eval_predictions(self, predictions, imgIds=None):
        """
        Evaluate predictions on densepose.
        Return results with the metrics of the tasks.
        """
        self._logger.info("Preparing results for COCO format ...")

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "coco_densepose_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)

        self._logger.info("Evaluating predictions ...")
        res = OrderedDict()
        results_gps, results_gpsm, results_segm = _evaluate_predictions_on_coco(
            self._coco_api,
            predictions,
            min_threshold=self._min_threshold,
            imgIds=imgIds)
        res["densepose_gps"] = results_gps
        res["densepose_gpsm"] = results_gpsm
        res["densepose_segm"] = results_segm
        return res
Example #19
    def __init__(self,
                 unified_label_file,
                 dataset_name,
                 cfg,
                 distributed,
                 output_dir=None):
        super().__init__(dataset_name, cfg, distributed, output_dir=output_dir)
        meta_dataset_name = dataset_name[:dataset_name.find('_')]
        print('meta_dataset_name', meta_dataset_name)

        self.unified_novel_classes_eval = cfg.MULTI_DATASET.UNIFIED_NOVEL_CLASSES_EVAL
        if self.unified_novel_classes_eval:
            match_novel_classes_file = cfg.MULTI_DATASET.MATCH_NOVEL_CLASSES_FILE
            print('Loading map back from', match_novel_classes_file)
            novel_classes_map = json.load(open(match_novel_classes_file,
                                               'r'))[meta_dataset_name]
            self.map_back = {}
            for c, match in enumerate(novel_classes_map):
                for m in match:
                    self.map_back[m] = c
        else:
            unified_label_data = json.load(open(unified_label_file, 'r'))
            label_map = unified_label_data['label_map']
            label_map = label_map[meta_dataset_name]
            self.map_back = {int(v): i for i, v in enumerate(label_map)}

        self._logger.info("saving outputs to {}".format(self._output_dir))
        self._temp_dir = self._output_dir + '/cityscapes_style_eval_tmp/'
        self._logger.info(
            "Writing cityscapes results to temporary directory {} ...".format(
                self._temp_dir))
        PathManager.mkdirs(self._temp_dir)
Example #20
    def test_bad_args(self) -> None:
        with self.assertRaises(NotImplementedError):
            PathManager.copy(
                self._remote_uri,
                self._remote_uri,
                foo="foo"  # type: ignore
            )
        with self.assertRaises(NotImplementedError):
            PathManager.exists(self._remote_uri, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            PathManager.get_local_path(
                self._remote_uri,
                foo="foo"  # type: ignore
            )
        with self.assertRaises(NotImplementedError):
            PathManager.isdir(self._remote_uri, foo="foo")  # type: ignore
        with self.assertRaises(NotImplementedError):
            PathManager.isfile(self._remote_uri, foo="foo")  # type: ignore
        with self.assertRaises(NotImplementedError):
            PathManager.ls(self._remote_uri, foo="foo")  # type: ignore
        with self.assertRaises(NotImplementedError):
            PathManager.mkdirs(self._remote_uri, foo="foo")  # type: ignore
        with self.assertRaises(ValueError):
            PathManager.open(self._remote_uri, foo="foo")  # type: ignore
        with self.assertRaises(NotImplementedError):
            PathManager.rm(self._remote_uri, foo="foo")  # type: ignore

        PathManager.set_strict_kwargs_checking(False)

        PathManager.get_local_path(self._remote_uri, foo="foo")  # type: ignore
        f = PathManager.open(self._remote_uri, foo="foo")  # type: ignore
        f.close()
        PathManager.set_strict_kwargs_checking(True)
Example #21
def save_checkpoint(checkpoint_folder, state, checkpoint_file=CHECKPOINT_FILE):
    """
    Saves a state variable to the specified checkpoint folder. Returns the filename
    of the checkpoint if successful. Raises an exception otherwise.
    """

    # make sure that we have a checkpoint folder:
    if not PathManager.isdir(checkpoint_folder):
        try:
            PathManager.mkdirs(checkpoint_folder)
        except BaseException:
            logging.warning("Could not create folder %s." % checkpoint_folder)
            raise

    # write checkpoint atomically:
    try:
        full_filename = f"{checkpoint_folder}/{checkpoint_file}"
        with PathManager.open(full_filename, "wb") as f:
            torch.save(state, f)
        return full_filename
    except BaseException:
        logging.warning("Unable to write checkpoint to %s." %
                        checkpoint_folder,
                        exc_info=True)
        raise
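
A minimal usage sketch with a hypothetical folder and a toy state dict (CHECKPOINT_FILE is the module-level default filename):

import torch
from torch import nn

model = nn.Linear(4, 2)
state = {"model": model.state_dict(), "epoch": 0}
ckpt_path = save_checkpoint("/tmp/toy_checkpoints", state)
restored = torch.load(ckpt_path)   # {'model': ..., 'epoch': 0}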
Example #22
def convert_to_coco_json(dataset_name, output_folder="", allow_cached=True):
    """
    Converts dataset into COCO format and saves it to a json file.
    dataset_name must be registered in DatasetCatalog and in detectron2's standard format.

    Args:
        dataset_name:
            reference from the config file to the catalogs
            must be registered in DatasetCatalog and in detectron2's standard format
        output_folder: where json file will be saved and loaded from
        allow_cached: if json file is already present then skip conversion
    Returns:
        cache_path: path to the COCO-format json file
    """

    # TODO: The dataset or the conversion script *may* change,
    # a checksum would be useful for validating the cached data
    cache_path = os.path.join(output_folder,
                              f"{dataset_name}_coco_format.json")
    PathManager.mkdirs(output_folder)
    if os.path.exists(cache_path) and allow_cached:
        logger.info(
            f"Reading cached annotations in COCO format from: {cache_path} ...")
    else:
        logger.info(
            f"Converting dataset annotations in '{dataset_name}' to COCO format ..."
        )
        coco_dict = convert_to_coco_dict(dataset_name)

        with PathManager.open(cache_path, "w") as json_file:
            logger.info(f"Caching annotations in COCO format: {cache_path}")
            json.dump(coco_dict, json_file)

    return cache_path
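
A hedged end-to-end sketch, assuming detectron2's DatasetCatalog/MetadataCatalog APIs and the convert_to_coco_json above; the dataset name, class list, and file paths are made up:

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode

def _toy_dataset():
    # One image with one box, in detectron2's standard annotation format.
    return [{
        "file_name": "/tmp/toy/000000.jpg",
        "image_id": 0,
        "height": 32,
        "width": 32,
        "annotations": [{
            "bbox": [2.0, 2.0, 10.0, 10.0],
            "bbox_mode": BoxMode.XYXY_ABS,
            "category_id": 0,
            "iscrowd": 0,
        }],
    }]

DatasetCatalog.register("toy_dataset", _toy_dataset)
MetadataCatalog.get("toy_dataset").set(thing_classes=["widget"])
cache_path = convert_to_coco_json("toy_dataset", output_folder="/tmp/toy_coco")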
Example #23
def save_checkpoint(path_to_job, model, optimizer, epoch, cfg):
    """
    Save a checkpoint.
    Args:
        model (model): model to save the weight to the checkpoint.
        optimizer (optim): optimizer to save the historical state.
        epoch (int): current number of epoch of the model.
        cfg (CfgNode): configs to save.
    """
    # Save checkpoints only from the master process.
    if not du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS):
        return
    # Ensure that the checkpoint dir exists.
    PathManager.mkdirs(get_checkpoint_dir(path_to_job))
    # Omit the DDP wrapper in the multi-gpu setting.
    sd = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict()
    normalized_sd = sub_to_normal_bn(sd)

    # Record the state.
    checkpoint = {
        "epoch": epoch,
        "model_state": normalized_sd,
        "optimizer_state": optimizer.state_dict(),
        "cfg": cfg.dump(),
    }
    # Write the checkpoint.
    path_to_checkpoint = get_path_to_checkpoint(path_to_job, epoch + 1)
    with PathManager.open(path_to_checkpoint, "wb") as f:
        torch.save(checkpoint, f)
    return path_to_checkpoint
Example #24
def do_feature_extraction(cfg, model, image_dir, image_list):
    with inference_context(model):
        # dump_folder = os.path.join(cfg.OUTPUT_DIR, "features")
        dump_folder = 'gridfeats'
        PathManager.mkdirs(dump_folder)
        # data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name)
        extract_grid_feature_on_dataset(model, image_dir, image_list,
                                        dump_folder)
Example #25
    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        if self._distributed:
            synchronize()
            conf_matrix_list = all_gather(self._conf_matrix)
            self._predictions = all_gather(self._predictions)
            self._predictions = list(itertools.chain(*self._predictions))
            if not is_main_process():
                return

            self._conf_matrix = np.zeros_like(self._conf_matrix)
            for conf_matrix in conf_matrix_list:
                self._conf_matrix += conf_matrix

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_predictions.json")
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))

        acc = np.zeros(self._num_classes, dtype=np.float64)
        iou = np.zeros(self._num_classes, dtype=np.float64)
        tp = self._conf_matrix.diagonal()[:-1].astype(np.float64)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float64)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float64)
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
        macc = np.sum(acc) / np.sum(acc_valid)
        miou = np.sum(iou) / np.sum(iou_valid)
        fiou = np.sum(iou * class_weights)
        pacc = np.sum(tp) / np.sum(pos_gt)

        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_evaluation.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(res, f)
        results = OrderedDict({"sem_seg": res})
        self._logger.info(results)
        return results
Example #26
    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))

            if not comm.is_main_process():
                return

        if len(self._predictions) == 0:
            self._logger.warning(
                "[LVISEvaluator] Did not receive valid predictions.")
            return {}

        self._logger.info("Preparing results in the OID format ...")
        _unified_results = list(
            itertools.chain(*[x["instances"] for x in self._predictions]))

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)

        file_path = os.path.join(self._output_dir,
                                 "unified_instances_results.json")
        self._logger.info("Saving results to {}".format(file_path))
        with PathManager.open(file_path, "w") as f:
            f.write(json.dumps(_unified_results))
            f.flush()

        self._oid_results = map_back_unified_id(_unified_results,
                                                self.map_back)

        # unmap the category ids for LVIS (from 0-indexed to 1-indexed)
        for result in self._oid_results:
            result["category_id"] += 1

        PathManager.mkdirs(self._output_dir)
        file_path = os.path.join(self._output_dir,
                                 "oid_instances_results.json")
        self._logger.info("Saving results to {}".format(file_path))
        with PathManager.open(file_path, "w") as f:
            f.write(json.dumps(self._oid_results))
            f.flush()

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info("Evaluating predictions ...")
        self._results = OrderedDict()
        res = _evaluate_predictions_on_oid(self._oid_api,
                                           file_path,
                                           eval_seg=self._mask_on)
        self._results['bbox'] = res

        return copy.deepcopy(self._results)
Example #27
    def evaluate(
            self,
            max_table_size: int = 25) -> "OrderedDict[str, Dict[str, Any]]":
        # if distributed, gather and sum confusion matrices
        cm: torch.Tensor
        if self._distributed:
            comm.synchronize()
            cms = comm.gather(self._cm, dst=0)
            if not comm.is_main_process():
                return OrderedDict()
            cm = torch.stack(cms, dim=0).sum(dim=0)
        else:
            cm = self._cm

        # saving confusion matrix
        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     self._output_name + ".pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(cm, f)
            file_path = os.path.join(self._output_dir,
                                     self._output_name + ".json")
            with PathManager.open(file_path, "w") as f:
                json_dict = {"confusion_matrix": cm.to("cpu").tolist()}
                json.dump(json_dict, f)

        # calculating accuracy
        accuracy = self.accuracy(cm)

        # displaying confusion matrix as table (if it isn't too huge)
        if self.num_classes <= max_table_size:
            headers, showindex = (), "default"
            if self._metadata is not None:
                headers = self._metadata.get("classes", default=headers)
                showindex = self._metadata.get("classes", default=showindex)

            table = tabulate(
                cm,
                headers=headers,
                showindex=showindex,
                tablefmt="pipe",
                floatfmt=".0f",
                numalign="left",
            )
            self._logger.info(table)

        # collect and return results
        results: OrderedDict[str, Dict[str, float]] = OrderedDict(
            [(self._task_name, {"top1": accuracy})]
        )
        self._logger.info(results)
        return results
Example #28
def save_video(video, output_dir):
    """
    Save video
    Args:
        video: shape [T, H, W, C]
        video_dir: save video under video_dir/sample/
    """
    PathManager.mkdirs(output_dir)
    for frame_idx in range(len(video)):
        frame_path = os.path.join(output_dir, f'{frame_idx}.png')
        save_image(video[frame_idx], frame_path)
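
A hypothetical call, assuming the project's save_image helper accepts an H x W x 3 uint8 array:

import numpy as np

video = np.random.randint(0, 256, size=(16, 64, 64, 3), dtype=np.uint8)  # T, H, W, C
save_video(video, "/tmp/sample_video")   # writes 0.png ... 15.png under /tmp/sample_video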
Example #29
    def evaluate(self):
        if self._distributed:
            synchronize()
            conf_matrix_list = all_gather(self._conf_matrix)
            self._predictions = all_gather(self._predictions)
            self._predictions = list(itertools.chain(*self._predictions))
            if not is_main_process():
                return

            self._conf_matrix = np.zeros_like(self._conf_matrix)
            for conf_matrix in conf_matrix_list:
                self._conf_matrix += conf_matrix

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_predictions.json")
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))

        acc = np.full(self._num_classes, np.nan, dtype=np.float64)
        iou = np.full(self._num_classes, np.nan, dtype=np.float64)
        tp = self._conf_matrix.diagonal()[:-1].astype(np.float64)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float64)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float64)
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
        macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
        miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
        fiou = np.sum(iou[acc_valid] * class_weights[acc_valid])
        pacc = np.sum(tp) / np.sum(pos_gt)

        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        for i, name in enumerate(self._class_names):
            res["IoU-{}".format(name)] = 100 * iou[i]
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc
        for i, name in enumerate(self._class_names):
            res["ACC-{}".format(name)] = 100 * acc[i]

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_evaluation.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(res, f)
        results = OrderedDict({"sem_seg": res})
        self._logger.info(results)
        return results
Example #30
File: io.py Project: iseessel/vissl
def makedir(dir_path):
    """
    Create the directory if it does not exist.
    """
    is_success = False
    try:
        if not PathManager.exists(dir_path):
            PathManager.mkdirs(dir_path)
        is_success = True
    except BaseException:
        logging.info(f"Error creating directory: {dir_path}")
    return is_success
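
A minimal usage sketch with a hypothetical output path:

if makedir("/tmp/vissl_outputs/run_0"):
    logging.info("output directory is ready")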