Example #1
def create_context(cls: type, args: argparse.Namespace,
                   cfg: CfgNode) -> Dict[str, Any]:
    vis_specs = args.visualizations.split(",")
    visualizers = []
    extractors = []
    for vis_spec in vis_specs:
        # build one visualizer/extractor pair per requested visualization spec
        texture_atlas = get_texture_atlas(args.texture_atlas)
        texture_atlases_dict = get_texture_atlases(args.texture_atlases_map)
        vis = cls.VISUALIZERS[vis_spec](
            cfg=cfg,
            texture_atlas=texture_atlas,
            texture_atlases_dict=texture_atlases_dict,
        )
        visualizers.append(vis)
        extractor = create_extractor(vis)
        extractors.append(extractor)
    visualizer = CompoundVisualizer(visualizers)
    extractor = CompoundExtractor(extractors)
    context = {
        "extractor": extractor,
        "visualizer": visualizer,
        "out_fname": args.output,
        "entry_idx": 0,
    }
    return context
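A minimal usage sketch for this variant, assuming create_context is a @classmethod on a class (called DensePoseAction here purely as a placeholder) that defines the VISUALIZERS mapping, and that cfg is the detectron2 CfgNode the visualizers expect:

import argparse

# hypothetical call site; DensePoseAction and cfg are assumptions, not part of the snippet above
args = argparse.Namespace(
    visualizations="dp_contour",   # comma-separated visualization specs
    texture_atlas=None,            # optional texture atlas path
    texture_atlases_map=None,      # optional atlas map for vertex-based models
    output="results.png",
)
context = DensePoseAction.create_context(args, cfg)  # cls is bound by @classmethod
# context["visualizer"] / context["extractor"] are the compound objects built above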
Example #2
def prepare_context(vis_specs):
    print('prepare context with specs: ', vis_specs)
    visualizers = []
    extractors = []
    for vis_spec in vis_specs:
        vis = VISUALIZERS[vis_spec]()
        visualizers.append(vis)
        extractor = create_extractor(vis)
        extractors.append(extractor)
    visualizer = CompoundVisualizer(visualizers)
    extractor = CompoundExtractor(extractors)
    context = {"extractor": extractor, "visualizer": visualizer}
    return context
def create_context() -> Dict[str, Any]:
    vis_specs = ["dp_contour"]
    visualizers = []
    extractors = []
    for vis_spec in vis_specs:
        vis = VISUALIZERS[vis_spec]()
        visualizers.append(vis)
        extractor = create_extractor(vis)
        extractors.append(extractor)
    visualizer = CompoundVisualizer(visualizers)
    extractor = CompoundExtractor(extractors)
    context = {
        "extractor": extractor,
        "visualizer": visualizer,
        "entry_idx": 0,
    }
    return context
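Either module-level variant above yields the same kind of context. In a DensePose apply_net-style pipeline it is then consumed roughly as sketched below; outputs and image are placeholders for a detectron2 model's per-frame Instances and the frame being annotated:

context = create_context()                     # or prepare_context(["dp_contour"])
data = context["extractor"](outputs)           # pull DensePose results out of the predictions
image_vis = context["visualizer"].visualize(image, data)  # draw them onto the image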
Example #4
def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
    vis_specs = args.visualizations.split(",")
    visualizers = []
    extractors = []
    for vis_spec in vis_specs:
        vis = cls.VISUALIZERS[vis_spec]()
        visualizers.append(vis)
        extractor = create_extractor(vis)
        extractors.append(extractor)
    visualizer = CompoundVisualizer(visualizers)
    extractor = CompoundExtractor(extractors)
    context = {
        "extractor": extractor,
        "visualizer": visualizer,
        "out_fname": args.output,
        "entry_idx": 0,
    }
    return context
    def __init__(
            self,
            model=MODELS["chimps"],
            model_cache_dir: Path = Path(".zamba_cache"),
            download_region=RegionEnum("us"),
    ):
        """Create a DensePoseManager object.

        Parameters
        ----------
        model : dict, optional (default MODELS['chimps'])
            A dictionary with the densepose model defintion like those defined in MODELS.
        """
        if not DENSEPOSE_AVAILABLE:
            raise ImportError(
                "Densepose not installed. See: https://zamba.drivendata.org/docs/stable/models/densepose/#installation"
            )

        # setup configuration for densepose
        self.cfg = get_cfg()
        add_densepose_config(self.cfg)

        self.cfg.merge_from_file(model["config"])

        if not (model_cache_dir / model["weights"]).exists():
            model_cache_dir.mkdir(parents=True, exist_ok=True)
            self.cfg.MODEL.WEIGHTS = download_weights(model["weights"],
                                                      model_cache_dir,
                                                      download_region)
        else:
            # reuse weights that were already downloaded to the cache directory
            self.cfg.MODEL.WEIGHTS = str(model_cache_dir / model["weights"])

        # automatically use CPU if no cuda available
        if not torch.cuda.is_available():
            self.cfg.MODEL.DEVICE = "cpu"

        self.cfg.freeze()

        logging.getLogger("fvcore").setLevel("CRITICAL")  # silence noisy detectron2 logging
        # set up predictor with the configuration
        self.predictor = DefaultPredictor(self.cfg)

        # we have a specific texture atlas for chimps with relevant regions
        # labeled that we can use instead of the default segmentation
        self.visualizer = model["viz_class"](
            self.cfg,
            device=self.cfg.MODEL.DEVICE,
            **model.get("viz_class_kwargs", {}),
        )

        # set up utilities for use with visualizer
        self.vis_extractor = create_extractor(self.visualizer)
        self.vis_embedder = build_densepose_embedder(self.cfg)
        self.vis_class_to_mesh_name = get_class_to_mesh_name_mapping(self.cfg)
        self.vis_mesh_vertex_embeddings = {
            mesh_name: self.vis_embedder(mesh_name).to(self.cfg.MODEL.DEVICE)
            for mesh_name in self.vis_class_to_mesh_name.values()
            if self.vis_embedder.has_embeddings(mesh_name)
        }

        if "anatomy_color_mapping" in model:
            self.anatomy_color_mapping = pd.read_csv(
                model["anatomy_color_mapping"], index_col=0)
        else:
            self.anatomy_color_mapping = None
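A usage sketch for the constructor above, assuming the densepose extra is installed (so DENSEPOSE_AVAILABLE is True) and that MODELS and RegionEnum are imported from the same zamba module:

from pathlib import Path

# instantiate with the defaults shown in the signature above
manager = DensePoseManager(
    model=MODELS["chimps"],                # chimp-specific model definition
    model_cache_dir=Path(".zamba_cache"),  # weights are downloaded here on first use
    download_region=RegionEnum("us"),
)
# manager.predictor, manager.visualizer, and manager.vis_extractor are now ready for use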