Exemplo n.º 1
0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from detectron2.utils.registry import Registry

DENSEPOSE_LOSS_REGISTRY = Registry("DENSEPOSE_LOSS")
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.utils.registry import Registry

META_ARCH_REGISTRY = Registry("META_ARCH")  # noqa F401 isort:skip
# Attach the description via __doc__: a bare string literal after the
# assignment is a no-op and would not show up in help()/introspection.
META_ARCH_REGISTRY.__doc__ = """
Registry for meta-architectures, i.e. the whole model.
"""


def build_model(cfg):
    """
    Build the whole model, defined by `cfg.MODEL.META_ARCHITECTURE`.

    The registered meta-architecture class is called with `(cfg)` and its
    instance is returned.
    """
    meta_arch = cfg.MODEL.META_ARCHITECTURE
    return META_ARCH_REGISTRY.get(meta_arch)(cfg)
Exemplo n.º 3
0
import torch
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.poolers import ROIPooler
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import select_foreground_proposals
from detectron2.structures import Boxes
from detectron2.utils.registry import Registry

ROI_HUMAN_HEAD_REGISTRY = Registry("HUMAN_ROI_HEADS")
# Fixed: the original used `==` (a no-op comparison) instead of `=`,
# so the registry docstring was never actually assigned.
ROI_HUMAN_HEAD_REGISTRY.__doc__ = """
Registry for contact heads, which make contact predictions from per-region features.
The registered object will be called with obj(cfg, input_shape).
"""


@ROI_HUMAN_HEAD_REGISTRY.register()
class HumanROIHeads(StandardROIHeads):
    def __init__(self, cfg, input_shape):
        super(HumanROIHeads, self).__init__(cfg, input_shape)
        self.config = cfg

    def forward(self, images, features, proposals, targets=None):
        if self.training:
            # assert targets
            print(targets)
            proposals = self.label_and_sample_proposals(proposals, targets)

        del targets

        if self.training:
            losses = self._forward_box(features, proposals)
Exemplo n.º 4
0
from fvcore.transforms.transform import (
    BlendTransform,
    CropTransform,
    HFlipTransform,
    NoOpTransform,
    Transform,
    VFlipTransform,
)
from PIL import Image

from detectron2.utils.registry import Registry

from .augmentation import Augmentation
from .transform import ExtentTransform, ResizeTransform, RotationTransform

AUGMENTATION_REGISTRY = Registry("AUGMENTATION")
AUGMENTATION_REGISTRY.__doc__ = """
Registry for augmentation.
"""

__all__ = [
    "AUGMENTATION_REGISTRY",
    "RandomApply",
    "RandomBrightness",
    "RandomContrast",
    "RandomCrop",
    "RandomExtent",
    "RandomFlip",
    "RandomSaturation",
    "RandomLighting",
    "RandomRotation",
Exemplo n.º 5
0
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.layers import ShapeSpec, cat
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

POINT_HEAD_REGISTRY = Registry("POINT_HEAD")
POINT_HEAD_REGISTRY.__doc__ = """
Registry for point heads, which makes prediction for a given set of per-point features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def roi_mask_point_loss(mask_logits, instances, point_labels):
    """
    Compute the point-based loss for instance segmentation mask predictions
    given point-wise mask prediction and its corresponding point-wise labels.
    Args:
        mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or
            class-agnostic, where R is the total number of predicted masks in all images, C is the
            number of foreground classes, and P is the number of points sampled for each mask.
            The values are logits.
        instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, i_th
            elememt of the list contains R_i objects and R_1 + ... + R_N is equal to R.
Exemplo n.º 6
0
from detectron2.layers import Conv2d, ShapeSpec
from detectron2.structures import ImageList
from detectron2.utils.registry import Registry
from detectron2.utils.events import get_event_storage

from ..backbone import build_backbone
from ..postprocessing import sem_seg_postprocess
from .build import META_ARCH_REGISTRY

__all__ = [
    "SemanticSegmentor", "SEM_SEG_HEADS_REGISTRY", "SemSegFPNHead",
    "build_sem_seg_head"
]

SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS")
SEM_SEG_HEADS_REGISTRY.__doc__ = """
Registry for semantic segmentation heads, which make semantic segmentation predictions
from feature maps.
"""


@META_ARCH_REGISTRY.register()
class SemanticSegmentor(nn.Module):
    """
    Main class for semantic segmentation architectures.
    """
    def __init__(self, cfg):
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)
Exemplo n.º 7
0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.utils.registry import Registry

ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD")
# Attach the description via __doc__: a bare string literal after the
# assignment is a no-op and would not show up in help()/introspection.
ROI_BOX_HEAD_REGISTRY.__doc__ = """
Registry for box heads, which make box predictions from per-region features.
"""


@ROI_BOX_HEAD_REGISTRY.register()
class EmbedFastRCNNConvFCHead(nn.Module):
    """
    A head with several 3x3 conv layers (each followed by norm & relu) and
    several fc layers (each followed by relu).

    Is mostly based off of FastRCNNConvFCHead, except that last FC layer is
    replaced with an embedding being returned.
    """
    def __init__(self, cfg, input_shape: ShapeSpec):
        """
        The following attributes are parsed from config:
            num_conv, num_fc: the number of conv/fc layers
            conv_dim/fc_dim: the dimension of the conv/fc layers
            norm: normalization for the conv layers
Exemplo n.º 8
0
from detectron2.config import configurable
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.utils.registry import Registry

from ..anchor_generator import build_anchor_generator
from ..box_regression import Box2BoxTransform, _dense_box_regression_loss
from ..matcher import Matcher
from ..sampling import subsample_labels
from .build import PROPOSAL_GENERATOR_REGISTRY
from .proposal_utils import find_top_rpn_proposals

RPN_HEAD_REGISTRY = Registry("RPN_HEAD")
RPN_HEAD_REGISTRY.__doc__ = """
Registry for RPN heads, which take feature maps and perform
objectness classification and bounding box regression for anchors.

The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""
"""
Shape shorthand in this module:

    N: number of images in the minibatch
    L: number of feature maps per image on which RPN is run
    A: number of cell anchors (must be the same for all feature maps)
    Hi, Wi: height and width of the i-th feature map
    B: size of the box parameterization
Exemplo n.º 9
0
from torch.nn import functional as F

from detectron2.layers import Conv2d, ShapeSpec
from detectron2.structures import ImageList
from detectron2.utils.registry import Registry

from ..backbone import build_backbone
from ..postprocessing import sem_seg_postprocess
from .build import META_ARCH_REGISTRY

__all__ = [
    "SemanticSegmentor", "SEM_SEG_HEADS_REGISTRY", "SemSegFPNHead",
    "build_sem_seg_head"
]

SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS")
# Attach the description via __doc__: a bare string literal after the
# assignment is a no-op and would not show up in help()/introspection.
SEM_SEG_HEADS_REGISTRY.__doc__ = """
Registry for semantic segmentation heads, which make semantic segmentation predictions
from feature maps.
"""


@META_ARCH_REGISTRY.register()
class SemanticSegmentor(nn.Module):
    """
    Main class for semantic segmentation architectures.
    """
    def __init__(self, cfg):
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)
Exemplo n.º 10
0
from detectron2.config import CfgNode
from detectron2.layers import ShapeSpec
from detectron2.utils.registry import Registry
import torch

from ..common.types import Losses

__all__ = [
    "UNSUPERVISED_HEAD_REGISTRY",
    "UnsupervisedHead",
    "UnsupervisedOutput",
    "build_unsupervised_head",
]

UNSUPERVISED_HEAD_REGISTRY = Registry("UNSUPERVISED_HEAD")
UNSUPERVISED_HEAD_REGISTRY.__doc__ = """
Registry for unsupervised heads/objectives for models like :class:`UxRCNN`.
Take feature maps and return some target function.
"""

logger = logging.getLogger(__name__)

UnsupervisedOutput = Dict[str, torch.Tensor]


class UnsupervisedHead(torch.nn.Module):
    """
    An unsupervised head that accepts feature maps from the backbone and
    carries out some sort of additional objective.
Exemplo n.º 11
0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.layers import Conv2d, ConvTranspose2d, interpolate
from detectron2.structures.boxes import matched_boxlist_iou
from detectron2.utils.registry import Registry

from .structures import DensePoseOutput

ROI_DENSEPOSE_HEAD_REGISTRY = Registry("ROI_DENSEPOSE_HEAD")


def initialize_module_params(module):
    """Initialize a module's parameters in place.

    Parameters whose name contains "bias" are zeroed; otherwise, parameters
    whose name contains "weight" get Kaiming-normal initialization
    (mode="fan_out", nonlinearity="relu"). All other parameters are left
    untouched.
    """
    for param_name, tensor in module.named_parameters():
        if "bias" in param_name:
            nn.init.constant_(tensor, 0)
            continue
        if "weight" in param_name:
            nn.init.kaiming_normal_(tensor, mode="fan_out", nonlinearity="relu")


@ROI_DENSEPOSE_HEAD_REGISTRY.register()
class DensePoseV1ConvXHead(nn.Module):
    def __init__(self, cfg, input_channels):
        super(DensePoseV1ConvXHead, self).__init__()
        # fmt: off
        hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
        kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
        self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
        # fmt: on
Exemplo n.º 12
0
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import nn
import torch.nn.functional as F
import fvcore.nn.weight_init as weight_init

from detectron2.layers import ShapeSpec, Conv2d
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

RECONSTRUCT_HEADS_REGISTRY = Registry("RECONSTRUCT_HEADS")
RECONSTRUCT_HEADS_REGISTRY.__doc__ = """
Registry for reconstruction heads for use with FPN backbones.
ReconstructHeads take in a feature map and reconstruct the image input
to the original network.

The registered object will be called with `obj(cfg, input_shape, output_shape)`.
The call is expected to return an :class:`ReconstructHeads`.
"""

logger = logging.getLogger(__name__)


def build_reconstruct_heads(cfg, input_shape):
    """
    Build ReconstructHeads defined by `cfg.MODEL.RECONSTRUCT_HEADS.NAME`.
    """
    name = cfg.MODEL.RECONSTRUCT_HEADS.NAME
Exemplo n.º 13
0
    SEM_SEG_HEADS_REGISTRY,
    build_backbone,
    build_sem_seg_head,
)
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.projects.deeplab import DeepLabV3PlusHead
from detectron2.projects.deeplab.loss import DeepLabCE
from detectron2.structures import BitMasks, ImageList, Instances
from detectron2.utils.registry import Registry

from .post_processing import get_panoptic_segmentation

__all__ = ["PanopticDeepLab", "INS_EMBED_BRANCHES_REGISTRY", "build_ins_embed_branch"]


INS_EMBED_BRANCHES_REGISTRY = Registry("INS_EMBED_BRANCHES")
INS_EMBED_BRANCHES_REGISTRY.__doc__ = """
Registry for instance embedding branches, which make instance embedding
predictions from feature maps.
"""


@META_ARCH_REGISTRY.register()
class PanopticDeepLab(nn.Module):
    """
    Main class for panoptic segmentation architectures.
    """

    def __init__(self, cfg):
        super().__init__()
        self.backbone = build_backbone(cfg)
Exemplo n.º 14
0
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import nn
import torch.nn.functional as F
import fvcore.nn.weight_init as weight_init
from torch.distributions import Normal, Uniform
from detectron2.layers import ShapeSpec, Conv2d
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

QUANTIZER_REGISTRY = Registry("QUANTIZERS")
QUANTIZER_REGISTRY.__doc__ = """
Registry for quantizers. These are for use in compressive networks.
The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`Quantizer`. The output shape
is assumed to be the same as the input shape, but values are quantized.
"""

logger = logging.getLogger(__name__)


def build_quantizer(cfg, input_shape):
    """
    Build the quantizer defined by `cfg.MODEL.QUANTIZER.NAME`.

    The registered class is called with `(cfg, input_shape)` and is expected
    to return a :class:`Quantizer`.
    """
    # Fixed: the docstring previously described build_reconstruct_heads
    # (copy-paste) and named the wrong config key.
    name = cfg.MODEL.QUANTIZER.NAME
    return QUANTIZER_REGISTRY.get(name)(cfg, input_shape)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn

from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
from detectron2.utils.registry import Registry

ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD")
ROI_BOX_HEAD_REGISTRY.__doc__ = ""


@ROI_BOX_HEAD_REGISTRY.register()
class ROIBoxHead(nn.Module):
    """
    Generic Box Head class.
    """
    def __init__(self, cfg):
        super().__init__()
        self.feature_extractor = make_roi_box_feature_extractor(cfg)
        self.predictor = make_roi_box_predictor(cfg)
        self.post_processor = make_roi_box_post_processor(cfg)
        self.loss_evaluator = make_roi_box_loss_evaluator(cfg)

    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
Exemplo n.º 16
0
import numpy as np

import fvcore.nn.weight_init as weight_init
import torch
from detectron2.layers import Conv2d, ShapeSpec, cat, get_norm
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from torch import nn
from torch.nn import functional as F

_TOTAL_SKIPPED = 0

ROI_FIBERWIDTH_HEAD_REGISTRY = Registry("ROI_FIBERWIDTH_HEAD")
ROI_FIBERWIDTH_HEAD_REGISTRY.__doc__ = """
Registry for fiberwidth heads, which make fiberwidth predictions from per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def build_fiberwidth_head(cfg, input_shape):
    """
    Construct the fiberwidth head registered under
    `cfg.MODEL.ROI_FIBERWIDTH_HEAD.NAME`, calling it with `(cfg, input_shape)`.
    """
    head_name = cfg.MODEL.ROI_FIBERWIDTH_HEAD.NAME
    head_cls = ROI_FIBERWIDTH_HEAD_REGISTRY.get(head_name)
    return head_cls(cfg, input_shape)


def fiberwidth_loss(pred_fiberwidths, instances):
    """
    Arguments:
Exemplo n.º 17
0
from typing import Dict

import numpy as np
import torch
import torch.nn.functional as F
from detectron2.modeling import build_backbone
from detectron2.utils.registry import Registry
from detr.models.backbone import Joiner
from detr.models.position_encoding import PositionEmbeddingSine
from detr.util.misc import NestedTensor
from torch import nn

DETR_MODEL_REGISTRY = Registry("DETR_MODEL")


def build_detr_backbone(cfg):
    if "resnet" in cfg.MODEL.BACKBONE.NAME.lower():
        d2_backbone = ResNetMaskedBackbone(cfg)
    elif "fbnet" in cfg.MODEL.BACKBONE.NAME.lower():
        d2_backbone = FBNetMaskedBackbone(cfg)
    elif cfg.MODEL.BACKBONE.SIMPLE:
        d2_backbone = SimpleSingleStageBackbone(cfg)
    else:
        raise NotImplementedError

    N_steps = cfg.MODEL.DETR.HIDDEN_DIM // 2
    centered_position_encoding = cfg.MODEL.DETR.CENTERED_POSITION_ENCODIND

    backbone = Joiner(
        d2_backbone,
        PositionEmbeddingSine(
Exemplo n.º 18
0
import torch
from torch import nn
from detectron2.utils.registry import Registry

__all__ = ["build_depth_head", "PlaneRCNNDepthHead", "DEPTH_HEAD_REGISTRY"]

DEPTH_HEAD_REGISTRY = Registry("DEPTH_HEAD")
DEPTH_HEAD_REGISTRY.__doc__ = """
Registry for depth head in a generalized R-CNN model.
ROIHeads take feature maps and predict depth.
The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`nn.module`.
"""


def l1LossMask(pred, gt, mask):
    """Masked L1 loss.

    Returns sum(|pred - gt| * mask) / max(mask.sum(), 1); clamping the
    denominator avoids division by zero when the mask is all zeros.
    """
    masked_abs_err = (pred - gt).abs() * mask
    denom = mask.sum().clamp(min=1)
    return masked_abs_err.sum() / denom


def build_depth_head(cfg):
    """
    Build the depth head defined by `cfg.MODEL.DEPTH_HEAD.NAME`.

    The registered class is called with `(cfg)` and its instance is returned.
    """
    # Fixed: the docstring previously claimed this builds ROIHeads from
    # cfg.MODEL.ROI_HEADS.NAME, but the code reads cfg.MODEL.DEPTH_HEAD.NAME.
    name = cfg.MODEL.DEPTH_HEAD.NAME
    return DEPTH_HEAD_REGISTRY.get(name)(cfg)


def conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=None):
    return nn.Sequential(
Exemplo n.º 19
0
from typing import Dict, List
import torch
import torch.nn.functional as F
from torch import nn
import pdb
from detectron2.layers import ShapeSpec
from detectron2.utils.registry import Registry

from ..anchor_generator import build_anchor_generator
from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from .build import PROPOSAL_GENERATOR_REGISTRY
from .rpn_outputs import RPNOutputs, find_top_rpn_proposals
import numpy as np

RPN_HEAD_REGISTRY = Registry("RPN_HEAD")
# Attach the description via __doc__: a bare string literal after the
# assignment is a no-op and would not show up in help()/introspection.
RPN_HEAD_REGISTRY.__doc__ = """
Registry for RPN heads, which take feature maps and perform
objectness classification and bounding box regression for anchors.
"""


def build_rpn_head(cfg, input_shape):
    """
    Build the RPN head named by `cfg.MODEL.RPN.HEAD_NAME`
    (e.g. "StandardRPNHead"), calling it with `(cfg, input_shape)`.
    """
    head_name = cfg.MODEL.RPN.HEAD_NAME
    head_cls = RPN_HEAD_REGISTRY.get(head_name)
    return head_cls(cfg, input_shape)

import copy
import math
from typing import List
import torch
from torch import nn

from detectron2.layers import ShapeSpec
from detectron2.structures import RotatedBoxes
from detectron2.utils.registry import Registry
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator

ANCHOR_GENERATOR_REGISTRY = Registry("RELATION_ANCHOR_GENERATOR")


@ANCHOR_GENERATOR_REGISTRY.register()
class RelationAnchorGenerator(DefaultAnchorGenerator):
    """
       For a set of image sizes and feature maps, computes a set of anchors based on text and arrow/T-bars.
    """
    def __init__(self, cfg, input_shape: List[ShapeSpec], instance_pred):
        super().__init__()
        # fmt: off
        sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES
        aspect_ratios = cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS
        self.strides = [x.stride for x in input_shape]
        # fmt: on
        """
        sizes (list[list[int]]): sizes[i] is the list of anchor sizes to use
            for the i-th feature map. If len(sizes) == 1, then the same list of
            anchor sizes, given by sizes[0], is used for all feature maps. Anchor
            sizes are given in absolute lengths in units of the input image;
Exemplo n.º 21
0
from detectron2.layers import ShapeSpec
from detectron2.utils.registry import Registry

SSHEAD_REGISTRY = Registry("SSHEAD")
SSHEAD_REGISTRY.__doc__ = """
return self-supervised head 
"""


def build_ss_head(cfg, input_shape=None):
    """
    Build the self-supervised heads listed in `cfg.MODEL.SS.NAME`.

    Args:
        cfg: config node; each entry of `cfg.MODEL.SS.NAME` must be a name
            registered in `SSHEAD_REGISTRY`.
        input_shape (ShapeSpec or None): input shape passed to each head;
            defaults to a ShapeSpec whose channel count matches
            `cfg.MODEL.PIXEL_MEAN`.

    Returns:
        list: one constructed head per name in `cfg.MODEL.SS.NAME` (non-empty).
    """
    # Fixed: the docstring previously described build_backbone (copy-paste);
    # this function builds a list of self-supervised heads, not a Backbone.
    if input_shape is None:
        input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))

    ss_names = cfg.MODEL.SS.NAME

    ss_heads = [SSHEAD_REGISTRY.get(name)(cfg, input_shape) for name in ss_names]
    assert len(ss_heads) != 0
    return ss_heads
Exemplo n.º 22
0
import torch
from torch import nn
import torch.nn.functional as F
from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss

from detectron2.layers import ShapeSpec
from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY
from detectron2.structures import Instances
from detectron2.modeling.proposal_generator.proposal_utils import find_top_rpn_proposals
from detectron2.utils.registry import Registry
from detectron2.utils.events import get_event_storage

from slender_det.modeling.grid_generator import zero_center_grid, uniform_grid

REP_POINTS_HEAD_REGISTRY = Registry("RepPointsHead")
REP_POINTS_HEAD_REGISTRY.__doc__ = """
Registry for RepPoints heads, which take feature maps and perform
objectness classification and bounding box regression for each point.

The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""


def build_rep_points_head(cfg, input_shape):
    """
    Build a RepPoints head defined by `cfg.MODEL.PROPOSAL_GENERATOR.HEAD_NAME`.

    The registered class is called with `(cfg, input_shape)`.
    """
    # Fixed: the docstring previously named cfg.MODEL.REP_POINTS_GENERATOR,
    # which does not match the key the code actually reads.
    name = cfg.MODEL.PROPOSAL_GENERATOR.HEAD_NAME
    return REP_POINTS_HEAD_REGISTRY.get(name)(cfg, input_shape)
Exemplo n.º 23
0
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import math
from typing import List
import torch
from torch import nn

from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.registry import Registry

ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR")
ANCHOR_GENERATOR_REGISTRY.__doc__ = """
Registry for modules that creates object detection anchors for feature maps.

The registered object will be called with `obj(cfg, input_shape)`.
"""


class BufferList(nn.Module):
    """
    Similar to nn.ParameterList, but for buffers
    """
    def __init__(self, buffers):
        super().__init__()
        for i, buffer in enumerate(buffers):
            # Use non-persistent buffer so the values are not saved in checkpoint
            self.register_buffer(str(i), buffer)

    def __len__(self):
Exemplo n.º 24
0
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.registry import Registry
from mobile_cv.common.misc.oss_utils import fb_overwritable

from .extended_coco import coco_text_load, extended_coco_load
from .extended_lvis import extended_lvis_load
from .keypoint_metadata_registry import get_keypoint_metadata

logger = logging.getLogger(__name__)

D2GO_DATASETS_BASE_MODULE = "d2go.datasets"
IM_DIR = "image_directory"
ANN_FN = "annotation_file"
LOAD_KWARGS = "load_kwargs"

COCO_REGISTER_FUNCTION_REGISTRY = Registry("COCO_REGISTER_FUNCTION_REGISTRY")
COCO_REGISTER_FUNCTION_REGISTRY.__doc__ = "Registry - coco register function"

InjectedCocoEntry = namedtuple("InjectedCocoEntry", ["func", "split_dict"])
INJECTED_COCO_DATASETS_LUT = {}


def get_coco_register_function(cfg):
    """Look up (but do not call) the configured COCO register function."""
    func_name = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION
    return COCO_REGISTER_FUNCTION_REGISTRY.get(func_name)


def _import_dataset(module_name):
    """Import and return the dataset submodule under D2GO_DATASETS_BASE_MODULE."""
    full_name = "{}.{}".format(D2GO_DATASETS_BASE_MODULE, module_name)
    return importlib.import_module(full_name)
Exemplo n.º 25
0
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone.resnet import BottleneckBlock, make_stage
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.modeling.sampling import subsample_labels
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from typing import Dict

from .box_head import build_box_head
from .fast_rcnn import ROI_HEADS_OUTPUT_REGISTRY, FastRCNNOutputLayers, FastRCNNOutputs

ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
ROI_HEADS_REGISTRY.__doc__ = """
Registry for ROI heads in a generalized R-CNN model.
ROIHeads take feature maps and region proposals, and
perform per-region computation.

The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`ROIHeads`.
"""

logger = logging.getLogger(__name__)


def build_roi_heads(cfg, input_shape):
    """
    Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
Exemplo n.º 26
0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.utils.registry import Registry

META_ARCH_REGISTRY = Registry("META_ARCH")  # noqa F401 isort:skip
META_ARCH_REGISTRY.__doc__ = """
Registry for meta-architectures, i.e. the whole model.

The registered object will be called with `obj(cfg)`
and expected to return a `nn.Module` object.
"""


def build_model(cfg):
    """
    Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
    Note that it does not load any weights from ``cfg``.
    """
    arch_name = cfg.MODEL.META_ARCHITECTURE
    arch_cls = META_ARCH_REGISTRY.get(arch_name)
    return arch_cls(cfg)
Exemplo n.º 27
0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD")
ROI_MASK_HEAD_REGISTRY.__doc__ = """
Registry for mask heads, which predicts instance masks given
per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def mask_rcnn_loss(pred_mask_logits, instances, vis_period=0):
    """
    Compute the mask prediction loss defined in the Mask R-CNN paper.

    Args:
        pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask)
            for class-specific or class-agnostic, where B is the total number of predicted masks
            in all images, C is the number of foreground classes, and Hmask, Wmask are the height
            and width of the mask predictions. The values are logits.
Exemplo n.º 28
0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate
from detectron2.structures import Instances, heatmaps_to_keypoints
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry

_TOTAL_SKIPPED = 0

ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD")
ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """
Registry for keypoint heads, which make keypoint predictions from per-region features.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def build_keypoint_head(cfg, input_shape):
    """
    Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`,
    calling the registered class with `(cfg, input_shape)`.
    """
    head_name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME
    head_cls = ROI_KEYPOINT_HEAD_REGISTRY.get(head_name)
    return head_cls(cfg, input_shape)


def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer):
Exemplo n.º 29
0
import torch
import torch.nn as nn
from detectron2.utils.registry import Registry

from densepose import Interpolate, ROI_DENSEPOSE_HEAD_REGISTRY
from densepose.modeling.layers import non_local_embedded_gaussian

ROI_SHARED_BLOCK_REGISTRY = Registry("ROI_SHARED_BLOCK")
ROI_SHARED_BLOCK_REGISTRY.__doc__ = """
Registry for shared blocks, which computes shared features given per-region features
Mask, keypoint, densepose heads use shared features for predictions

The registered object will be called with `obj(cfg, input_shape)`.
"""

from detectron2.layers import Conv2d, interpolate


def build_shared_block(cfg, in_channels):
    """
    Build the shared block named by `cfg.MODEL.ROI_SHARED_BLOCK.NAME`.

    Returns:
        (module or None, int): the constructed block and its output channel
        count, or ``(None, in_channels)`` when the configured name is the
        empty string (no shared block).
    """
    block_name = cfg.MODEL.ROI_SHARED_BLOCK.NAME
    if block_name == "":
        return None, in_channels
    block = ROI_SHARED_BLOCK_REGISTRY.get(block_name)(cfg, in_channels)
    return block, block.n_out_channels


@ROI_SHARED_BLOCK_REGISTRY.register()
class ParsingSharedBlock(nn.Module):
from .GAT import GAT
import fvcore.nn.weight_init as weight_init

from ..poolers import ROIPooler
from detectron2.layers import ShapeSpec, nonzero_tuple
from detectron2.modeling.roi_heads.box_head import build_box_head
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.roi_heads.mask_head import build_mask_head
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.registry import Registry

__all__ = [
    "GraphConnection", "GRAPH_CONNECTION_REGISTRY", "build_graph_connection"
]
GRAPH_CONNECTION_REGISTRY = Registry("GRAPH_CONNECTION")
GRAPH_CONNECTION_REGISTRY.__doc__ = """
Registry for graph connection, which make graph connection modules.

The registered object will be called with `obj(cfg, input_shape)`.
"""


def build_graph_connection(cfg, input_shape):
    """
    Build the graph-connection module named by `cfg.GRAPH.NAME`.

    The registered class is called with `(cfg, input_shape)`.
    """
    name = cfg.GRAPH.NAME
    return GRAPH_CONNECTION_REGISTRY.get(name)(cfg, input_shape)


@GRAPH_CONNECTION_REGISTRY.register()
class GraphConnection(nn.Module):
    def __init__(