Example #1
0
def build_paddle_model():
    """Construct a DETR model with a ResNet-50 backbone and fixed defaults.

    Hyper-parameters mirror the standard DETR-R50 configuration: hidden dim
    256, 6 encoder/decoder layers, 100 object queries, 91 classes.
    """
    hidden_dim = 256

    # Sine positional encoding: half the hidden dim per spatial axis.
    pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)

    # ResNet-50 trunk: not trainable here, intermediate layers returned,
    # no dilation in the final stage.
    trunk = Backbone('resnet50', False, True, False)
    joined = Joiner(trunk, pos_enc)
    joined.num_channels = 2048  # ResNet-50 final-stage channel count

    transformer = Transformer(
        d_model=hidden_dim,
        dropout=0,
        nhead=8,
        dim_feedforward=2048,
        num_encoder_layers=6,
        num_decoder_layers=6,
        normalize_before=False,
        return_intermediate_dec=True,
    )

    return DETR(joined, transformer, num_classes=91, num_queries=100,
                aux_loss=True)
Example #2
0
def build_backbone(lr_backbone, masks, backbone, dilation, hidden_dim, position_embedding):
    """Assemble a backbone and positional encoding into a Joiner module.

    A positive ``lr_backbone`` marks the backbone weights as trainable;
    ``masks`` requests intermediate feature maps from the trunk.
    """
    pos_encoder = build_position_encoding(hidden_dim, position_embedding)
    trunk = Backbone(backbone, lr_backbone > 0, masks, dilation)
    joined = Joiner(trunk, pos_encoder)
    joined.num_channels = trunk.num_channels
    return joined
Example #3
0
    def test_transformer_forward(self):
        """Smoke-test the backbone forward pass on a padded two-image batch."""
        net = Backbone('resnet50', True, True, False)
        # Two images of different sizes -> padded NestedTensor batch.
        batch = nested_tensor_from_tensor_list([
            torch.rand(3, 200, 200),
            torch.rand(3, 200, 250),
        ])
        features = net(batch)
        for name, feat in features.items():
            print('{} {}'.format(name, feat.tensors.shape))
Example #4
0
def build_backbone():
    """Build a trainable ResNet-50 backbone joined with sine position embeddings."""
    pos_enc = PositionEmbeddingSine(256 // 2, normalize=True)
    # Trainable trunk, final stage only (no intermediate layers), no dilation.
    trunk = Backbone('resnet50', True, False, False)
    joined = Joiner(trunk, pos_enc)
    joined.num_channels = trunk.num_channels
    return joined
Example #5
0
def build_backbone():
    """ResNet-50 backbone (trainable, final stage only) with sine embeddings."""
    trunk = Backbone('resnet50',
                     train_backbone=True,
                     return_interm_layers=False,
                     dilation=False)
    pos = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
    wrapped = Joiner(trunk, pos)
    wrapped.num_channels = trunk.num_channels
    return wrapped
Example #6
0
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    """Create a DETR model (optionally with a segmentation head) on a backbone.

    ``mask`` both enables intermediate backbone layers and wraps the result
    in ``DETRsegm``.
    """
    d_model = 256
    trunk = Backbone(backbone_name, train_backbone=True,
                     return_interm_layers=mask, dilation=dilation)
    encoder_pos = PositionEmbeddingSine(d_model // 2, normalize=True)
    joined = Joiner(trunk, encoder_pos)
    joined.num_channels = trunk.num_channels
    net = DETR(joined,
               Transformer(d_model=d_model, return_intermediate_dec=True),
               num_classes=num_classes,
               num_queries=100)
    # Wrap with a mask head when segmentation masks are requested.
    return DETRsegm(net) if mask else net
Example #7
0
def build_backbone(lr_backbone, masks, backbone, dilation, hidden_dim, position_embedding):
    """Build a ResNet or MobileNet backbone joined with a positional encoding.

    Args:
        lr_backbone: backbone learning rate; > 0 makes the backbone trainable.
        masks: when truthy, return intermediate layers from the trunk.
        backbone: architecture name containing 'resnet' or 'mobilenet'.
        dilation: dilation flag forwarded to the ResNet backbone.
        hidden_dim: model hidden size used to build the positional encoding.
        position_embedding: positional-encoding type identifier.

    Returns:
        A ``Joiner`` wrapping the backbone and positional encoding, with
        ``num_channels`` copied from the backbone.

    Raises:
        ValueError: if ``backbone`` names an unsupported architecture.
    """
    position_embedding = build_position_encoding(hidden_dim, position_embedding)
    train_backbone = lr_backbone > 0
    return_interm_layers = masks
    if 'resnet' in backbone:
        backbone = Backbone(backbone, train_backbone, return_interm_layers, dilation)
    elif 'mobilenet' in backbone:
        backbone = MNetBackbone(train_backbone, return_interm_layers)
    else:
        # Previously fell through with `backbone` still a string, which
        # failed later inside Joiner with a confusing error; fail fast here.
        raise ValueError(f"unsupported backbone: {backbone!r}")
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels
    return model
Example #8
0
def _make_backbone(backbone_name: str, mask: bool = False):
    """Build a timm or torchvision backbone wired to a sine positional encoding."""
    timm_prefix = "timm_"
    if backbone_name.startswith(timm_prefix):
        # Names of the form "timm_<model>" are routed to the timm model zoo.
        trunk = TimmBackbone(
            backbone_name[len(timm_prefix):],
            mask,
            main_layer=-1,
            group_norm=True,
        )
    else:
        trunk = Backbone(backbone_name,
                         train_backbone=True,
                         return_interm_layers=mask,
                         dilation=False)

    d_model = 256
    joined = Joiner(trunk, PositionEmbeddingSine(d_model // 2, normalize=True))
    joined.num_channels = trunk.num_channels
    return joined
Example #9
0
    def __init__(self,
                 input_image_shape,
                 num_classes,
                 pos_threshold=0.5,
                 neg_threshold=0.1,
                 predict_conf_threshold=0.75,
                 **kwargs):
        """Anchor-based object-detection module.

        Args:
            input_image_shape: input shape; the last entry is the image width,
                used to derive the anchor sizes.
            num_classes: number of object classes for the prediction head.
            pos_threshold: threshold for labelling anchors as positives
                (matching semantics live elsewhere — presumably IoU).
            neg_threshold: threshold for labelling anchors as negatives.
            predict_conf_threshold: confidence cutoff applied at predict time.
            **kwargs: forwarded to the parent constructor.
        """
        super(ObjectDetection, self).__init__(**kwargs)
        self.pos_threshold = pos_threshold
        self.neg_threshold = neg_threshold
        self.predict_conf_threshold = predict_conf_threshold
        self.num_classes = num_classes
        self.input_image_shape = input_image_shape

        # The feature counts / depth for each feature map considered
        # for the class regression head
        self.FEATURE_COUNTS = (64, )

        # Anchor sizes (per layer).
        # The anchor sizes need to scale to cover the sizes of possible
        # objects in the dataset. You can either set an absolute pixel value,
        # or set based off size of image; here they derive from the width.
        width = input_image_shape[-1]
        self.ANCHOR_SIZES = ((width // 8, width // 4, width // 2, width), )

        # These ratios are shared by all anchors.
        self.ANCHOR_RATIOS = (1.0, 0.5, 2.0)

        self.backbone = Backbone()
        self.anchor_generator = AnchorGenerator(
            sizes=self.ANCHOR_SIZES, aspect_ratios=self.ANCHOR_RATIOS)

        self.box_prediction = BoxPrediction(
            num_features=self.FEATURE_COUNTS,
            num_class=num_classes,
            batch_norm=True,
            num_anchors=[
                len(anchors) * len(self.ANCHOR_RATIOS)
                for anchors in self.ANCHOR_SIZES
            ])

        # `reduce=False` is deprecated in torch.nn losses; reduction='none'
        # is the supported equivalent and yields the per-element loss.
        self.loss = torch.nn.BCEWithLogitsLoss(reduction='none')
Example #10
0
 def test_backbone_script(self):
     """The backbone must be compilable with TorchScript."""
     net = Backbone('resnet50', True, False, False)
     torch.jit.script(net)  # noqa
Example #11
0
 def __init__(self):
     # Assemble the SOLO pipeline stages; registration order is preserved
     # deliberately (it fixes parameter/state_dict ordering).
     super(Solo, self).__init__()
     self.backbone = Backbone()    # feature extractor
     self.fpn = FPN()              # multi-scale feature pyramid
     self.ins_head = InsHead()     # instance branch
     self.mask_head = MaskHead()   # mask branch
Example #12
0
# Training loader: the batch sampler owns shuffling and batching.
data_loader_train = DataLoader(dataset_train,
                               batch_sampler=batch_sampler_train,
                               collate_fn=utils.collate_fn_os,
                               num_workers=args.num_workers)

# NOTE(review): this passes `dataset_train` yet pairs it with `sampler_val`
# and is assigned to `data_loader_val` — it looks like it should be
# `dataset_val`; confirm against where `sampler_val` is constructed.
data_loader_val = DataLoader(dataset_train,
                             args.batch_size,
                             sampler=sampler_val,
                             collate_fn=utils.collate_fn_os,
                             drop_last=False,
                             num_workers=args.num_workers)

# %% BUILD MODEL
# Backbone + positional encoding wrapped in a Joiner; channel count is
# copied from the trunk so downstream projections can size themselves.
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0  # backbone trains only with its own LR > 0
base_backbone = Backbone(args.backbone, train_backbone, False, args.dilation)
backbone = Joiner(base_backbone, position_embedding)
backbone.num_channels = base_backbone.num_channels

# DETR-style transformer; intermediate decoder outputs kept for aux losses.
transformer = Transformer(
    d_model=args.hidden_dim,
    dropout=args.dropout,
    nhead=args.nheads,
    dim_feedforward=args.dim_feedforward,
    num_encoder_layers=args.enc_layers,
    num_decoder_layers=args.dec_layers,
    normalize_before=args.pre_norm,
    return_intermediate_dec=True,
)
model = OSDETR(
    backbone,
Example #13
0
    def __init__(self, args):
        """Assemble a DETR model plus its SetCriterion from parsed CLI ``args``.

        Builds the positional encoding, backbone, transformer, prediction
        heads, and the loss criterion in a single constructor.
        """
        super(my_DETR, self).__init__()

        # Half the hidden dim per spatial axis for the positional encoding.
        N_steps = args.hidden_dim // 2
        if args.position_embedding in ('v2', 'sine'):
            # TODO find a better way of exposing other arguments
            position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
        elif args.position_embedding in ('v3', 'learned'):
            position_embedding = PositionEmbeddingLearned(N_steps)
        else:
            raise ValueError(f"not supported {args.position_embedding}")
        train_backbone = args.lr_backbone > 0  # trainable only with positive LR
        return_interm_layers = args.masks      # segmentation needs interm maps
        backbone = Backbone(args.backbone, train_backbone,
                            return_interm_layers, args.dilation)
        joiner = Joiner(backbone, position_embedding)
        joiner.num_channels = backbone.num_channels
        transformer = Transformer(
            d_model=args.hidden_dim,
            dropout=args.dropout,
            nhead=args.nheads,
            dim_feedforward=args.dim_feedforward,
            num_encoder_layers=args.enc_layers,
            num_decoder_layers=args.dec_layers,
            normalize_before=args.pre_norm,
            return_intermediate_dec=True,
        )
        # Dataset-specific class counts: 91 for COCO, 250 for COCO panoptic,
        # 20 for any other dataset_file value.
        num_classes = 20 if args.dataset_file != 'coco' else 91
        if args.dataset_file == "coco_panoptic":
            num_classes = 250
        matcher = build_matcher(args)
        weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
        weight_dict['loss_giou'] = args.giou_loss_coef
        # if args.masks:
        #     weight_dict["loss_mask"] = args.mask_loss_coef
        #     weight_dict["loss_dice"] = args.dice_loss_coef
        # TODO this is a hack
        if args.aux_loss:
            # Replicate every loss weight once per intermediate decoder layer
            # (keys suffixed with the layer index).
            aux_weight_dict = {}
            for i in range(args.dec_layers - 1):
                aux_weight_dict.update(
                    {k + f'_{i}': v
                     for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)

        losses = ['labels', 'boxes', 'cardinality']
        if args.masks:
            losses += ["masks"]
        self.criterion = SetCriterion(num_classes,
                                      matcher=matcher,
                                      weight_dict=weight_dict,
                                      eos_coef=args.eos_coef,
                                      losses=losses)
        self.num_queries = args.num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        # Prediction heads: classification (+1 for the no-object class) and
        # a 3-layer MLP regressing 4 box coordinates.
        self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.query_embed = nn.Embedding(args.num_queries, hidden_dim)
        # 1x1 conv projecting backbone channels to the transformer width.
        self.input_proj = nn.Conv2d(joiner.num_channels,
                                    hidden_dim,
                                    kernel_size=1)
        self.backbone = joiner
        self.aux_loss = args.aux_loss