Code example #1
    def test_hungarian(self):
        n_queries, n_targets, n_classes = 100, 15, 91
        logits = torch.rand(1, n_queries, n_classes + 1)
        boxes = torch.rand(1, n_queries, 4)
        tgt_labels = torch.randint(high=n_classes, size=(n_targets,))
        tgt_boxes = torch.rand(n_targets, 4)
        matcher = HungarianMatcher()
        targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}]
        indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets)
        indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1),
                                   'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2)
        self.assertEqual(len(indices_single[0][0]), n_targets)
        self.assertEqual(len(indices_single[0][1]), n_targets)
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[0]]))
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[1]]))

        # test with empty targets
        tgt_labels_empty = torch.randint(high=n_classes, size=(0,))
        tgt_boxes_empty = torch.rand(0, 4)
        targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}]
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty)
        self.assertEqual(len(indices[1][0]), 0)
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2)
        self.assertEqual(len(indices[0][0]), 0)
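
The indices returned above are, for each image in the batch, a pair of index tensors: matched prediction indices and the corresponding target indices. A minimal sketch of how such a match is typically consumed, reusing the tensors from this test (the gather below is illustrative and not part of the original test):

# Illustrative only: align matched predictions and targets for the single-image case.
pred_idx, tgt_idx = indices_single[0]
matched_pred_boxes = boxes[0, pred_idx]   # (n_targets, 4), reordered to follow the matching
matched_tgt_boxes = tgt_boxes[tgt_idx]    # (n_targets, 4), same order as matched_pred_boxes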
Code example #2
 def __init__(self, num_classes, loss_weight, na_coef, losses, matcher):
     """ Create the criterion.
     Parameters:
         num_classes: number of relation categories
         matcher: module able to compute a matching between targets and proposals
         loss_weight: dict containing as key the names of the losses and as values their relative weight.
         na_coef: list containg the relative classification weight applied to the NA category and positional classification weight applied to the [SEP]
         losses: list of all the losses to be applied. See get_loss for list of available losses.
     """
     super().__init__()
     self.num_classes = num_classes
     self.loss_weight = loss_weight
     self.matcher = HungarianMatcher(loss_weight, matcher)
     self.losses = losses
     rel_weight = torch.ones(self.num_classes + 1)
     rel_weight[-1] = na_coef
     self.register_buffer('rel_weight', rel_weight)
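
For context, the rel_weight buffer registered above is the standard mechanism for down-weighting a no-relation (NA) class in a weighted cross-entropy loss. A minimal, self-contained sketch of that mechanism, assuming the buffer is later passed as the weight argument of F.cross_entropy (the loss methods themselves are not shown in this example):

import torch
import torch.nn.functional as F

num_classes = 5                              # hypothetical number of relation categories
rel_weight = torch.ones(num_classes + 1)     # one weight per relation class, plus the trailing NA class
rel_weight[-1] = 0.25                        # na_coef: down-weight the NA class
logits = torch.randn(3, num_classes + 1)     # dummy predictions for 3 candidate relations
labels = torch.tensor([0, num_classes, 2])   # index num_classes == NA
loss = F.cross_entropy(logits, labels, weight=rel_weight)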
Code example #3
set_cost_class = 1  # assumed: not defined in this snippet; 1 is DETR's default class-cost weight
set_cost_bbox = 5
set_cost_giou = 2
eos_coef = 0.1
num_classes = 6
aux_loss = False  # assumed: not defined in this snippet; set True to add auxiliary decoder losses

body = create_body(models.resnet18, True, -2)
model = DETRFastAi(body, num_classes=num_classes, aux_loss=aux_loss)

losses = ['labels', 'boxes', 'cardinality']
weight_dict = {
    'loss_ce': 1,
    'loss_bbox': set_cost_bbox,
    'loss_giou': set_cost_giou
}
matcher = HungarianMatcher(cost_class=set_cost_class,
                           cost_bbox=set_cost_bbox,
                           cost_giou=set_cost_giou)

crit = SetCriterionFastAi(num_classes,
                          matcher=matcher,
                          weight_dict=weight_dict,
                          eos_coef=eos_coef,
                          losses=losses)

size = 128
coco = untar_data(URLs.COCO_TINY)
images, lbl_bbox = get_annotations(coco / 'train.json')  # 'annotations/train_sample.json'
img2bbox = dict(zip(images, lbl_bbox))
get_y_func = lambda o: img2bbox[o.name]
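
For orientation, the criterion built above follows the usual DETR calling convention (assuming SetCriterionFastAi keeps the upstream SetCriterion interface): the model emits a dict with 'pred_logits' and 'pred_boxes', the targets are a list of per-image dicts, and the individual losses are combined via weight_dict. A minimal sketch with dummy tensors (the tensors below are illustrative, not produced by the fastai pipeline shown here):

# Illustrative call convention for a DETR-style criterion; torch is assumed imported.
outputs = {'pred_logits': torch.rand(2, 100, num_classes + 1),   # (batch, queries, classes + no-object)
           'pred_boxes': torch.rand(2, 100, 4)}                  # normalized cxcywh boxes
targets = [{'labels': torch.randint(num_classes, (3,)), 'boxes': torch.rand(3, 4)},
           {'labels': torch.randint(num_classes, (5,)), 'boxes': torch.rand(5, 4)}]
loss_dict = crit(outputs, targets)
total_loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)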
Code example #4
    def __init__(self, cfg):
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)

        self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
        hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
        num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
        # Transformer parameters:
        nheads = cfg.MODEL.DETR.NHEADS
        dropout = cfg.MODEL.DETR.DROPOUT
        dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
        enc_layers = cfg.MODEL.DETR.ENC_LAYERS
        dec_layers = cfg.MODEL.DETR.DEC_LAYERS
        pre_norm = cfg.MODEL.DETR.PRE_NORM
        pass_pos_and_query = cfg.MODEL.DETR.PASS_POS_AND_QUERY

        # Loss parameters:
        giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
        l1_weight = cfg.MODEL.DETR.L1_WEIGHT
        deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT

        N_steps = hidden_dim // 2
        d2_backbone = MaskedBackbone(cfg)
        backbone = Joiner(d2_backbone,
                          PositionEmbeddingSine(N_steps, normalize=True))
        backbone.num_channels = d2_backbone.num_channels

        transformer = Transformer(
            d_model=hidden_dim,
            dropout=dropout,
            nhead=nheads,
            dim_feedforward=dim_feedforward,
            num_encoder_layers=enc_layers,
            num_decoder_layers=dec_layers,
            normalize_before=pre_norm,
            return_intermediate_dec=deep_supervision,
            pass_pos_and_query=pass_pos_and_query,
        )

        self.detr = DETR(backbone,
                         transformer,
                         num_classes=self.num_classes,
                         num_queries=num_queries,
                         aux_loss=deep_supervision)
        self.detr.to(self.device)

        # building criterion
        matcher = HungarianMatcher(cost_class=1,
                                   cost_bbox=l1_weight,
                                   cost_giou=giou_weight)
        weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
        weight_dict["loss_giou"] = giou_weight
        if deep_supervision:
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update(
                    {k + f"_{i}": v
                     for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)
        losses = ["labels", "boxes", "cardinality"]
        self.criterion = SetCriterion(self.num_classes,
                                      matcher=matcher,
                                      weight_dict=weight_dict,
                                      eos_coef=no_object_weight,
                                      losses=losses)
        self.criterion.to(self.device)

        pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(
            3, 1, 1)
        pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(
            3, 1, 1)
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std
        self.to(self.device)
Code example #5
File: detr.py  Project: yhZhai/detr
    def __init__(self, cfg):
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)

        self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
        self.mask_on = cfg.MODEL.MASK_ON
        hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
        num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
        # Transformer parameters:
        nheads = cfg.MODEL.DETR.NHEADS
        dropout = cfg.MODEL.DETR.DROPOUT
        dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
        enc_layers = cfg.MODEL.DETR.ENC_LAYERS
        dec_layers = cfg.MODEL.DETR.DEC_LAYERS
        pre_norm = cfg.MODEL.DETR.PRE_NORM

        # Loss parameters:
        giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
        l1_weight = cfg.MODEL.DETR.L1_WEIGHT
        deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT

        N_steps = hidden_dim // 2
        d2_backbone = MaskedBackbone(cfg)
        backbone = Joiner(d2_backbone,
                          PositionEmbeddingSine(N_steps, normalize=True))
        backbone.num_channels = d2_backbone.num_channels

        transformer = Transformer(
            d_model=hidden_dim,
            dropout=dropout,
            nhead=nheads,
            dim_feedforward=dim_feedforward,
            num_encoder_layers=enc_layers,
            num_decoder_layers=dec_layers,
            normalize_before=pre_norm,
            return_intermediate_dec=deep_supervision,
        )

        self.detr = DETR(backbone,
                         transformer,
                         num_classes=self.num_classes,
                         num_queries=num_queries,
                         aux_loss=deep_supervision)
        if self.mask_on:
            frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS
            if frozen_weights != '':
                print("LOAD pre-trained weights")
                weight = torch.load(
                    frozen_weights,
                    map_location=lambda storage, loc: storage)['model']
                new_weight = {}
                for k, v in weight.items():
                    if 'detr.' in k:
                        new_weight[k.replace('detr.', '')] = v
                    else:
                        print(f"Skipping loading weight {k} from frozen model")
                del weight
                self.detr.load_state_dict(new_weight)
                del new_weight
            self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != ''))
            self.seg_postprocess = PostProcessSegm

        self.detr.to(self.device)

        # building criterion
        matcher = HungarianMatcher(cost_class=1,
                                   cost_bbox=l1_weight,
                                   cost_giou=giou_weight)
        weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
        weight_dict["loss_giou"] = giou_weight
        if deep_supervision:
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update(
                    {k + f"_{i}": v
                     for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)
        losses = ["labels", "boxes", "cardinality"]
        if self.mask_on:
            losses += ["masks"]
        self.criterion = SetCriterion(
            self.num_classes,
            matcher=matcher,
            weight_dict=weight_dict,
            eos_coef=no_object_weight,
            losses=losses,
        )
        self.criterion.to(self.device)

        pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(
            3, 1, 1)
        pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(
            3, 1, 1)
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std
        self.to(self.device)
Code example #6
File: ch_train_driver.py  Project: chjort/detr
transformer = Transformer(
    d_model=args.hidden_dim,
    dropout=args.dropout,
    nhead=args.nheads,
    dim_feedforward=args.dim_feedforward,
    num_encoder_layers=args.enc_layers,
    num_decoder_layers=args.dec_layers,
    normalize_before=args.pre_norm,
    return_intermediate_dec=True,
)
model = OSDETR(
    backbone,
    transformer,
    num_classes=args.num_classes,
    num_queries=args.num_queries,
    aux_loss=args.aux_loss,
)
matcher = HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)

weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
weight_dict['loss_giou'] = args.giou_loss_coef
if args.aux_loss:
    aux_weight_dict = {}
    for i in range(args.dec_layers - 1):
        aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
    weight_dict.update(aux_weight_dict)

losses = ['labels', 'boxes', 'cardinality']
criterion = SetCriterion(args.num_classes, matcher=matcher, weight_dict=weight_dict,
                         eos_coef=args.eos_coef, losses=losses)
postprocessors = {'bbox': PostProcess()}

criterion.to(device)
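
For reference, the 'bbox' postprocessor above is typically applied at evaluation time to rescale the normalized predictions back to absolute coordinates of the original images. A minimal sketch, assuming PostProcess matches the upstream DETR implementation (samples and targets are illustrative stand-ins for a real evaluation batch):

# Illustrative evaluation-time use of PostProcess.
outputs = model(samples)                                                 # raw DETR-style outputs
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], 0)    # (batch, 2) as (height, width)
results = postprocessors['bbox'](outputs, orig_target_sizes)             # per-image {'scores', 'labels', 'boxes'}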
Code example #7
                "boxes": dg.to_variable(np.zeros([3, 4], dtype="float32"))
            },
            {
                "labels": dg.to_variable(np.zeros([
                    17,
                ], dtype="int64")),
                "boxes": dg.to_variable(np.zeros([17, 4], dtype="float32"))
            },
            {
                "labels": dg.to_variable(np.zeros([
                    5,
                ], dtype="int64")),
                "boxes": dg.to_variable(np.zeros([5, 4], dtype="float32"))
            },
        ]

        matcher = HungarianMatcher(1, 1, 1)
        indices = matcher(out, target)
        for ind in indices:
            i_ind, j_ind = ind
            print(i_ind.shape, j_ind.shape)
            # [6] [6]
            # [3] [3]
            # [17] [17]
            # [5] [5]

        loss_dict = criterion(out, target)
        for name, loss in loss_dict.items():
            print(name)
            print(loss)