def build_backbone(lr_backbone, masks, backbone, dilation, hidden_dim, position_embedding):
    """Assemble the DETR visual backbone joined with a position encoding.

    Args:
        lr_backbone: backbone learning rate; a positive value means the
            backbone weights are trained.
        masks: if truthy, the backbone returns intermediate feature maps
            (needed by the segmentation head).
        backbone: backbone architecture name (e.g. ``'resnet50'``).
        dilation: whether to replace stride with dilation in the last block.
        hidden_dim: transformer hidden dimension, used to size the encoding.
        position_embedding: position-encoding type selector.

    Returns:
        A ``Joiner`` wrapping the backbone and encoding, with its
        ``num_channels`` attribute copied from the backbone.
    """
    pos_encoding = build_position_encoding(hidden_dim, position_embedding)
    cnn = Backbone(backbone, lr_backbone > 0, masks, dilation)
    joined = Joiner(cnn, pos_encoding)
    joined.num_channels = cnn.num_channels
    return joined
def build_backbone():
    """Build a fixed ResNet-50 backbone with a sine position encoding.

    Configuration is hard-coded: hidden dim 256 (128 sine features per
    spatial axis), backbone trained, no intermediate layers, no dilation.

    Returns:
        A ``Joiner`` with ``num_channels`` taken from the backbone.
    """
    half_dim = 256 // 2  # sine features per spatial axis
    sine_embed = PositionEmbeddingSine(half_dim, normalize=True)
    resnet = Backbone('resnet50', True, False, False)
    joined = Joiner(resnet, sine_embed)
    joined.num_channels = resnet.num_channels
    return joined
def build_backbone():
    """Construct a trainable ResNet-50 backbone paired with sine embeddings.

    Returns:
        A ``Joiner`` over the backbone and a normalized
        ``PositionEmbeddingSine`` (128 features), with ``num_channels``
        mirrored from the backbone.
    """
    embed = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
    net = Backbone('resnet50',
                   train_backbone=True,
                   return_interm_layers=False,
                   dilation=False)
    joined = Joiner(net, embed)
    joined.num_channels = net.num_channels
    return joined
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    """Factory for a DETR model (optionally with a segmentation head).

    Args:
        backbone_name: CNN backbone architecture name.
        dilation: replace stride with dilation in the backbone's last block.
        num_classes: number of object classes (COCO default 91).
        mask: if True, request intermediate backbone layers and wrap the
            detector in ``DETRsegm`` for segmentation.

    Returns:
        A ``DETR`` instance, or ``DETRsegm(detr)`` when ``mask`` is set.
    """
    d_model = 256  # transformer hidden dimension
    cnn = Backbone(backbone_name,
                   train_backbone=True,
                   return_interm_layers=mask,
                   dilation=dilation)
    sine = PositionEmbeddingSine(d_model // 2, normalize=True)
    joined = Joiner(cnn, sine)
    joined.num_channels = cnn.num_channels

    transformer = Transformer(d_model=d_model, return_intermediate_dec=True)
    detr = DETR(joined, transformer, num_classes=num_classes, num_queries=100)
    return DETRsegm(detr) if mask else detr
def build_backbone(lr_backbone, masks, backbone, dilation, hidden_dim, position_embedding):
    """Build a CNN backbone (ResNet or MobileNet) joined with a position encoding.

    Args:
        lr_backbone: backbone learning rate; > 0 means the backbone is trained.
        masks: if truthy, return intermediate layers (segmentation support).
        backbone: architecture name; must contain ``'resnet'`` or ``'mobilenet'``.
        dilation: replace stride with dilation in the last ResNet block
            (ignored for MobileNet).
        hidden_dim: transformer hidden dimension, sizes the position encoding.
        position_embedding: position-encoding type selector.

    Returns:
        A ``Joiner`` with ``num_channels`` copied from the chosen backbone.

    Raises:
        ValueError: if ``backbone`` names an unsupported architecture.
    """
    position_embedding = build_position_encoding(hidden_dim, position_embedding)
    train_backbone = lr_backbone > 0
    return_interm_layers = masks
    if 'resnet' in backbone:
        backbone = Backbone(backbone, train_backbone, return_interm_layers, dilation)
    elif 'mobilenet' in backbone:
        backbone = MNetBackbone(train_backbone, return_interm_layers)
    else:
        # Fail fast with a clear message; previously an unmatched name left
        # `backbone` as a string and crashed later with an AttributeError.
        raise ValueError(f"Unsupported backbone architecture: {backbone}")
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels
    return model
def _make_backbone(backbone_name: str, mask: bool = False):
    """Build a backbone (timm-provided or native) joined with sine embeddings.

    Args:
        backbone_name: architecture name; a ``'timm_'`` prefix selects a
            ``TimmBackbone`` built from the remainder of the name.
        mask: if True, expose intermediate layers (segmentation support).

    Returns:
        A ``Joiner`` over the backbone and a normalized sine position
        encoding, with ``num_channels`` copied from the backbone.
    """
    prefix = "timm_"
    # Idiomatic prefix test (was a slice-and-compare: name[:len(p)] == p).
    if backbone_name.startswith(prefix):
        backbone = TimmBackbone(
            backbone_name[len(prefix):],
            mask,
            main_layer=-1,
            group_norm=True,
        )
    else:
        backbone = Backbone(backbone_name,
                            train_backbone=True,
                            return_interm_layers=mask,
                            dilation=False)
    hidden_dim = 256  # transformer hidden dimension
    pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    backbone_with_pos_enc = Joiner(backbone, pos_enc)
    backbone_with_pos_enc.num_channels = backbone.num_channels
    return backbone_with_pos_enc
def __init__(self, body, num_classes=90, num_queries=100, aux_loss=True,
             num_channels=512, hidden_dim=64, dropout=.1, nheads=8,
             dim_feedforward=256, enc_layers=2, dec_layers=2, pre_norm=False,
             return_intermediate_dec=True, position_embedding=None):
    """Build a small DETR variant around the given backbone ``body``.

    Args:
        body: backbone body specifier forwarded to ``Backbone``.
        num_classes: number of object classes.
        num_queries: number of object queries.
        aux_loss: enable auxiliary decoding losses.
        num_channels: channel count recorded on the joined backbone
            (NOTE: taken from this argument, not from the backbone itself).
        hidden_dim: transformer model dimension.
        dropout, nheads, dim_feedforward, enc_layers, dec_layers, pre_norm:
            transformer hyper-parameters.
        return_intermediate_dec: return intermediate decoder outputs.
        position_embedding: optional pre-built position encoding; defaults
            to a normalized ``PositionEmbeddingSine``.
    """
    backbone = Backbone(body=body)
    N_steps = hidden_dim // 2  # sine features per spatial axis
    if position_embedding is None:
        position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
    model = Joiner(backbone, position_embedding)
    model.num_channels = num_channels
    transformer = Transformer(
        d_model=hidden_dim,
        dropout=dropout,
        nhead=nheads,
        dim_feedforward=dim_feedforward,
        num_encoder_layers=enc_layers,
        num_decoder_layers=dec_layers,
        normalize_before=pre_norm,
        # Bug fix: was hard-coded to True, silently ignoring the
        # `return_intermediate_dec` parameter. Default (True) preserves
        # behavior for all existing callers.
        return_intermediate_dec=return_intermediate_dec,
    )
    super().__init__(model, transformer, num_classes=num_classes,
                     num_queries=num_queries, aux_loss=aux_loss)
def __init__(self, cfg):
    """Build a DETR detector + Hungarian criterion from a detectron2-style cfg.

    Reads all hyper-parameters from ``cfg.MODEL.DETR`` / ``cfg.MODEL``,
    assembles backbone + transformer + DETR, and constructs the
    ``SetCriterion`` used for training. Everything is moved to
    ``cfg.MODEL.DEVICE``.
    """
    super().__init__()
    self.device = torch.device(cfg.MODEL.DEVICE)
    self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
    hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
    num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
    # Transformer parameters:
    nheads = cfg.MODEL.DETR.NHEADS
    dropout = cfg.MODEL.DETR.DROPOUT
    dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
    enc_layers = cfg.MODEL.DETR.ENC_LAYERS
    dec_layers = cfg.MODEL.DETR.DEC_LAYERS
    pre_norm = cfg.MODEL.DETR.PRE_NORM
    pass_pos_and_query = cfg.MODEL.DETR.PASS_POS_AND_QUERY
    # Loss parameters:
    giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
    l1_weight = cfg.MODEL.DETR.L1_WEIGHT
    deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
    no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT

    # Sine encoding uses hidden_dim // 2 features per spatial axis.
    N_steps = hidden_dim // 2
    d2_backbone = MaskedBackbone(cfg)
    backbone = Joiner(d2_backbone,
                      PositionEmbeddingSine(N_steps, normalize=True))
    backbone.num_channels = d2_backbone.num_channels
    transformer = Transformer(
        d_model=hidden_dim,
        dropout=dropout,
        nhead=nheads,
        dim_feedforward=dim_feedforward,
        num_encoder_layers=enc_layers,
        num_decoder_layers=dec_layers,
        normalize_before=pre_norm,
        # Intermediate decoder outputs are only needed for aux losses.
        return_intermediate_dec=deep_supervision,
        pass_pos_and_query=pass_pos_and_query,
    )
    self.detr = DETR(backbone, transformer, num_classes=self.num_classes,
                     num_queries=num_queries, aux_loss=deep_supervision)
    self.detr.to(self.device)
    # building criterion
    matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight,
                               cost_giou=giou_weight)
    weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
    weight_dict["loss_giou"] = giou_weight
    if deep_supervision:
        # Replicate each loss weight once per intermediate decoder layer
        # (suffix "_0" .. "_{dec_layers-2}").
        aux_weight_dict = {}
        for i in range(dec_layers - 1):
            aux_weight_dict.update(
                {k + f"_{i}": v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    losses = ["labels", "boxes", "cardinality"]
    self.criterion = SetCriterion(self.num_classes, matcher=matcher,
                                  weight_dict=weight_dict,
                                  eos_coef=no_object_weight,
                                  losses=losses)
    self.criterion.to(self.device)
    # Per-channel normalization closure applied to input images.
    pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(
        3, 1, 1)
    pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(
        3, 1, 1)
    self.normalizer = lambda x: (x - pixel_mean) / pixel_std
    self.to(self.device)
def __init__(self, cfg):
    """Build a DETR detector (optionally with a segmentation head) from cfg.

    Like the plain detector variant, but when ``cfg.MODEL.MASK_ON`` is set it
    can load frozen detector weights and wraps the model in ``DETRsegm``.
    """
    super().__init__()
    self.device = torch.device(cfg.MODEL.DEVICE)
    self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
    self.mask_on = cfg.MODEL.MASK_ON
    hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
    num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
    # Transformer parameters:
    nheads = cfg.MODEL.DETR.NHEADS
    dropout = cfg.MODEL.DETR.DROPOUT
    dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
    enc_layers = cfg.MODEL.DETR.ENC_LAYERS
    dec_layers = cfg.MODEL.DETR.DEC_LAYERS
    pre_norm = cfg.MODEL.DETR.PRE_NORM
    # Loss parameters:
    giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
    l1_weight = cfg.MODEL.DETR.L1_WEIGHT
    deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
    no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT

    # Sine encoding uses hidden_dim // 2 features per spatial axis.
    N_steps = hidden_dim // 2
    d2_backbone = MaskedBackbone(cfg)
    backbone = Joiner(d2_backbone,
                      PositionEmbeddingSine(N_steps, normalize=True))
    backbone.num_channels = d2_backbone.num_channels
    transformer = Transformer(
        d_model=hidden_dim,
        dropout=dropout,
        nhead=nheads,
        dim_feedforward=dim_feedforward,
        num_encoder_layers=enc_layers,
        num_decoder_layers=dec_layers,
        normalize_before=pre_norm,
        return_intermediate_dec=deep_supervision,
    )
    self.detr = DETR(backbone, transformer, num_classes=self.num_classes,
                     num_queries=num_queries, aux_loss=deep_supervision)
    if self.mask_on:
        frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS
        if frozen_weights != '':
            print("LOAD pre-trained weights")
            # map_location keeps the checkpoint on CPU during load.
            weight = torch.load(
                frozen_weights,
                map_location=lambda storage, loc: storage)['model']
            # Strip the 'detr.' prefix so keys match self.detr's state dict.
            new_weight = {}
            for k, v in weight.items():
                if 'detr.' in k:
                    new_weight[k.replace('detr.', '')] = v
                else:
                    print(f"Skipping loading weight {k} from frozen model")
            del weight
            self.detr.load_state_dict(new_weight)
            del new_weight
        # Freeze the detector only when it was initialized from a checkpoint.
        self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != ''))
        # NOTE(review): this stores the PostProcessSegm *class*, not an
        # instance — confirm downstream usage instantiates or calls it
        # appropriately.
        self.seg_postprocess = PostProcessSegm
    self.detr.to(self.device)
    # building criterion
    matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight,
                               cost_giou=giou_weight)
    weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
    weight_dict["loss_giou"] = giou_weight
    if deep_supervision:
        # Replicate each loss weight once per intermediate decoder layer.
        aux_weight_dict = {}
        for i in range(dec_layers - 1):
            aux_weight_dict.update(
                {k + f"_{i}": v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    losses = ["labels", "boxes", "cardinality"]
    if self.mask_on:
        losses += ["masks"]
    self.criterion = SetCriterion(
        self.num_classes,
        matcher=matcher,
        weight_dict=weight_dict,
        eos_coef=no_object_weight,
        losses=losses,
    )
    self.criterion.to(self.device)
    # Per-channel normalization closure applied to input images.
    pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(
        3, 1, 1)
    pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(
        3, 1, 1)
    self.normalizer = lambda x: (x - pixel_mean) / pixel_std
    self.to(self.device)
    # (continuation of a DataLoader(...) call opened before this chunk)
    collate_fn=utils.collate_fn_os, num_workers=args.num_workers)
# NOTE(review): the *validation* loader is fed `dataset_train` — this looks
# like a copy-paste bug; it almost certainly should use the validation
# dataset (which is what `sampler_val` suggests). Confirm and fix upstream.
data_loader_val = DataLoader(dataset_train, args.batch_size,
                             sampler=sampler_val,
                             collate_fn=utils.collate_fn_os,
                             drop_last=False,
                             num_workers=args.num_workers)

# %% BUILD MODEL
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
# return_interm_layers is hard-coded False: no segmentation support here.
base_backbone = Backbone(args.backbone, train_backbone, False, args.dilation)
backbone = Joiner(base_backbone, position_embedding)
backbone.num_channels = base_backbone.num_channels
transformer = Transformer(
    d_model=args.hidden_dim,
    dropout=args.dropout,
    nhead=args.nheads,
    dim_feedforward=args.dim_feedforward,
    num_encoder_layers=args.enc_layers,
    num_decoder_layers=args.dec_layers,
    normalize_before=args.pre_norm,
    return_intermediate_dec=True,
)
# (OSDETR(...) call continues past this chunk)
model = OSDETR(
    backbone,
    transformer,
    num_classes=args.num_classes,
def __init__(self, args):
    """Build a custom DETR model plus its training criterion from `args`.

    Constructs the position embedding, backbone+joiner, transformer,
    Hungarian-matching ``SetCriterion``, and the prediction heads
    (class/bbox embeds, query embedding, input projection).

    Raises:
        ValueError: if ``args.position_embedding`` is not one of
            'v2'/'sine'/'v3'/'learned'.
    """
    super(my_DETR, self).__init__()
    # Sine/learned encodings use hidden_dim // 2 features per spatial axis.
    N_steps = args.hidden_dim // 2
    if args.position_embedding in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
    elif args.position_embedding in ('v3', 'learned'):
        position_embedding = PositionEmbeddingLearned(N_steps)
    else:
        raise ValueError(f"not supported {args.position_embedding}")
    train_backbone = args.lr_backbone > 0
    # Intermediate layers are only needed when training masks.
    return_interm_layers = args.masks
    backbone = Backbone(args.backbone, train_backbone, return_interm_layers,
                        args.dilation)
    joiner = Joiner(backbone, position_embedding)
    joiner.num_channels = backbone.num_channels
    transformer = Transformer(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
    # Dataset-dependent class count: 20 (VOC-style) unless COCO (91);
    # COCO panoptic uses 250.
    num_classes = 20 if args.dataset_file != 'coco' else 91
    if args.dataset_file == "coco_panoptic":
        num_classes = 250
    matcher = build_matcher(args)
    weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
    weight_dict['loss_giou'] = args.giou_loss_coef
    # if args.masks:
    #     weight_dict["loss_mask"] = args.mask_loss_coef
    #     weight_dict["loss_dice"] = args.dice_loss_coef
    # TODO this is a hack
    if args.aux_loss:
        # Replicate each loss weight once per intermediate decoder layer.
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update(
                {k + f'_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    losses = ['labels', 'boxes', 'cardinality']
    if args.masks:
        losses += ["masks"]
    self.criterion = SetCriterion(num_classes, matcher=matcher,
                                  weight_dict=weight_dict,
                                  eos_coef=args.eos_coef,
                                  losses=losses)
    self.num_queries = args.num_queries
    self.transformer = transformer
    hidden_dim = transformer.d_model
    # +1 output class for the "no object" label.
    self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
    # 3-layer MLP regressing the 4 box coordinates.
    self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
    self.query_embed = nn.Embedding(args.num_queries, hidden_dim)
    # 1x1 conv projecting backbone channels down to the transformer dim.
    self.input_proj = nn.Conv2d(joiner.num_channels, hidden_dim,
                                kernel_size=1)
    self.backbone = joiner
    self.aux_loss = args.aux_loss