コード例 #1
0
 def test_mask_rcnn(self):
     """Export Mask R-CNN (resnet50-FPN) to ONNX and compare against eager.

     Covers a plain export, an export with dynamic axes, and re-running the
     exported graph on additional inputs of different shapes.
     """
     model = mask_rcnn.maskrcnn_resnet50_fpn(pretrained=False,
                                             pretrained_backbone=True,
                                             min_size=200,
                                             max_size=300)
     images, test_images = _get_test_images()
     dummy_image = [torch.ones(3, 100, 100) * 0.3]

     # Naming / dynamic-axis metadata shared by the dynamic-shape exports.
     out_names = ["boxes", "labels", "scores", "masks"]
     dyn_axes = {
         "images_tensors": [0, 1, 2],
         "boxes": [0, 1],
         "labels": [0],
         "scores": [0],
         "masks": [0, 1, 2],
     }
     tolerances = dict(rtol=1e-3, atol=1e-5)

     # Static export.
     self.run_test(model, (images, ), **tolerances)
     # Export with dynamic batch/spatial axes.
     self.run_test(model, (images, ),
                   input_names=["images_tensors"],
                   output_names=list(out_names),
                   dynamic_axes=dict(dyn_axes),
                   **tolerances)
     # Same export exercised with extra inputs of different sizes.
     self.run_test(model, (images, ),
                   additional_test_inputs=[(images, ), (test_images, ),
                                           (dummy_image, )],
                   input_names=["images_tensors"],
                   output_names=list(out_names),
                   dynamic_axes=dict(dyn_axes),
                   **tolerances)
     # Trace from the small dummy image, then feed the real images.
     self.run_test(model, (dummy_image, ),
                   additional_test_inputs=[(dummy_image, ), (images, )],
                   input_names=["images_tensors"],
                   output_names=list(out_names),
                   dynamic_axes=dict(dyn_axes),
                   **tolerances)
コード例 #2
0
def multitask_maskrcnn_resnet50_fpn(
        **kwargs
):
    """Build a Mask R-CNN with extra per-task classification heads attached.

    Creates one `ClassificationTaskHead` per (task name, class count) pair,
    registers each head as a submodule of the base Mask R-CNN (so their
    parameters are tracked by optimizers and state_dict), and reroutes
    `roi_heads.forward` through the multitask `forward` with the heads bound.

    Args:
        **kwargs: Forwarded to `maskrcnn_resnet50_fpn`.

    Returns:
        The Mask R-CNN model with the task heads attached.
    """
    task_heads = [
        ClassificationTaskHead(name, num_classes)
        for name, num_classes in [("roof_style", 4), ("roof_material", 4)]
    ]
    # NOTE(review): heads are unconditionally moved to GPU — this fails on
    # CPU-only hosts; confirm whether device placement should be a parameter.
    task_heads = [head.cuda() for head in task_heads]
    model = maskrcnn_resnet50_fpn(num_classes=NUM_CLASSES, **kwargs)
    # Plain loop instead of a side-effect list comprehension: we only want the
    # add_module side effect, not a throwaway list of Nones.
    for head in task_heads:
        model.add_module(head.name, head)

    # Bind the task heads into roi_heads.forward via partial application.
    model.roi_heads.forward = partial(forward, model.roi_heads, task_heads=task_heads)
    return model
コード例 #3
0
ファイル: train.py プロジェクト: cwood1967/xlearn
def get_model(num_classes, pretrained=True):
    """Build a Mask R-CNN (ResNet-50 FPN) with heads resized to `num_classes`.

    Parameters:
        num_classes (int): number of output classes (including background).
        pretrained (bool): load COCO-pretrained weights for the base model.

    Returns:
        The configured Mask R-CNN `nn.Module`.
    """
    model = maskrcnn_resnet50_fpn(pretrained=pretrained)

    # Replace the box classification head with one sized for our classes.
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    # Replace the mask head as well (leftover debug print removed).
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256

    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer,
                                                       num_classes)

    return model
コード例 #4
0
ファイル: model.py プロジェクト: anrim/icevision
def model(
    num_classes: int,
    backbone: Optional[nn.Module] = None,
    remove_internal_transforms: bool = True,
    pretrained: bool = True,
    **mask_rcnn_kwargs
) -> nn.Module:
    """Build a torchvision Mask R-CNN.

    # Arguments
        num_classes: Number of classes.
        backbone: Backbone module to use; a resnet50-FPN detector is built
        when this is `None`.
        remove_internal_transforms: Strip torchvision's built-in
        resize/normalize transforms — the `Dataset` pipeline already applies
        them, so removing the internal ones is safe.
        pretrained: Forwarded to `maskrcnn_resnet50_fpn` when `backbone is
        None`. Keep `True` for transfer learning; pass `False` at inference
        time when loading custom pretrained weights.
        **mask_rcnn_kwargs: Extra keyword arguments forwarded to
        `torchvision.models.detection.mask_rcnn.MaskRCNN`.

    # Return
        A Pytorch `nn.Module`.
    """
    if backbone is not None:
        rcnn = MaskRCNN(backbone, num_classes=num_classes, **mask_rcnn_kwargs)
        backbone_param_groups = backbone.param_groups()
    else:
        rcnn = maskrcnn_resnet50_fpn(
            pretrained=pretrained, pretrained_backbone=pretrained, **mask_rcnn_kwargs
        )

        # Swap the COCO-sized box-classification head for one matching ours.
        box_in = rcnn.roi_heads.box_predictor.cls_score.in_features
        rcnn.roi_heads.box_predictor = FastRCNNPredictor(box_in, num_classes)

        # Likewise for the mask head.
        mask_in = rcnn.roi_heads.mask_predictor.conv5_mask.in_channels
        rcnn.roi_heads.mask_predictor = MaskRCNNPredictor(
            in_channels=mask_in, dim_reduced=256, num_classes=num_classes
        )

        backbone_param_groups = resnet_fpn.param_groups(rcnn.backbone)

    patch_param_groups(model=rcnn, backbone_param_groups=backbone_param_groups)

    if remove_internal_transforms:
        remove_internal_model_transforms(rcnn)

    return rcnn
コード例 #5
0
ファイル: model.py プロジェクト: potipot/icevision
def model(num_classes: int,
          backbone: Optional[TorchvisionBackboneConfig] = None,
          remove_internal_transforms: bool = True,
          pretrained: bool = True,
          **mask_rcnn_kwargs) -> nn.Module:
    """MaskRCNN model implemented by torchvision.

    # Arguments
        num_classes: Number of classes.
        backbone: Backbone model to use. Defaults to a resnet50_fpn model.
        remove_internal_transforms: The torchvision model internally applies transforms
        like resizing and normalization, but we already do this at the `Dataset` level,
        so it's safe to remove those internal transforms.
        pretrained: Argument passed to `maskrcnn_resnet50_fpn` if `backbone is None`.
        Defaults to True (transfer learning); pass False at inference time when
        loading your own pretrained weights. (Previously hard-coded to True.)
        **mask_rcnn_kwargs: Keyword arguments that internally are going to be passed to
        `torchvision.models.detection.mask_rcnn.MaskRCNN`.

    # Return
        A Pytorch `nn.Module`.
    """
    if backbone is None:
        model = maskrcnn_resnet50_fpn(pretrained=pretrained,
                                      pretrained_backbone=pretrained,
                                      **mask_rcnn_kwargs)

        # Resize the box-classification head to `num_classes`.
        in_features_box = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(
            in_features_box, num_classes)

        # Resize the mask head to `num_classes`.
        in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
        model.roi_heads.mask_predictor = MaskRCNNPredictor(
            in_channels=in_features_mask,
            dim_reduced=256,
            num_classes=num_classes)

        resnet_fpn.patch_param_groups(model.backbone)
    else:
        model = MaskRCNN(backbone.backbone,
                         num_classes=num_classes,
                         **mask_rcnn_kwargs)

    patch_rcnn_param_groups(model=model)

    if remove_internal_transforms:
        remove_internal_model_transforms(model)

    return model
コード例 #6
0
def model(num_classes: int,
          backbone: Optional[nn.Module] = None,
          remove_internal_transforms: bool = True,
          **mask_rcnn_kwargs) -> nn.Module:
    """MaskRCNN model given by torchvision.

    (Docstring corrected: this builds a Mask R-CNN, not a Faster R-CNN —
    see the `maskrcnn_resnet50_fpn` / `MaskRCNN` constructors below.)

    Args:
        num_classes (int): Number of classes.
        backbone (nn.Module): Backbone model to use. Defaults to a resnet50_fpn model.
        remove_internal_transforms (bool): Strip torchvision's internal
            resize/normalize transforms, which are already applied at the
            `Dataset` level.
        **mask_rcnn_kwargs: Extra keyword arguments forwarded to
            `torchvision.models.detection.mask_rcnn.MaskRCNN`.

    Return:
        nn.Module
    """
    if backbone is None:
        model = maskrcnn_resnet50_fpn(pretrained=True, **mask_rcnn_kwargs)

        # Resize the box-classification head to `num_classes`.
        in_features_box = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(
            in_features_box, num_classes)

        # Resize the mask head to `num_classes`.
        in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
        model.roi_heads.mask_predictor = MaskRCNNPredictor(
            in_channels=in_features_mask,
            dim_reduced=256,
            num_classes=num_classes)

        backbone_param_groups = resnet_fpn.param_groups(model.backbone)
    else:
        model = MaskRCNN(backbone,
                         num_classes=num_classes,
                         **mask_rcnn_kwargs)
        backbone_param_groups = backbone.param_groups()

    patch_param_groups(model=model,
                       backbone_param_groups=backbone_param_groups)

    if remove_internal_transforms:
        remove_internal_model_transforms(model)

    return model
コード例 #7
0
ファイル: learner.py プロジェクト: yasutak/SemTorch
def get_segmentation_learner(dls,
                             number_classes,
                             segmentation_type,
                             architecture_name,
                             backbone_name,
                             loss_func=None,
                             opt_func=Adam,
                             lr=defaults.lr,
                             splitter=trainable_params,
                             cbs=None,
                             pretrained=True,
                             normalize=True,
                             image_size=None,
                             metrics=None,
                             path=None,
                             model_dir='models',
                             wd=None,
                             wd_bn_bias=False,
                             train_bn=True,
                             moms=(0.95, 0.85, 0.95)):
    """Return a learner for the provided architecture and backbone.

    Parameters:
    dls (DataLoader): the dataloader to use with the learner
    number_classes (int): the number of classes in the project. It should be >=2
    segmentation_type (str): just `Semantic Segmentation` accepted for now
    architecture_name (str): name of the architecture. The following ones are supported: `unet`, `deeplabv3+`, `hrnet`, `maskrcnn` and `u2^net`
    backbone_name (str): name of the backbone
    loss_func (): loss function
    opt_func (): optimizer function
    lr (): learning rate(s)
    splitter (): splitter function for freezing the learner
    cbs (List[cb]): list of callbacks
    pretrained (bool): whether a pretrained backbone is needed
    normalize (bool): whether the data should be normalized
    image_size (int): REQUIRED for MaskRCNN; the images are rescaled to this size
    metrics (List[metric]): list of metrics
    path (): path parameter
    model_dir (str): the directory in which models are saved
    wd (float): weight decay
    wd_bn_bias (bool): whether weight decay applies to batchnorm and bias params
    train_bn (bool): whether batchnorm layers are trained
    moms (Tuple(float)): tuple of momentums

    Returns:
    learner: value containing the learner object

    Raises:
    ValueError: if number_classes < 2, or if `maskrcnn` is requested without
    providing image_size
    """

    # Map the class count onto the vocabulary used by the configuration check.
    number_classes_name = ""
    if number_classes == 2:
        number_classes_name = "binary"
    elif number_classes > 2:
        number_classes_name = "multiple"
    else:
        # ValueError is the idiomatic type for a bad argument value; it is a
        # subclass of Exception, so existing `except Exception` callers still work.
        raise ValueError("The number of classes must be >=2")

    # Fail early if this architecture/backbone combination is unsupported.
    check_architecture_configuration(number_classes=number_classes_name,
                                     segmentation_type=segmentation_type,
                                     architecture_name=architecture_name,
                                     backbone_name=backbone_name)

    learner = None

    if architecture_name == "unet":

        # TODO: review the arch choice
        learner = unet_learner(dls=dls,
                               arch=unet_backbone_name[backbone_name],
                               metrics=metrics,
                               wd=wd,
                               loss_func=loss_func,
                               opt_func=opt_func,
                               lr=lr,
                               splitter=splitter,
                               cbs=cbs,
                               path=path,
                               model_dir=model_dir,
                               wd_bn_bias=wd_bn_bias,
                               train_bn=train_bn,
                               pretrained=pretrained,
                               normalize=normalize,
                               moms=moms)

    elif architecture_name == "deeplabv3+":

        model = DeepLabV3Plus(backbone_name=backbone_name,
                              nclass=number_classes,
                              pretrained=pretrained)
        learner = Learner(dls=dls,
                          model=model,
                          loss_func=loss_func,
                          opt_func=opt_func,
                          lr=lr,
                          splitter=splitter,
                          cbs=cbs,
                          metrics=metrics,
                          path=path,
                          model_dir=model_dir,
                          wd=wd,
                          wd_bn_bias=wd_bn_bias,
                          train_bn=train_bn)

    elif architecture_name == "hrnet":

        model = HRNet(nclass=number_classes,
                      backbone_name=backbone_name,
                      pretrained=pretrained)
        learner = Learner(dls=dls,
                          model=model,
                          loss_func=loss_func,
                          opt_func=opt_func,
                          lr=lr,
                          splitter=splitter,
                          cbs=cbs,
                          metrics=metrics,
                          path=path,
                          model_dir=model_dir,
                          wd=wd,
                          wd_bn_bias=wd_bn_bias,
                          train_bn=train_bn)

    elif architecture_name == "maskrcnn":
        if image_size is None:
            raise ValueError(
                "MaskRCNN requires image_size; the value is used to rescale the images"
            )

        model = maskrcnn_resnet50_fpn(num_classes=number_classes,
                                      min_size=image_size,
                                      max_size=image_size)
        learner = mask_rcnn.MaskRCNNLearner(dls=dls,
                                            model=model,
                                            loss_func=loss_func,
                                            opt_func=opt_func,
                                            lr=lr,
                                            splitter=splitter,
                                            cbs=cbs,
                                            metrics=metrics,
                                            path=path,
                                            model_dir=model_dir,
                                            wd=wd,
                                            wd_bn_bias=wd_bn_bias,
                                            train_bn=train_bn)

    elif architecture_name == "u2^net":
        # NOTE(review): an unrecognized backbone_name leaves model as None here;
        # check_architecture_configuration is assumed to have rejected it earlier.
        model = None
        if backbone_name == "small":
            model = u2net.U2NETP(3, 1)
        elif backbone_name == "normal":
            model = u2net.U2NET(3, 1)

        learner = u2net.USquaredNetLearner(dls=dls,
                                           model=model,
                                           opt_func=opt_func,
                                           lr=lr,
                                           splitter=splitter,
                                           cbs=cbs,
                                           metrics=metrics,
                                           path=path,
                                           model_dir=model_dir,
                                           wd=wd,
                                           wd_bn_bias=wd_bn_bias,
                                           train_bn=train_bn)

    return learner
コード例 #8
0
# Assemble custom heads for a 3-class Mask R-CNN (background + 2 foreground).
box_head = TwoMLPHead(in_channels=7 * 7 * 256, representation_size=128)
box_predictor = FastRCNNPredictor(in_channels=128, num_classes=3)
# NOTE(review): newer torchvision versions expect featmap_names as strings
# ('0', '1', ...) — confirm against the pinned torchvision version.
mask_roi_pool = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0, 1, 2, 3],
                                                   output_size=14,
                                                   sampling_ratio=2)
mask_predictor = MaskRCNNPredictor(in_channels=256,
                                   dim_reduced=256,
                                   num_classes=3)

# Override the default heads when constructing the detector.
inference_args['box_head'] = box_head
inference_args['rpn_anchor_generator'] = anchor_generator
inference_args['mask_roi_pool'] = mask_roi_pool
inference_args['mask_predictor'] = mask_predictor
inference_args['box_predictor'] = box_predictor

# Build the detector without COCO weights, load the trained COVID-detector
# weights, and switch to inference mode on the target device.
maskrcnn_model = maskrcnn_resnet50_fpn(pretrained=False, **inference_args)
maskrcnn_model.load_state_dict(covid_detector_weights['model_weights'])
maskrcnn_model.eval()
maskrcnn_model = maskrcnn_model.to(device)
# IoU thresholds 0.5, 0.55, ..., 0.95 (COCO-style mAP sweep), with one
# mean-AP accumulator slot per threshold.
thresholds = torch.arange(0.5, 1, 0.05).to(device)
mean_aps_all_th = torch.zeros(thresholds.size()[0]).to(device)


def compute_map(iou_th):
    mean_aps_this_th = torch.zeros(len(dataloader_eval_covid),
                                   dtype=torch.float)
    for id, b in enumerate(dataloader_eval_covid):
        X, y = b
        if device == torch.device('cuda'):
            X, y['labels'], y['boxes'], y['masks'] = X.to(
                device), y['labels'].to(device), y['boxes'].to(
コード例 #9
0
    '79': 'oven',
    '80': 'toaster',
    '81': 'sink',
    '82': 'refrigerator',
    '84': 'book',
    '85': 'clock',
    '86': 'vase',
    '87': 'scissors',
    '88': 'teddybear',
    '89': 'hair drier',
    '90': 'toothbrush'
}

# Detections scoring below this confidence are discarded.
SCORE_THRESHOLD = 0.5

# COCO-pretrained Mask R-CNN in inference mode.
model = maskrcnn_resnet50_fpn(pretrained=True)
model.eval()

# NOTE(review): the variable/file pairing looks swapped (img1 <- "img2.jpeg",
# img2 <- "img1.jpeg") — confirm this is intentional.
img1 = skimage.io.imread("img2.jpeg")
img2 = skimage.io.imread("img1.jpeg")

# HWC uint8 images -> CHW float tensors in [0, 1]; the model takes a list of
# 3xHxW float tensors.
x = [
    torch.from_numpy(np.transpose(img1 / 255.0, axes=[2, 0,
                                                      1])).to(torch.float32),
    torch.from_numpy(np.transpose(img2 / 255.0, axes=[2, 0,
                                                      1])).to(torch.float32)
]

out = model(x)
# Boolean mask of first-image detections passing the score threshold.
filtered_idx = (out[0]['scores'] >= SCORE_THRESHOLD)
コード例 #10
0
def date_for_filename():
    """Return the current local time as a filename-safe 'YYYYMMDD_HHMM' string.

    Returns:
        str: e.g. '20240131_0905' (month/day/hour/minute zero-padded).
    """
    # A single strftime call replaces the previous hand-assembled, manually
    # zero-padded concatenation of tm_year/tm_mon/... fields.
    return time.strftime("%Y%m%d_%H%M")

if __name__=="__main__":
    model = maskrcnn_resnet50_fpn(pretrained=False, \
                              pretrained_backbone=False,\
                              num_classes=49)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    
    #model.load_state_dict(torch.load('/home/karrington/FantomHD/CDC4-5-6-7-8-9(CleanRedo89Ers)_train-json-20210831_1301.pth'))
    
    model = model.to(device)
    
    # create test and train dataset
    dataset = myOwnDataset(root=train_data_dir,
                          annotation=train_coco,
                          transforms=get_transform(train=True)
                          )
    dataset_test = myOwnDataset(root=train_data_dir,
                          annotation=test_coco,
                          transforms=get_transform(train=False)
コード例 #11
0
ファイル: train.py プロジェクト: kostas1515/clean_project
def main(args):
    """Train (or evaluate) a detection model according to `args`.

    Drives the full pipeline: distributed setup, dataset/dataloader
    construction, model creation (Faster R-CNN / RetinaNet / Mask R-CNN
    wrappers with an optional tf-idf class-weight tensor), optimizer and LR
    scheduler setup, optional checkpoint resume, and the epoch loop with
    per-epoch checkpointing and evaluation.
    """
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    dataset, num_classes = get_dataset(args.dataset, "train",
                                       get_transform(train=True),
                                       args.data_path)
    dataset_test, _ = get_dataset(args.dataset, "val",
                                  get_transform(train=False), args.data_path)

    print("Creating data loaders")
    # Distributed runs shard both splits across workers; otherwise training is
    # shuffled and evaluation is sequential.
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    # Batch images of similar aspect ratio together to reduce padding waste.
    if args.aspect_ratio_group_factor >= 0:
        group_ids = create_aspect_ratio_groups(
            dataset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids,
                                                  args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,
                                                            args.batch_size,
                                                            drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_sampler=train_batch_sampler,
        num_workers=args.workers,
        collate_fn=utils.collate_fn)

    # Evaluation always runs with batch_size=1.
    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   sampler=test_sampler,
                                                   num_workers=args.workers,
                                                   collate_fn=utils.collate_fn)

    print("Creating model")
    # NOTE(review): `kwargs` is assembled here (and extended with
    # rpn_score_thresh below) but never passed to the model constructors —
    # only the commented-out torchvision call used it. Confirm whether the
    # custom wrappers should receive these options.
    kwargs = {
        "trainable_backbone_layers": args.trainable_backbone_layers,
    }
    # Optional tf-idf class weighting: read per-class idf values from a CSV
    # column, or fall back to a uniform all-ones weight vector.
    if (args.tfidf):
        tfidf = pd.read_csv(f'../{args.dataset}_files/idf_{num_classes}.csv')[
            args.tfidf]
        tfidf = torch.tensor(tfidf, device='cuda').unsqueeze(0)
    else:
        tfidf = torch.ones(num_classes, device='cuda',
                           dtype=torch.float).unsqueeze(0)

    if "rcnn" in args.model:
        if args.rpn_score_thresh is not None:
            kwargs["rpn_score_thresh"] = args.rpn_score_thresh
    # NOTE(review): an unrecognized args.model leaves `model` unbound and
    # raises NameError at model.to(device) — consider an explicit error.
    if args.model == 'fasterrcnn_resnet50_fpn':
        model = frcnn.fasterrcnn_resnet50_fpn(pretrained=args.pretrained,
                                              num_classes=num_classes,
                                              tfidf=tfidf)
    elif args.model == 'retinanet_resnet50_fpn':
        model = retinanet.retinanet_resnet50_fpn(pretrained=args.pretrained,
                                                 num_classes=num_classes,
                                                 tfidf=tfidf)
    elif args.model == 'maskrcnn_resnet50_fpn':
        model = mask_rcnn.maskrcnn_resnet50_fpn(pretrained=args.pretrained,
                                                num_classes=num_classes,
                                                tfidf=tfidf)

    # model = torchvision.models.detection.__dict__[args.model](num_classes=num_classes, pretrained=args.pretrained,
    #                                                           **kwargs)
    model.to(device)

    # Keep a handle to the bare model so checkpoints exclude the DDP wrapper.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    # Resume model/optimizer/scheduler state and the epoch counter.
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        evaluate(model, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        # Reshuffle the per-worker shards each epoch in distributed mode.
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, optimizer, data_loader, device, epoch,
                        args.print_freq)
        lr_scheduler.step()
        if args.output_dir:
            utils.save_on_master(
                {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'args': args,
                    'epoch': epoch
                }, os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

        # evaluate after every epoch
        evaluate(model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))