Example #1
    def __init__(self, num_classes):
        super().__init__()
        model = torchvision.models.detection.maskrcnn_resnet50_fpn(
            pretrained=True)
        self.transform = model.transform
        self.backbone = model.backbone
        self.classifier = DeepLabHead(model.backbone.out_channels, num_classes)
Example #2
def deeplabv3_efficientnet(n):
    backbone = EfficientNetFeatureExtractor(n)

    inplanes = efficientnet_inplanes[n]
    classifier = DeepLabHead(inplanes, 3)

    return SimpleSegmentationModel(backbone, classifier)
Example #3
    def __init__(self, num_classes, backbone_fn, chip_size=224):
        super().__init__()
        if getattr(backbone_fn, '_is_multispectral', False):
            self.backbone = create_body(backbone_fn,
                                        pretrained=True,
                                        cut=_get_backbone_meta(
                                            backbone_fn.__name__)['cut'])
        else:
            self.backbone = create_body(backbone_fn, pretrained=True)

        backbone_name = backbone_fn.__name__

        ## Support for different backbones
        if "densenet" in backbone_name or "vgg" in backbone_name:
            hookable_modules = list(self.backbone.children())[0]
        else:
            hookable_modules = list(self.backbone.children())

        if "vgg" in backbone_name:
            modify_dilation_index = -5
        else:
            modify_dilation_index = -2

        if backbone_name == 'resnet18' or backbone_name == 'resnet34':
            module_to_check = 'conv'
        else:
            module_to_check = 'conv2'

        ## Hook at the index where we need to get the auxiliary logits out
        self.hook = hook_output(hookable_modules[modify_dilation_index])

        custom_idx = 0
        for i, module in enumerate(hookable_modules[modify_dilation_index:]):
            dilation = 2 * (i + 1)
            padding = 2 * (i + 1)
            for n, m in module.named_modules():
                if module_to_check in n:
                    m.dilation, m.padding, m.stride = (dilation, dilation), (
                        padding, padding), (1, 1)
                elif 'downsample.0' in n:
                    m.stride = (1, 1)

            if "vgg" in backbone_fn.__name__:
                if isinstance(module, nn.Conv2d):
                    dilation = 2 * (custom_idx + 1)
                    padding = 2 * (custom_idx + 1)
                    module.dilation, module.padding, module.stride = (
                        dilation, dilation), (padding, padding), (1, 1)
                    custom_idx += 1

        ## returns the size of various activations
        feature_sizes = model_sizes(self.backbone, size=(chip_size, chip_size))
        ## Get the number of channels present in the activation stored by the hook
        num_channels_aux_classifier = self.hook.stored.shape[1]
        ## Get number of channels in the last layer
        num_channels_classifier = feature_sizes[-1][1]

        self.classifier = DeepLabHead(num_channels_classifier, num_classes)
        self.aux_classifier = FCNHead(num_channels_aux_classifier, num_classes)
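The dilation loop above keeps the backbone's output stride small so the DeepLab head sees higher-resolution features. For plain torchvision ResNets, roughly the same effect is available through a built-in flag; a minimal sketch of that alternative (not part of the class above, torchvision import assumed):

# Sketch: torchvision's equivalent of the manual dilation edit above.
import torchvision

backbone = torchvision.models.resnet50(
    pretrained=True, replace_stride_with_dilation=[False, True, True])
# layer3 and layer4 now use dilated 3x3 convolutions with stride 1,
# so the final feature map is 1/8 of the input size instead of 1/32.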
Example #4
def _get_model():
    # DeepLabV3 head on a ResNet-101 backbone, using pretrained weights.
    model = models.segmentation.deeplabv3_resnet101(pretrained=True,
                                                    progress=True)
    # Feature vector size = 2048 (resnet101), output channels = 1
    model.classifier = DeepLabHead(2048, 1)

    return model
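A minimal usage sketch for _get_model(); the dummy input size and the sigmoid thresholding are assumptions about how the single-channel logits are consumed:

# Hypothetical inference call; torchvision segmentation models return a dict keyed by 'out'.
import torch

model = _get_model()
model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 512, 512)            # dummy RGB batch
    logits = model(x)['out']                   # shape: (1, 1, 512, 512)
    mask = torch.sigmoid(logits) > 0.5         # binary mask from the single output channel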
Example #5
    def __init__(self, num_classes):
        super(DeepSqueeze, self).__init__()
        backbone = squeeze.features

        classifier = DeepLabHead(512, num_classes)

        self.backbone = backbone
        self.classifier = classifier
        self.aux_classifier = None
Example #6
def _create_deeplab(num_class, backbone, pretrained=True, **kwargs):
    '''
    Create the default torchvision DeepLabV3 model with a pretrained ResNet-101 backbone.
    '''
    model = models.segmentation.deeplabv3_resnet101(pretrained=pretrained, progress=True, **kwargs)
    model = _DeepLabOverride(model.backbone, model.classifier, model.aux_classifier)
    model.classifier = DeepLabHead(2048, num_class)
    model.aux_classifier = FCNHead(1024, num_class)

    return model
Example #7
def efficient_deeplabv3(args):
    # args.backbone holds a name such as 'efficientnet-b0'
    efficientnet = EfficientNetExtractor.from_name(args.backbone,
                                                   in_channels=1)

    num_ch = efficientnet._conv_head.out_channels
    classifier = DeepLabHead(num_ch, args.num_classes)
    base_model = Deeplabv3
    model = base_model(efficientnet, classifier)
    return model
Example #8
def createDeepLabv3(outputchannels=1):
    """DeepLabv3 class with custom head
    Args:
        outputchannels (int, optional): The number of output channels
        in your dataset masks. Defaults to 1.
    Returns:
        model: Returns the DeepLabv3 model with the ResNet50 backbone.
    """
    model = models.segmentation.deeplabv3_resnet50(pretrained=True,
                                                   progress=True)
    model.classifier = DeepLabHead(2048, outputchannels)
    return model
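With a single output channel, a binary-segmentation loss such as BCEWithLogitsLoss pairs naturally with createDeepLabv3; a training-step sketch under that assumption (the loss choice, learning rate, and dummy tensors are not taken from the original):

# Hypothetical single training step; `images` and `masks` stand in for real batches
# of shape (N, 3, H, W) and (N, 1, H, W) respectively.
import torch

model = createDeepLabv3(outputchannels=1)
model.train()
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

images = torch.randn(2, 3, 256, 256)
masks = torch.randint(0, 2, (2, 1, 256, 256)).float()

optimizer.zero_grad()
loss = criterion(model(images)['out'], masks)
loss.backward()
optimizer.step()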
Example #9
    def __init__(self, in_chan=3, out_chan=2, pretrained=False):
        super(densenet50, self).__init__()
        self.model = torchvision.models.segmentation.deeplabv3_resnet50(
            pretrained=False, pretrained_backbone=pretrained)
        self.model.classifier = DeepLabHead(2048, out_chan)
        if in_chan != 3:
            self.model.backbone.conv1 = nn.Conv2d(
                in_chan, 64, kernel_size=7, stride=2, padding=3, bias=False)
Example #10
def deeplabv3_resnet101():
    backbone = resnet.__dict__['resnet101'](
        pretrained=True, replace_stride_with_dilation=[False, True, True])

    return_layers = {'layer4': 'out'}

    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

    inplanes = 2048
    classifier = DeepLabHead(inplanes, 3)

    return SimpleSegmentationModel(backbone, classifier)
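IntermediateLayerGetter rewires the ResNet so its forward pass returns a dict of the requested layers; a small check of what the wrapped backbone produces (the input size is arbitrary, and the resnet/IntermediateLayerGetter imports from the snippet above are assumed to be available):

# Sketch: inspect the backbone output before it reaches DeepLabHead.
import torch

backbone = resnet.__dict__['resnet101'](
    pretrained=True, replace_stride_with_dilation=[False, True, True])
backbone = IntermediateLayerGetter(backbone, return_layers={'layer4': 'out'})

features = backbone(torch.randn(1, 3, 224, 224))
print(features['out'].shape)   # (1, 2048, 28, 28): 2048 channels at 1/8 resolution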
Example #11
def createDeepLabv3(outputchannels, backboneFreez=False):
    model = models.segmentation.deeplabv3_resnet101(pretrained=True, progress=True)
    
    if backboneFreez:
        for param in model.parameters():
            param.requires_grad = False

    # Allocate a new classifier
    model.classifier = DeepLabHead(2048, outputchannels)

    # Set the model in training mode
    model.train()
    return model
Example #12
def get_model(num_classes):
    # load a semantic segmentation model pre-trained on COCO
    model = deeplabv3_resnet101(pretrained=True)

    for param in model.parameters():
        param.requires_grad = False

    deeplab_head_in_channels = model.classifier[0].convs[0][0].in_channels
    fcn_head_in_channels = model.classifier[1].in_channels

    model.classifier = DeepLabHead(deeplab_head_in_channels, num_classes)

    return model
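Since every pretrained parameter is frozen before the new head is attached, only the fresh DeepLabHead weights remain trainable; a sketch of passing just those to an optimizer (the optimizer choice and learning rate are assumptions):

# Hypothetical fine-tuning setup for get_model(); only the new head has requires_grad=True.
import torch

model = get_model(num_classes=3)
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-4)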
Example #13
    def __init__(self, num_classes):
        super().__init__()

        self.num_classes = num_classes

        self.segm = deeplabv3_resnet101(pretrained=True)

        for param in self.segm.parameters():
            param.requires_grad = False

        self.segm.classifier = DeepLabHead(in_channels=2048,
                                           num_classes=num_classes)

        self.segm.classifier.apply(_init_weights)
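_init_weights is referenced but not shown here; a plausible implementation, given that it is applied module-by-module via .apply() (this exact body is an assumption, not the original helper):

# Hypothetical weight-init helper matching the `classifier.apply(_init_weights)` call above.
import torch.nn as nn

def _init_weights(m):
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.ones_(m.weight)
        nn.init.zeros_(m.bias)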
Example #14
    def __loadSegModel(self):

        seg_model = models.segmentation.deeplabv3_resnet101(pretrained=True,
                                                            progress=True)

        seg_model.classifier = DeepLabHead(2048, 1)

        checkpoint = torch.load(self.seg_checkpoint_path,
                                map_location=self.device)

        seg_model.load_state_dict(checkpoint['model_state_dict'])

        seg_model.eval()

        return seg_model
Example #15
def createDeepLabv3(outputchannels=1, mode='train'):
    """DeepLabv3 class with custom head
    Args:
        outputchannels (int, optional): The number of output channels
        in your dataset masks. Defaults to 1.
        mode (str, optional): 'train' or 'eval'. Defaults to 'train'.
    Returns:
        model: Returns the DeepLabv3 model with the ResNet50 backbone.
    """
    model = models.segmentation.deeplabv3_resnet50(pretrained=True,
                                                   progress=True)
    model.classifier = DeepLabHead(2048, outputchannels)

    # Set the model in training mode
    if mode == 'train':
        model.train()
    else:
        model.eval()

    return model.float()
Example #16
def deeplabv3(pretrained=False,
              resnet="res103",
              head_in_ch=2048,
              num_classes=21):
    resnet = {"res53": resnet53, "res103": resnet103}[resnet]

    net = SmallDeepLab(backbone=IntermediateLayerGetter(resnet(
        pretrained=True, replace_stride_with_dilation=[False, True, True]),
                                                        return_layers={
                                                            'layer2': 'res2',
                                                            'layer4': 'out'
                                                        }),
                       classifier=DeepLabHead(head_in_ch, num_classes))
    if pretrained:
        state_dict = load_state_dict_from_url(
            'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
            progress=True)
        net.load_state_dict(state_dict)
    return net
Example #17
def pre_deeplabv3_resnet101(in_channel, out_channel):
    model = deeplabv3_resnet101(pretrained=False)
    url = 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth'
    model_dict = model.state_dict()
    pretrained_dict = model_zoo.load_url(url, progress=False)
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    model.backbone.conv1 = nn.Conv2d(in_channel,
                                     64,
                                     kernel_size=7,
                                     stride=2,
                                     padding=3,
                                     bias=False)
    model.classifier = DeepLabHead(2048, out_channel)
    return model
Example #18
def deeplabv3(pretrained=False,
              resnet="res103",
              head_in_ch=2048,
              num_classes=21):
    resnet = {
        "res53": resnet53,
        "res103": resnet103,
        "res50": resnet50,
        "res101": resnet101
    }[resnet]

    net = SmallDeepLab(backbone=IntermediateLayerGetter(resnet(
        pretrained=True, replace_stride_with_dilation=[False, True, True]),
                                                        return_layers={
                                                            'layer2': 'res2',
                                                            'layer4': 'out'
                                                        }),
                       classifier=DeepLabHead(head_in_ch, num_classes))
    return net
Example #19
def createDeepLabv3(outputchannels=1):
    """DeepLabv3 class with custom head
    Args:
        outputchannels (int, optional): The number of output channels
        in your dataset masks. Defaults to 1.
    Returns:
        model: Returns the DeepLabv3 model with the ResNet101 backbone, with the
        first convolution replaced so the network accepts 2-channel input.
    """
    model = models.segmentation.deeplabv3_resnet101(pretrained=False,
                                                    progress=True)
    model.backbone.conv1 = nn.Conv2d(2,
                                     64,
                                     kernel_size=(7, 7),
                                     stride=(2, 2),
                                     padding=(3, 3),
                                     bias=False)
    model.classifier = DeepLabHead(2048, outputchannels)
    # Set the model in training mode
    # model.train()
    return model
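Because only backbone.conv1 is replaced, the rest of the network is untouched and a 2-channel input flows through normally; a quick shape check (the input size is arbitrary):

# Sketch: verify the 2-channel variant's forward pass.
import torch

model = createDeepLabv3(outputchannels=1)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 2, 256, 256))['out']
print(out.shape)   # (1, 1, 256, 256)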
Example #20
def predict(model_checkpoint, image_path, out_file):
    """
    Args:
        model_checkpoint (string): path to model checkpoint
        image_path (string): path to an aerial image
        out_file (string): path to save the prediction mask.

    """
    DEVICE = torch.device("cpu")
    RGB_MEAN = [0.485, 0.456, 0.406]
    RGB_STD = [0.229, 0.224, 0.225]
    image = Image.open(str(image_path))
    image = image.resize((512, 512), Image.BILINEAR)
    image_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(RGB_MEAN, RGB_STD)])

    image_tensor = image_transforms(image)[None]
    # model = FastSCNN(num_classes=3)
    # model = UNet(n_channels=3, n_classes=3, bilinear=True)
    model = deeplabv3(pretrained=False, progress=True)
    model.classifier = DeepLabHead(2048, 3)
    model.eval()
    model.to(DEVICE)
    image_tensor = image_tensor.to(device=DEVICE, dtype=torch.float)
    model.load_state_dict(
        torch.load(model_checkpoint,
                   map_location=lambda storage, loc: storage))
    out = model(image_tensor)['out'][0].squeeze()
    out_max = out.max(0, keepdim=False)[1].cpu().numpy()

    final_image = np.zeros((out_max.shape[0], out_max.shape[1], 3),
                           dtype=np.uint8)
    final_image[(out_max == 0), :] = np.array([255, 255, 255])
    final_image[(out_max == 1), :] = np.array([255, 0, 0])
    final_image[(out_max == 2), :] = np.array([0, 0, 255])
    # image.show()

    final_image_pil = Image.fromarray(np.uint8(final_image))
    final_image_pil.show()
    final_image_pil.save(out_file)
Example #21
    def __init__(self, model_weight, img_dir, out_dir, device):
        self.model = models.segmentation.deeplabv3_resnet50(pretrained=False,
                                                            progress=True)
        self.model.classifier = DeepLabHead(2048, 1)

        self.model.to(device)
        model_weight = torch.load(model_weight)
        model_weight = model_weight["model_state_dict"]

        self.model.load_state_dict(model_weight, strict=False)

        self.img = Prediction(img_dir=img_dir)
        self.dataloader = DataLoader(self.img,
                                     batch_size=16,
                                     num_workers=4,
                                     shuffle=False)

        self.out_dir = out_dir
        os.makedirs(self.out_dir, exist_ok=True)

        self.device = device
Example #22
def main():
    nlabel = 6
    df = pd.read_csv(os.path.join(args.root, "train.csv"))
    dataset = ProstateSeg(df, args.root, 2048)
    loader = DataLoader(dataset,num_workers = 4, pin_memory=True)
    model = models.segmentation.deeplabv3_resnet101(
                pretrained=True, progress=True)
    model.classifier = DeepLabHead(2048, nlabel)
    print('Evaluate using {}...'
            .format(args.checkpoint))
    weight_file=os.path.join(args.checkpoint)
    model.load_state_dict(torch.load(weight_file, map_location=lambda storage, loc: storage))
    
    if not os.path.exists(args.dump):
        os.mkdir(args.dump)
    
    model.eval()
    model.to(device)
    with torch.no_grad():
        t0 = time.time()
        for i, inputs in enumerate(loader):
            t1 = time.time()
            imid = df.iloc[i].image_id
            provider =  df.iloc[i].data_provider
            grade = df.iloc[i].isup_grade
            score = df.iloc[i].gleason_score
            inputs = inputs.to(device)   
            output = model(inputs)
            pred = output['out'].argmax(dim=1).detach().cpu()
            
            pp = np.zeros(nlabel)
            npix = np.prod(pred.shape)
            # separate loop variable so the batch index `i` from enumerate() is not clobbered
            for c in range(nlabel):
                pp[c] = pred.eq(c).sum().item() / npix
            pp = pp[1:]/(pp[1:].sum())
            t2 = time.time()
            print("{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}|{},{},{},{},{:.1f}s/{:.1f}s".format(
                    *pp, grade, score, provider, imid, t2-t1, (t2-t0)/(i+1)))
            torch.save(pred, os.path.join(args.dump, imid))
Example #23
    def fit_with(lr: float, wd: float):
        model = torchvision.models.segmentation.deeplabv3_resnet50(
            pretrained=True)
        model.classifier = DeepLabHead(2048, 2)
        model.to(device)

        criterion = torch.nn.CrossEntropyLoss().to(device)

        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=lr,
                                     weight_decay=wd)

        model, acc, loss = train_model(model=model,
                                       criterion=criterion,
                                       dls=dataloaders,
                                       opt=optimizer,
                                       n_classes=2,
                                       name=f'h_param_{lr:.5f}_{wd:.5f}',
                                       log_path=args.logs_dir,
                                       epochs=args.epochs,
                                       verbose=False)

        return acc
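fit_with maps a (lr, wd) pair to validation accuracy, which is the shape of objective that black-box tuners expect; a hypothetical hookup using the bayes_opt package (the tuner, bounds, and iteration counts are assumptions, not part of the original):

# Hypothetical hyperparameter search around fit_with(); bounds are illustrative only.
from bayes_opt import BayesianOptimization

optimizer = BayesianOptimization(
    f=fit_with,
    pbounds={'lr': (1e-5, 1e-2), 'wd': (1e-6, 1e-3)},
    random_state=42,
)
optimizer.maximize(init_points=2, n_iter=10)
print(optimizer.max)   # best accuracy and the (lr, wd) that produced it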
Example #24
    def __init__(self, num_classes: int = 2, ignore_index: Optional[int] = None, lr: float = 0.001,
                 weight_decay: float = 0.001, aux_loss_factor: float = 0.3):

        super().__init__()
        self.num_classes = num_classes
        self.ignore_index = ignore_index
        self.lr = lr
        self.weight_decay = weight_decay
        self.aux_loss_factor = aux_loss_factor

        # Create model from pre-trained DeepLabv3
        self.model = deeplabv3_resnet101(pretrained=True, progress=True)
        self.model.aux_classifier = FCNHead(1024, self.num_classes)
        self.model.classifier = DeepLabHead(2048, self.num_classes)

        # Setup trainable layers
        self.model.requires_grad_(True)
        self.model.backbone.requires_grad_(False)

        # Loss function and metrics
        self.focal_tversky_loss = FocalTverskyMetric(
            self.num_classes,
            alpha=0.7,
            beta=0.3,
            gamma=4.0 / 3.0,
            ignore_index=self.ignore_index,
        )
        self.accuracy_metric = Accuracy(ignore_index=self.ignore_index)
        self.iou_metric = JaccardIndex(
            num_classes=self.num_classes,
            reduction="none",
            ignore_index=self.ignore_index,
        )
        self.precision_metric = Precision(num_classes=self.num_classes, ignore_index=self.ignore_index,
                                          average='weighted', mdmc_average='global')
        self.recall_metric = Recall(num_classes=self.num_classes, ignore_index=self.ignore_index,
                                    average='weighted', mdmc_average='global')
Example #25
def main():
    # dataset
    train_df = pd.read_csv(os.path.join(args.root, "train.csv"))
    is_radboud = (train_df['data_provider'] == 'radboud')
    by_radboud = train_df[is_radboud]
    #by_karolinska = train_df[np.logical_not(is_radboud)]
    num = 0
    for idx, row in by_radboud.iterrows():
        img_id = row['image_id']
        mask_path = os.path.join(args.root, "train_label_masks",
                                 img_id + "_mask.tiff")
        if not os.path.isfile(mask_path):
            num += 1
            #print("{}:{} mask not work!".format(num,img_id))
            by_radboud = by_radboud.drop(idx)

    df = {}
    df['train'], df['val'] = train_test_split(by_radboud,
                                              stratify=by_radboud.isup_grade,
                                              test_size=20,
                                              random_state=42)

    dataset = {
        'val':
        ProstateSeg(df['val'], args.root, args.size,
                    (args.crop_size, args.crop_size), 'val')
    }
    loader = {
        'val':
        DataLoader(dataset['val'], num_workers=args.workers, pin_memory=True)
    }
    model = models.segmentation.deeplabv3_resnet101(pretrained=True,
                                                    progress=True)
    model.classifier = DeepLabHead(2048, nlabel)
    if args.checkpoint:
        print('Resuming training from epoch {}, loading {}...'.format(
            args.resume_epoch, args.checkpoint))
        weight_file = os.path.join(args.output_folder, args.checkpoint)
        model.load_state_dict(
            torch.load(weight_file, map_location=lambda storage, loc: storage))

    model.to(device)
    criterion = FocalLoss(alpha=torch.tensor(
        [1, 1.4, 6, 6, 5, 8], dtype=torch.float32, device=device))
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    for epoch in range(args.resume_epoch, args.epochs):
        s1, s2 = int(0.9 * args.crop_size), int(1.1 * args.crop_size)
        crop = (randint(s1, s2), randint(s1, s2))
        dataset['train'] = ProstateSeg(df['train'], args.root, args.size, crop,
                                       'train')
        loader['train'] = DataLoader(dataset['train'],
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.workers,
                                     pin_memory=True)
        adjust_lr(optimizer, epoch, args)
        for phase in ['train', 'val']:
            t0 = time.time()
            print("========={}:{}=========".format(phase, epoch))
            if phase == 'train':
                model.train()
            else:
                model.eval()
            num = 0
            correct = 0
            nums = np.zeros(6, dtype=int)
            pros = np.zeros(6, dtype=int)
            corrects = np.zeros(6, dtype=int)
            for i, (inputs, masks) in enumerate(loader[phase]):
                t1 = time.time()
                if i == 0: print(inputs.shape)
                inputs = inputs.to(device)
                masks = masks.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    output = model(inputs)
                    loss = criterion(output['out'], masks)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                    pred = output['out'].argmax(dim=1)
                    correct = pred.eq(masks).sum().item()
                    npixel = np.prod(masks.shape)
                    acc = correct * 100 / npixel
                    num += masks.size(0)

                    if (i + 1) % args.log == 0:
                        t2 = time.time()
                        s = "({},{:.1f}s,{:.1f}s) Loss:{:.3f} Acc:{:.3f}"
                        print(
                            s.format(num, t2 - t1, (t2 - t0) / (i + 1),
                                     loss.item(), acc))
                    if phase == 'val':
                        for c in range(nlabel):
                            t = masks.eq(c)
                            p = pred.eq(c)
                            nums[c] += t.sum().item()
                            pros[c] += p.sum().item()
                            corrects[c] += (p & t).sum().item()

            if epoch % 1 == 0 and phase == "train":
                torch.save(
                    model.state_dict(),
                    os.path.join(args.output_folder,
                                 "deeplab-{}.pth".format(epoch)))
            if phase == 'val':
                print("recall:" + "|".join(
                    ["{:.5f}".format(c / n) for c, n in zip(corrects, nums)]))
                print("precision:" + "|".join(
                    ["{:.5f}".format(c / p) for c, p in zip(corrects, pros)]))
Example #26
    def __init__(self):
        super().__init__()
        self.model = deeplabv3(pretrained=False, progress=True)
        self.model.classifier = DeepLabHead(2048, 3)
Example #27
def loadModel(model_arch="",
              classes=None,
              pre_trained_path=None,
              expType=None,
              trainable_backbone_flag=False,
              lower_features=False):
    print("Load model architecture ... ")

    if (model_arch == "deeplabv3_resnet101_orig"):
        print("deeplab_resnet architecture selected ...")
        model = models.segmentation.deeplabv3_resnet101(pretrained=True,
                                                        progress=True)

        for params in model.parameters():
            params.requires_grad = trainable_backbone_flag

        model.classifier[-1] = torch.nn.Conv2d(256,
                                               len(classes),
                                               kernel_size=(1, 1))
        model.aux_classifier[-1] = torch.nn.Conv2d(256,
                                                   len(classes),
                                                   kernel_size=(1, 1))
        features = model.backbone

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])
        return model, features

    elif (model_arch == "fcn_resnet101_orig"):
        print("deeplab_resnet architecture selected ...")
        model = models.segmentation.fcn_resnet101(pretrained=True,
                                                  progress=True)

        for params in model.parameters():
            params.requires_grad = trainable_backbone_flag

        model.classifier[-1] = torch.nn.Conv2d(512,
                                               len(classes),
                                               kernel_size=(1, 1))
        model.aux_classifier[-1] = torch.nn.Conv2d(256,
                                                   len(classes),
                                                   kernel_size=(1, 1))
        features = model.backbone

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])

        return model, features

    elif (model_arch == "deeplabv3_resnet101"):
        print("deeplabv3_resnet101 architecture selected ...")
        backbone_net = CNN(model_arch="resnet101",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(DeepLabHead(256, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(DeepLabHead(2048, len(classes)),
                                       # nn.Softmax()
                                       )

        features = backbone_net
        model = models.segmentation.DeepLabV3(backbone=backbone_net,
                                              classifier=classifier,
                                              aux_classifier=None)

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])
        return model, features

    elif (model_arch == "deeplabv3_vgg16"):
        print("deeplabv3_vgg architecture selected ...")
        # backbone_net = CNN(model_arch="resnet101", n_classes=len(classes), include_top=False)
        backbone_net = CNN(model_arch="vgg16",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(DeepLabHead(64, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(DeepLabHead(512, len(classes)),
                                       # nn.Softmax()
                                       )

        features = backbone_net
        model = models.segmentation.DeepLabV3(backbone=backbone_net,
                                              classifier=classifier,
                                              aux_classifier=None)
        #print(model)
        #exit()
        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])

        # Find total parameters and trainable parameters
        total_params = sum(p.numel() for p in model.parameters())
        print("total_params:" + str(total_params))
        total_trainable_params = sum(p.numel() for p in model.parameters()
                                     if p.requires_grad)
        print("total_trainable_params: " + str(total_trainable_params))
        #exit()

        return model, features

    elif (model_arch == "deeplabv3_mobilenet"):
        print("deeplabv3_mobilenet architecture selected ...")
        backbone_net = CNN(model_arch="mobilenet",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(DeepLabHead(32, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(DeepLabHead(1280, len(classes)),
                                       # nn.Softmax()
                                       )

        features = backbone_net
        model = models.segmentation.DeepLabV3(backbone=backbone_net,
                                              classifier=classifier,
                                              aux_classifier=None)

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(pre_trained_path)
            model.load_state_dict(model_dict_state['net'])

        return model, features

    elif (model_arch == "deeplabv3_squeezenet"):
        print("deeplabv3_mobilenet architecture selected ...")
        backbone_net = CNN(model_arch="squeezenet",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(DeepLabHead(128, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(DeepLabHead(512, len(classes)),
                                       # nn.Softmax()
                                       )

        features = backbone_net
        model = models.segmentation.DeepLabV3(backbone=backbone_net,
                                              classifier=classifier,
                                              aux_classifier=None)

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])

        return model, features

    elif (model_arch == "fcn_vgg16"):
        print("fcn_vgg16 architecture selected ...")
        backbone_net = CNN(model_arch="vgg16",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(FCNHead(64, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(FCNHead(512, len(classes)),
                                       # nn.Softmax()
                                       )
        features = backbone_net
        model = models.segmentation.FCN(backbone=backbone_net,
                                        classifier=classifier,
                                        aux_classifier=None)
        # print(model)

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])

        return model, features
    elif (model_arch == "fcn_resnet101"):
        print("fcn_resnet101 architecture selected ...")
        backbone_net = CNN(model_arch="resnet101",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(FCNHead(256, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(FCNHead(2048, len(classes)),
                                       # nn.Softmax()
                                       )

        features = backbone_net
        model = models.segmentation.FCN(backbone=backbone_net,
                                        classifier=classifier,
                                        aux_classifier=None)

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])

        # Find total parameters and trainable parameters
        total_params = sum(p.numel() for p in model.parameters())
        print("total_params:" + str(total_params))
        total_trainable_params = sum(p.numel() for p in model.parameters()
                                     if p.requires_grad)
        print("total_trainable_params: " + str(total_trainable_params))
        #exit()

        return model, features

    elif (model_arch == "fcn_squeezenet"):
        print("deeplabv3_squeezenet architecture selected ...")
        backbone_net = CNN(model_arch="squeezenet",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(FCNHead(128, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(FCNHead(512, len(classes)),
                                       # nn.Softmax()
                                       )

        features = backbone_net
        model = models.segmentation.FCN(backbone=backbone_net,
                                        classifier=classifier,
                                        aux_classifier=None)

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])

        # Find total parameters and trainable parameters
        total_params = sum(p.numel() for p in model.parameters())
        print("total_params:" + str(total_params))
        total_trainable_params = sum(p.numel() for p in model.parameters()
                                     if p.requires_grad)
        print("total_trainable_params: " + str(total_trainable_params))
        # exit()
        return model, features

    elif (model_arch == "fcn_mobilenet"):
        print("deeplabv3_mobilenet architecture selected ...")
        backbone_net = CNN(model_arch="mobilenet",
                           n_classes=len(classes),
                           include_top=False,
                           pretrained=trainable_backbone_flag,
                           lower_features=lower_features)

        if (lower_features == True):
            classifier = nn.Sequential(FCNHead(32, len(classes)),
                                       # nn.Softmax()
                                       )
        else:
            classifier = nn.Sequential(FCNHead(1280, len(classes)),
                                       # nn.Softmax()
                                       )

        features = backbone_net
        model = models.segmentation.FCN(backbone=backbone_net,
                                        classifier=classifier,
                                        aux_classifier=None)

        if (pre_trained_path != None):
            print("load pre-trained-weights ... ")
            model_dict_state = torch.load(
                pre_trained_path)  # + "/best_model.pth")
            model.load_state_dict(model_dict_state['net'])

        # Find total parameters and trainable parameters
        total_params = sum(p.numel() for p in model.parameters())
        print("total_params:" + str(total_params))
        total_trainable_params = sum(p.numel() for p in model.parameters()
                                     if p.requires_grad)
        print("total_trainable_params: " + str(total_trainable_params))
        # exit()
        return model, features

    else:
        print("ERROR: select valid model architecture!")
        exit()
Example #28
    def __init__(self, num_classes=12):
        super(TorchVisionDeepLabv3_ResNet50, self).__init__()
        self.seg_model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=True)
        self.seg_model.classifier = DeepLabHead(2048, num_classes)
Example #29
        kwargs = {}
        if args.num_classes and not segmentation:
            logging.info("Using num_classes = %d" % args.num_classes)
            kwargs["num_classes"] = args.num_classes

        model = model_classes[mname](pretrained=args.pretrained, progress=args.verbose, **kwargs)

        if args.extract != None:
            logging.info("Extract layers: " + ", ".join(args.extract))
            return_nodes = {
                layer: layer for layer in args.extract
            }
            model = create_feature_extractor(model, return_nodes=return_nodes)

        if segmentation and 'deeplabv3' in mname:
            model.classifier = DeepLabHead(2048, args.num_classes)
        
        if args.to_dd_native:
            # Make model NativeModuleWrapper compliant
            model = Wrapper(model)

        model.eval()

        # tracing or scripting model (default)
        if args.trace:
            example = get_image_input(args.batch_size, args.img_width, args.img_height) 
            script_module = torch.jit.trace(model, example)
        else:
            script_module = torch.jit.script(model)

    filename = os.path.join(
Example #30
"""
# Set the number of output channels (= number of classes)
num_classes = 7

# Model definition
seg_model = models.segmentation.deeplabv3_resnet101(pretrained=True,
                                                    progress=True)

# The auxiliary classifier is removed, and the pretrained weights are frozen
seg_model.aux_classifier = None
for param in seg_model.parameters():
    param.requires_grad = False

# The pretrained classifier is replaced by a new one with a custom number of classes.
# Since it comes after the freeze, its weights won't be frozen. They are the ones that we will fine-tune.
seg_model.classifier = DeepLabHead(2048, num_classes)

# Model serialisation
model_filename = 'custom_segmented.pt'
pathToModel = pathlib.Path.cwd().joinpath('..', 'models', model_filename)
print('File name for saved model: ', pathToModel)

# Loss function
# combines nn.LogSoftmax() and nn.NLLLoss(), well suited for multiclass classification problems
criterion = torch.nn.CrossEntropyLoss()

# Optimizer definition
optimizer = torch.optim.SGD(seg_model.parameters(), lr=0.001, momentum=0.9)

# Use cpu/gpu based on availability
seg_model.to(device)
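With the frozen backbone, the criterion, and the optimizer in place, training reduces to the usual loop over batches; a minimal sketch, assuming a train_loader that yields image batches with class-index masks and the device defined earlier (loop length and loader are assumptions):

# Hypothetical fine-tuning loop for the new head; train_loader and num_epochs are placeholders.
num_epochs = 10
for epoch in range(num_epochs):
    seg_model.train()
    for images, targets in train_loader:
        images = images.to(device)
        targets = targets.long().to(device)     # class-index masks of shape (N, H, W)
        optimizer.zero_grad()
        out = seg_model(images)['out']          # (N, num_classes, H, W) logits
        loss = criterion(out, targets)
        loss.backward()
        optimizer.step()

# Save the fine-tuned weights once training is done
torch.save(seg_model.state_dict(), pathToModel)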