Example #1
    def __init__(self, smg_config=None):
        if not smg_config:
            smg_config = defaultconfig
        self.cammodel = DenseNet(block_config=(6, 12, 48, 32), num_classes=4)
        self.gbpmodel = DenseNet(block_config=(6, 12, 48, 32), num_classes=4)
        self.cammodel.load_state_dict(
            torch.load(smg_config["pretrain_path"],
                       map_location='cpu')["state_dict"])
        self.gbpmodel.load_state_dict(
            torch.load(smg_config["pretrain_path"],
                       map_location='cpu')["state_dict"])
        self.cammodel.eval()
        self.gbpmodel.eval()
        self.gradcam = GradCam(self.cammodel, "features.denseblock4",
                               smg_config["use_cuda"])
        self.guidebp = GuidedBackpropReLUModel(self.gbpmodel,
                                               smg_config["use_cuda"])
        self.useunet = False
        if smg_config["unet_path"]:
            self.useunet = True
            self.unet = UNet(3, 3)
            self.unet.load_state_dict(
                torch.load(smg_config["unet_path"], map_location='cpu'))
            self.unet.eval()
            if smg_config["use_cuda"]:
                self.unet.cuda()
Example #2
def densenet169(pretrained=False, model_path=None, **kwargs):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        model_path (str, optional): local checkpoint to load instead of
            downloading from model_urls
    """
    model = DenseNet(num_init_features=64,
                     growth_rate=32,
                     block_config=(6, 12, 32, 32),
                     **kwargs)
    if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
        # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
        # They are also in the checkpoints in model_urls. This pattern is used
        # to find such keys.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )

        if model_path is not None:
            state_dict = torch.load(model_path)
        else:
            state_dict = model_zoo.load_url(model_urls['densenet169'])

        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
    return model
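A minimal usage sketch for the densenet169 factory above, assuming the standard ImageNet checkpoint behind model_urls['densenet169'] and a 224x224 input (both illustrative):

import torch

model = densenet169(pretrained=True)  # downloads and remaps the ImageNet weights
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # shape (1, 1000)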
Example #3
def DENSENets(arch, cfg, model_path):
    """
    Custom interface for model_factory
    :param arch: dense121, dense201, dense161, dense264
    :param cfg:  arch configs
    :param model_path: state_dict.pth
    :return: a blank model or pre-trained model
    """
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.'
        r'((?:[12])\.(?:weight|bias|running_mean|running_var))$')

    model = DenseNet(**cfg)

    state_dict = {}
    if os.path.isfile(model_path):
        print('\n=> loading model.pth from %s.' % model_path)
        state_dict = torch.load(model_path)
    elif model_path == 'download':
        print('\n=> downloading model.pth from %s.' % model_urls[arch])
        state_dict = model_zoo.load_url(model_urls[arch])
    else:
        assert model_path == '', "<model_path> must be a valid checkpoint path, 'download', or ''."

    if state_dict:
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
        print('\nSuccess: loaded model.pth from %s.\n' % model_path)

    return model
Example #4
def densenet161(num_classes=17, pretrained=False):
    model = DenseNet(num_init_features=96,
                     growth_rate=48,
                     block_config=(6, 12, 36, 24),
                     num_classes=num_classes)
    if pretrained:
        # load model dictionary
        model_dict = model.state_dict()
        # load pretrained model
        pretrained_dict = model_zoo.load_url(model_urls['densenet161'])
        # update model dictionary using pretrained model without classifier layer
        model_dict.update({
            key: pretrained_dict[key]
            for key in pretrained_dict.keys() if 'classifier' not in key
        })
        model.load_state_dict(model_dict)
    return model
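Because the classifier keys are skipped above, the 17-way head starts from random initialization. A common follow-up, sketched here with an illustrative optimizer and learning rate, is to train only that head while keeping the pretrained features frozen:

model = densenet161(num_classes=17, pretrained=True)
for p in model.features.parameters():
    p.requires_grad = False  # freeze the ImageNet feature extractor
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=1e-3)  # illustrative choice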
Example #5
File: ictcf169.py Project: qianliu1219/iMP
    def __init__(self):
        super(DenseNetModel, self).__init__()

        self.dense_net = DenseNet(growth_rate=32,
                                  block_config=(6, 12, 32, 32),
                                  num_classes=2)

        self.criterion = nn.CrossEntropyLoss()
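The excerpt does not show this class's forward method; the sketch below only exercises the two attributes defined above, with an illustrative batch (image size and batch size are assumptions):

model = DenseNetModel()
images = torch.randn(4, 3, 224, 224)
labels = torch.randint(0, 2, (4,))
logits = model.dense_net(images)  # (4, 2) class scores
loss = model.criterion(logits, labels)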
Example #6
def test_pytorch():
    import torch

    from torchvision.models import DenseNet

    model = DenseNet()
    t = torch.randn(1, 3, 224, 224)
    out = model(t)
    assert out.shape[1] == 1000
Example #7
File: models.py Project: LIMr1209/ai_tools
    def __init__(self, NUM_CLASSES):
        super(DenseNet201, self).__init__()
        self.model_name = "DenseNet201"
        self.model = DenseNet(
            num_init_features=64,
            growth_rate=32,
            block_config=(6, 12, 48, 32),
            num_classes=NUM_CLASSES,
        )
Example #8
File: models.py Project: LIMr1209/ai_tools
    def __init__(self, NUM_CLASSES):
        super(DenseNet161, self).__init__()
        self.model_name = "DenseNet161"
        self.model = DenseNet(
            num_init_features=96,
            growth_rate=48,
            block_config=(6, 12, 36, 24),
            num_classes=NUM_CLASSES,
        )
Example #9
def dn_bc_100_12_fc():
    # DenseNet-BC-100-12 style model: three dense blocks of 16 layers each,
    # growth rate 12, 24 initial features, 100 output classes
    model = DenseNet(growth_rate=12,
                     block_config=(16, 16, 16),
                     num_init_features=24,
                     num_classes=100)
    # swap the default 7x7 ImageNet stem for a 3x3 convolution and drop the
    # stem's batch norm / ReLU from the feature Sequential
    conv = nn.Conv2d(3, 24, kernel_size=3, padding=1, bias=False)
    nn.init.kaiming_normal_(conv.weight)
    model.features.conv0 = conv
    del model.features.norm0, model.features.relu0
    return model
Example #10
def densenet161(pretrained=False, **kwargs):
    if pretrained:
        model = DenseNet(num_init_features=96,
                         growth_rate=48,
                         block_config=(6, 12, 36, 24),
                         **kwargs)
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        pretrained_state_dict = torch.load(
            './Authority/densenet161-8d451a50.pth'
        )  # the load_url function downloads or loads the matching pretrained model according to the model_urls dict
        for key in list(pretrained_state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                pretrained_state_dict[new_key] = pretrained_state_dict[key]
                del pretrained_state_dict[key]
        now_state_dict = model.state_dict()  # the model's own state dict
        pretrained_state_dict.pop('classifier.weight')
        pretrained_state_dict.pop('classifier.bias')
        now_state_dict.update(pretrained_state_dict)
        model.load_state_dict(now_state_dict)
        # Finally, calling load_state_dict initializes the constructed network with
        # the pretrained parameters; this is the standard PyTorch way to initialize
        # one model's layers from another model's parameters. load_state_dict also
        # takes an important strict argument, which defaults to True and requires
        # the pretrained model's layers to match your network's layers exactly
        # (e.g. layer names and dimensions).
        return model
    return DenseNet(num_init_features=96,
                    growth_rate=48,
                    block_config=(6, 12, 36, 24),
                    **kwargs)
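The comment above mentions load_state_dict's strict flag. As an alternative to merging into now_state_dict by hand, a sketch of the non-strict variant (behavior as in recent PyTorch versions; uses the same model and pretrained_state_dict as above):

# With strict=False, the classifier keys popped from the checkpoint are reported
# as missing instead of raising, and the randomly initialized classifier is kept.
result = model.load_state_dict(pretrained_state_dict, strict=False)
print(result.missing_keys)  # expected: ['classifier.weight', 'classifier.bias']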
Example #11
class SalientMapGenerator:
    def __init__(self, smg_config=None):
        if not smg_config:
            smg_config = defaultconfig
        self.cammodel = DenseNet(block_config=(6, 12, 48, 32), num_classes=4)
        self.gbpmodel = DenseNet(block_config=(6, 12, 48, 32), num_classes=4)
        self.cammodel.load_state_dict(
            torch.load(smg_config["pretrain_path"],
                       map_location='cpu')["state_dict"])
        self.gbpmodel.load_state_dict(
            torch.load(smg_config["pretrain_path"],
                       map_location='cpu')["state_dict"])
        self.cammodel.eval()
        self.gbpmodel.eval()
        self.gradcam = GradCam(self.cammodel, "features.denseblock4",
                               smg_config["use_cuda"])
        self.guidebp = GuidedBackpropReLUModel(self.gbpmodel,
                                               smg_config["use_cuda"])
        self.useunet = False
        if smg_config["unet_path"]:
            self.useunet = True
            self.unet = UNet(3, 3)
            self.unet.load_state_dict(
                torch.load(smg_config["unet_path"], map_location='cpu'))
            self.unet.eval()
            if smg_config["use_cuda"]:
                self.unet.cuda()

    def __call__(self, data):

        cam = self.gradcam(data)
        cam /= np.max(cam)
        gbp = self.guidebp(data)
        gbp = torch.sum(gbp, 0).detach().numpy()
        gbp /= np.max(gbp)
        sm = 3 * gbp + 2 * cam
        if self.useunet:
            seg = self.unet(data).cpu()
            seg = torch.sum(seg, 1)[0].detach().numpy()
            seg /= np.max(seg)
            sm += seg
        sm /= np.max(sm)
        return sm
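A hypothetical call sketch for the class above; the input format (a single preprocessed image tensor) and its size are assumptions inferred from the GradCam and UNet calls:

import torch

smg = SalientMapGenerator()  # falls back to defaultconfig
image = torch.randn(1, 3, 224, 224)  # illustrative preprocessed input
saliency = smg(image)  # numpy array, rescaled so its maximum is 1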
Example #12
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0.2,
                 num_classes=14):
        super(LungXnet, self).__init__()

        self.model = DenseNet(growth_rate=growth_rate,
                              block_config=block_config,
                              num_init_features=num_init_features,
                              bn_size=bn_size,
                              drop_rate=drop_rate,
                              num_classes=num_classes)

        self.n_filters = self.model.classifier.in_features

        self.model.classifier = nn.Sequential(
            OrderedDict([('linear', nn.Linear(self.n_filters, num_classes)),
                         ('sigmoid', nn.Sigmoid())]))
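Since the replacement head ends in a Sigmoid, the wrapped model emits independent per-class probabilities, which pair with a multi-label loss. A sketch (batch shape and zero targets are illustrative, and LungXnet's own forward is assumed to delegate to self.model):

net = LungXnet()
probs = net.model(torch.randn(2, 3, 224, 224))  # (2, 14) values in (0, 1)
loss = nn.BCELoss()(probs, torch.zeros(2, 14))  # multi-label target sketch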
Example #13
def load_model(model_path):
    device = torch.device('cuda')

    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    # load checkpoint to CPU and then put to device https://discuss.pytorch.org/t/saving-and-loading-torch-models-on-2-machines-with-different-number-of-gpu-devices/6666

    model = DenseNet(growth_rate=checkpoint["growth_rate"],
                     block_config=checkpoint["block_config"],
                     num_init_features=checkpoint["num_init_features"],
                     bn_size=checkpoint["bn_size"],
                     drop_rate=checkpoint["drop_rate"],
                     num_classes=checkpoint["num_classes"]).to(device)
    model.load_state_dict(checkpoint["model_dict"])

    print(
        f"total params: \t{sum([np.prod(p.size()) for p in model.parameters()])}"
    )

    model.eval()
    return model, device
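load_model above expects the checkpoint to carry both the hyperparameters and the weights; a sketch of the matching save-side layout (the values and file name are illustrative):

torch.save({
    "growth_rate": 32,
    "block_config": (6, 12, 24, 16),
    "num_init_features": 64,
    "bn_size": 4,
    "drop_rate": 0,
    "num_classes": 3,
    "model_dict": model.state_dict(),
}, "densenet_checkpoint.pth")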
Example #14
    # estimated total time and remaining time, formatted via asMinutes
    es = s / (percent + .00001)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))


#specify if we should use a GPU (cuda) or only the CPU
print(torch.cuda.get_device_properties(gpuid))
torch.cuda.set_device(gpuid)
device = torch.device(f'cuda:{gpuid}' if torch.cuda.is_available() else 'cpu')

# +
#build the model according to the parameters specified above and copy it to the GPU; finally, print out the number of trainable parameters

model = DenseNet(growth_rate=growth_rate,
                 block_config=block_config,
                 num_init_features=num_init_features,
                 bn_size=bn_size,
                 drop_rate=drop_rate,
                 num_classes=num_classes).to(device)
#model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16), #these represent the default parameters
#                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=3)

print(
    f"total params: \t{sum([np.prod(p.size()) for p in model.parameters()])}")

# -


#this defines our dataset class which will be used by the dataloader
class Dataset(object):
    def __init__(self, fname, img_transform=None):
        #nothing special here, just internalizing the constructor parameters
OUTPUT_DIR = args.outdir
resize = args.resize

batch_size = args.batchsize
patch_size = args.patchsize
stride_size = patch_size//2


# ----- load network
device = torch.device(args.gpuid if args.gpuid!=-2 and torch.cuda.is_available() else 'cpu')

checkpoint = torch.load(args.model, map_location=lambda storage, loc: storage) #load checkpoint to CPU and then put to device https://discuss.pytorch.org/t/saving-and-loading-torch-models-on-2-machines-with-different-number-of-gpu-devices/6666

model = DenseNet(growth_rate=checkpoint["growth_rate"], block_config=checkpoint["block_config"],
                 num_init_features=checkpoint["num_init_features"], bn_size=checkpoint["bn_size"],
                 drop_rate=checkpoint["drop_rate"], num_classes=checkpoint["num_classes"]).to(device)

model.load_state_dict(checkpoint["model_dict"])
model.eval()

print(f"total params: \t{sum([np.prod(p.size()) for p in model.parameters()])}")

# ----- get file list

if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

files = []
basepath = args.basepath  #
basepath = basepath + os.sep if len(