default='resnet18',
                    help='model architecture')
parser.add_argument('-a', '--attention', default='', help='attention')
parser.add_argument('path', type=str, help='image path')
parser.add_argument('state_dict_path', type=str, help='state_dict_path')

args = parser.parse_args()

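# Map the CLI attention flag to the layer name that Modellist expects.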
if args.attention == 'se':
    attention_module = 'se_layer'
elif args.attention == 'cbam':
    attention_module = 'cbam_layer'
else:
    attention_module = None

model = modellist.Modellist(args.modelname, args.numclasses, attention_module)
model.load_state_dict(torch.load(args.state_dict_path, map_location='cpu'))

image_path = args.path
img = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = np.float32(cv2.resize(img, (224, 224))) / 255
input_tensor = utils.preprocess_image(img)
use_cuda = torch.cuda.is_available()
target_index = None

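# Grad-CAM hooks the submodule named "2" inside model.layer4 and produces a
# class-activation heatmap; target_index=None uses the top predicted class.
# Guided backprop below yields a gradient-based saliency map for the same class.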
grad_cam = utils.GradCam(model=model, feature_module=model.layer4,
                         target_layer_names=["2"], use_cuda=use_cuda)
mask = grad_cam(input_tensor, target_index)
utils.show_cam_on_image(img, mask)
gb_model = utils.GuidedBackpropReLUModel(model=model, use_cuda=use_cuda)
gb = gb_model(input_tensor, index=target_index)
Example #2
def main_worker(args):
    global history_dict
    global best_acc1
    history_path = ''

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        print("Use GPU: {} for training".format(device))

    if args.attention == 'se':
        attention_module = 'se_layer'
    elif args.attention == 'cbam':
        attention_module = 'cbam_layer'
    else:
        attention_module = None

    model = modellist.Modellist(args.modelname, args.numclasses,
                                attention_module)

    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    model = model.to(device)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)

            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            history_dict = checkpoint['history_dict']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

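    # Snapshot of the best-performing weights so far; val() returns the
    # updated snapshot after each evaluation pass.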
    best_acc_wts = copy.deepcopy(model.state_dict())

    # Data loading code
    ########################################################################
    transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))  # MNIST is single-channel
    ])

    train_dataset = dsets.MNIST(
        root='MNIST_data/',  # download directory
        train=True,  # True selects the training split
        transform=transform,  # convert to normalized tensors
        download=True)

    test_dataset = dsets.MNIST(root='MNIST_data/',
                               train=False,
                               transform=transform,
                               download=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batchsize,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                             batch_size=args.batchsize,
                                             shuffle=False)

    #######################################################

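    # When args.evaluate is set, run a single validation pass and return.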
    if args.evaluate:
        val(val_loader, model, criterion, device, best_acc_wts)
        return

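    # Training loop: decay the learning rate, train for one epoch, validate,
    # and reload the best weights before the next epoch.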
    for epoch in range(args.start_epoch, args.epochs):

        utils.adjust_learning_rate(optimizer, epoch, args.start_epoch)
        before_best_acc1 = best_acc1

        # train for one epoch
        print('epochs :', epoch + 1, '/', args.epochs)
        train(train_loader, model, criterion, optimizer, device)
        # evaluate on validation set
        best_acc_wts = val(val_loader, model, criterion, device, best_acc_wts)
        model.load_state_dict(best_acc_wts)
        is_best = before_best_acc1 < best_acc1

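        # Checkpoint every epoch; is_best flags whether validation improved best_acc1.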
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.modelname,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
                'history_dict': history_dict,
            }, is_best, history_path)
    if args.hgpath:
        utils.train_graph(args.epochs, history_dict, args.hgpath)
Example #3
#main.py
import torch
import albumentations as A
from torch.utils.data import random_split, DataLoader
import numpy as np
import data
import modellist
import argparse
import utils
import torch.optim as optim

device = 'cuda' if torch.cuda.is_available() else 'cpu'
path = '/content/gdrive/My Drive/lgg-mri-segmentation/kaggle_3m'

modellist = modellist.Modellist()

parser = argparse.ArgumentParser(
    description='Train a model on the segmentation dataset')
parser.add_argument('modelnum', type=int, help='Select your model number')
parser.add_argument("-show",
                    help="show to model Archtecture",
                    action="store_true")
parser.add_argument('lr', type=float, help='Select opimizer learning rate')
parser.add_argument('epochs', type=int, help='Select train epochs')
args = parser.parse_args()
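# Example invocation (illustrative values):
#   python main.py 0 0.01 30 -show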

model = modellist(args.modelnum)
model = model.to(device)
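# DiceBCELoss in utils presumably combines Dice loss with binary cross-entropy,
# a common pairing for binary segmentation masks.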
criterion = utils.DiceBCELoss()
optimizer = optim.SGD(model.parameters(),
                      lr=args.lr,