Example no. 1
def get_model(model_name):
    if model_name == 'basic':
        model = MyUNet(Config.N_CLASS)

    if model_name == 'basic_b7':
        model = MyUNet_7(Config.N_CLASS)

    if model_name == 'basic_b2':
        model = MyUNet_2(Config.N_CLASS)

    if model_name == "basic_4_mesh":
        model = MyUNet4(5, Config.N_CLASS)
    if model_name == 'basic_4':
        if not Config.FOUR_CHANNEL:
            model = MyUNet4(3, Config.N_CLASS)
        else:
            model = MyUNet4(4, Config.N_CLASS)

    if model_name == "basic_4_dla_34":
        if not Config.FOUR_CHANNEL:
            model = MyUNet4_V2('dla34', 3, Config.N_CLASS)
        else:
            model = MyUNet4_V2('dla34', 4, Config.N_CLASS)

    if model_name == "basic_4_dla_102x":
        if not Config.FOUR_CHANNEL:
            model = MyUNet4_V2('dla102x', 3, Config.N_CLASS)
        else:
            model = MyUNet4_V2('dla102x', 4, Config.N_CLASS)

    if model_name == 'basic_unet':
        if not Config.FOUR_CHANNEL:
            model = UNet(3, Config.N_CLASS)
        else:
            model = UNet(4, Config.N_CLASS)
    if model_name == 'unet':
        model = UNet_EFF("efficientnet-b0", 8)

    if model_name == 'unet_7':
        model = UNet_EFF("efficientnet-b7", 8)
    # if model_name == 'dla34':
    #     model = get_pose_net(34, {"mask": 1, "regr": 7})
    if model_name == 'dla34_2':
        model = get_pose_net(34, {"mp": 1, "xyz": 3, "roll": 4})
    if model_name == 'hourglass':
        model = get_large_hourglass_net(None, {
            "mp": 1,
            "xyz": 3,
            "roll": 4
        }, None)
    if model_name == 'dla102_x':
        model = get_pose_net("102x", {"mp": 1, "xyz": 3, "roll": 4})
    if Config.PARALLEL and str(Config.device) != 'cpu':
        model = torch.nn.DataParallel(model, device_ids=Config.device_ids)
    model = model.to(Config.device)
    return model
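
The chain of independent if statements above leaves model unbound when model_name does not match any branch. A more compact equivalent is a dict-based registry; the sketch below assumes the same Config object and model classes used in the example, and fails loudly on unknown names.

def get_model(model_name):
    in_ch = 4 if Config.FOUR_CHANNEL else 3
    builders = {
        'basic': lambda: MyUNet(Config.N_CLASS),
        'basic_b7': lambda: MyUNet_7(Config.N_CLASS),
        'basic_b2': lambda: MyUNet_2(Config.N_CLASS),
        'basic_4_mesh': lambda: MyUNet4(5, Config.N_CLASS),
        'basic_4': lambda: MyUNet4(in_ch, Config.N_CLASS),
        'basic_4_dla_34': lambda: MyUNet4_V2('dla34', in_ch, Config.N_CLASS),
        'basic_4_dla_102x': lambda: MyUNet4_V2('dla102x', in_ch, Config.N_CLASS),
        'basic_unet': lambda: UNet(in_ch, Config.N_CLASS),
        'unet': lambda: UNet_EFF("efficientnet-b0", 8),
        'unet_7': lambda: UNet_EFF("efficientnet-b7", 8),
        'dla34_2': lambda: get_pose_net(34, {"mp": 1, "xyz": 3, "roll": 4}),
        'dla102_x': lambda: get_pose_net("102x", {"mp": 1, "xyz": 3, "roll": 4}),
        'hourglass': lambda: get_large_hourglass_net(
            None, {"mp": 1, "xyz": 3, "roll": 4}, None),
    }
    try:
        model = builders[model_name]()  # build only the requested model
    except KeyError:
        raise ValueError(f"Unknown model_name: {model_name}")
    if Config.PARALLEL and str(Config.device) != 'cpu':
        model = torch.nn.DataParallel(model, device_ids=Config.device_ids)
    return model.to(Config.device)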
Example no. 2
    def __init__(self):
        self.show_dir = "../showdir_dark_train"
        self.model_dir = "../models_dark"
        ensure_dir(self.show_dir)
        ensure_dir(self.model_dir)
        logger.info('set show dir as %s' % self.show_dir)
        logger.info('set model dir as %s' % self.model_dir)

        self.net = UNet(3, 3).cuda()
        self.dataset = None
        self.dataloader = None
Example no. 3
class Session:
    def __init__(self):
        self.show_dir = "../showdir_dark_train"
        self.model_dir = "../models_dark"
        ensure_dir(self.show_dir)
        ensure_dir(self.model_dir)
        logger.info('set show dir as %s' % self.show_dir)
        logger.info('set model dir as %s' % self.model_dir)

        self.net = UNet(3, 3).cuda()
        self.dataset = None
        self.dataloader = None

    def get_dataloader(self, dataset_name):
        self.dataset = ShowDataset(dataset_name)
        self.dataloader = \
                    DataLoader(self.dataset, batch_size=1,
                            shuffle=False, num_workers=1)
        return self.dataloader

    def load_checkpoints(self, name):
        ckp_path = os.path.join(self.model_dir, name)
        try:
            obj = torch.load(ckp_path)
            logger.info('Load checkpoint %s' % ckp_path)
        except FileNotFoundError:
            logger.info('No checkpoint %s!!' % ckp_path)
            return
        self.net.load_state_dict(obj['net'])

    def inf_batch(self, name, batch):
        O = batch['O'].cuda()
        O = Variable(O, requires_grad=False)

        with torch.no_grad():
            derain = self.net(O)

        return derain

    def save_image(self, No, imgs):
        for i, img in enumerate(imgs):
            img = (img.cpu().data * 255).numpy()
            img = np.clip(img, 0, 255)
            img = np.transpose(img, (1, 2, 0))
            # h, w, c = img.shape
            # if i == 3:
            img_file = os.path.join(self.show_dir, '%s.png' % (No))
            cv2.imwrite(img_file, img)
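
A note on Session.inf_batch above: Variable has been a no-op wrapper since PyTorch 0.4, and torch.no_grad() already disables gradient tracking, so the method can be written without it. A sketch of the same logic:

    def inf_batch(self, name, batch):
        # Move the rainy input to the GPU and run the derain network
        O = batch['O'].cuda()
        with torch.no_grad():
            derain = self.net(O)
        return derain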
Example no. 4
def inference(args, dataloader):
    if str(args.model).lower() == 'fcn32s':
        model = VGG16_FCN32s(n_classes=7)
        model.load_state_dict(
            torch.load(f'{args.model_path}/best_fcn32s.pth',
                       map_location='cpu'))
    elif str(args.model).lower() == 'fcn8s':
        model = VGG16_FCN8s(n_classes=7)
        model.load_state_dict(
            torch.load(f'{args.model_path}/best_fcn8s.pth',
                       map_location='cpu'))
    else:
        model = UNet(n_channels=3, n_classes=7)
        model.load_state_dict(
            torch.load(f'{args.model_path}/best_unet.pth', map_location='cpu'))
    #model = nn.DataParallel(model)
    model.eval()
    model.cuda()

    for idx, (images, path) in enumerate(dataloader):
        b = images.size(0)

        with torch.no_grad():
            predict = model(images.cuda())
        predict = F.softmax(predict.permute(0, 2, 3, 1), dim=-1)
        predict = torch.argmax(predict, dim=-1)
        predict = predict.cpu().numpy()

        for s in range(b):
            pred_img = np.zeros((512, 512, 3)).astype(np.uint8)
            for c in range(len(class_map)):
                pred_img[predict[s] == c] = class_map[c]
            pred_img = Image.fromarray(pred_img)
            pred_img.save(path[s])
        print(f'\t[{(idx+1)*b}/{len(dataloader.dataset)}]', end='  \r')
Example no. 5
def get_segment_model_and_criterion(device):
    """
    Create U-NET and changes fully connected layer.
    Parameters
    ----------
    device: torch.device
        Define CPU or GPU will be used for training
    Returns
    -------
    model architecture and criterion in tuple
    """
    model = UNet(n_channels=3, n_classes=1)
    model = model.to(device)
    criterion = dice
    return model, criterion
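
The dice criterion is imported from elsewhere in this project and is not shown here. A minimal soft-Dice loss of the kind presumably intended might look like the sketch below (an assumption: it takes raw logits and a binary mask, and returns a value to minimize).

import torch

def dice(logits, target, eps=1e-6):
    """Soft Dice loss for binary segmentation (lower is better)."""
    probs = torch.sigmoid(logits).view(logits.size(0), -1)
    target = target.view(target.size(0), -1).float()
    inter = (probs * target).sum(dim=1)
    union = probs.sum(dim=1) + target.sum(dim=1)
    return 1 - ((2 * inter + eps) / (union + eps)).mean()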
Example no. 6
def get_model(model_name):
    if model_name == 'basic':
        model = MyUNet(Config.N_CLASS)
    if model_name == 'basic_unet':
        model = UNet(3, Config.N_CLASS)
    if Config.PARALLEL and str(Config.device) != 'cpu':
        model = torch.nn.DataParallel(model, device_ids=Config.device_ids)
    model = model.to(Config.device)
    return model
Example no. 7
def main():

    train_dataset = MHP('/root/dataset/LV-MHP-v2/train', n_classes=59)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=12,
                              shuffle=True,
                              num_workers=0)
    model = UNet(n_channels=3, n_classes=59).cuda()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()

    writer = tbx.SummaryWriter(log_dir="logs")

    n_epochs = 10000
    for epoch in range(n_epochs):

        train_epoch(train_loader, model, criterion, optimizer, epoch, writer)

        state = {'state_dict': model.state_dict()}
        filename = 'checkpoints/{0:05d}.pth.tar'.format(epoch)
        torch.save(state, filename)
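
Note that torch.save does not create missing directories, so the checkpoints/ folder must exist before the loop above runs, for example:

import os
os.makedirs('checkpoints', exist_ok=True)  # create the save directory if it is missing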
Example no. 8
def train(args, train_dataloader, valid_dataloader):

    if str(args.model).lower() == 'fcn32s':
        model = VGG16_FCN32s(n_classes=7)
    elif str(args.model).lower() == 'fcn8s':
        model = VGG16_FCN8s(n_classes=7)
    else:
        model = UNet(n_channels=3, n_classes=7)
    #model = nn.DataParallel(model, device_ids=['cuda:0','cuda:1'])
    model.to(args.device)

    # loss
    # 0.79, 0.14, 1.0, 0.73, 2.74, 1.04, 132, 0
    weight = torch.tensor([0.79, 0.14, 1.0, 0.73, 2.74, 1.04, 1.0])
    criterion = nn.CrossEntropyLoss(weight).to(args.device)

    # optim
    optimizer = optim.SGD(model.parameters(),
                          lr=1e-3,
                          momentum=0.9,
                          weight_decay=5e-4)
    if str(args.model) == 'fcn32s':
        milestones = [1, 10, 20, 50]
    elif str(args.model) == 'fcn8s':
        milestones = [1, 10, 20, 60]
    else:
        milestones = [25, 50, 80]
    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=0.2)  #learning rate decay

    best_iou = 0
    for epoch in range(args.epochs):
        print(f"\tEpoch {epoch}")

        loss, acc, iou = _run_train(args, train_dataloader, model, criterion,
                                    optimizer)
        print("\t train loss:{:.5f}, acc:{:.3f}, iou:{:.2f}".format(
            loss, acc, iou))

        loss, acc, iou = _run_eval(args, valid_dataloader, model, criterion)
        print("\t valid loss:{:.5f}, acc:{:.3f}, iou:{:.2f}".format(
            loss, acc, iou))

        if epoch in milestones:
            torch.save(model.state_dict(),
                       f"./result/{epoch}_{args.model}.pth")
            print('\t [Info] save weights')
        if epoch > milestones[1] and iou > best_iou:
            best_iou = iou
            torch.save(model.state_dict(), f"./result/best_{args.model}.pth")
            print('\t [Info] save weights')
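
_run_train and _run_eval are not shown in this excerpt; the iou they report is presumably a mean intersection-over-union over the 7 classes. A minimal metric of that kind (a hypothetical helper, not the repository's code) could be:

import torch

def mean_iou(pred, target, n_classes=7):
    """Mean IoU between integer label maps, skipping classes absent from both."""
    ious = []
    for c in range(n_classes):
        pred_c, target_c = (pred == c), (target == c)
        union = (pred_c | target_c).sum().item()
        if union == 0:
            continue  # class not present in prediction or ground truth
        inter = (pred_c & target_c).sum().item()
        ious.append(inter / union)
    return sum(ious) / max(len(ious), 1)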
Example no. 9
def main():
    args = parse_args()
    params = {
        "train_h5": "output/contactmap_20200219_train.h5",
        "batch_size": 1,
        "optim_batch_size": 2,
        "n_input": 1,
        "n_output": 1,
        "bilinear": True,
        "backup_path": "backup/",
        "steps_save_model": 50,
        "project_name": "test",
        "verbose": args.verbose
    }
    if args.model_path is not None:
        model = torch.load(args.model_path)
    else:
        model = UNet(params["n_input"],
                     params["n_output"],
                     bilinear=params["bilinear"])
    contact_net = ContactNet(model, params)
    for i in range(args.epoches):
        contact_net.train()
Example no. 10
    default_transform = transforms.Compose([
                            transforms.CenterCrop(args.image_size),
                            transforms.Resize(args.image_size),
                            transforms.ToTensor()
                        ])

    # Create train dataset
    train_dataset = dset.ImageFolder(root=args.train_dir, transform=default_transform)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
                                               shuffle=False, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)

    # Create validation dataset
    valid_dataset = dset.ImageFolder(root=args.valid_dir, transform=default_transform)

    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,
                                               shuffle=False, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)

    model_128 = UNet(n_channels=3, n_classes=3, bilinear=True)
    model_128.to(args.device)
    model_128 = load_UNET_weights(model_128, '128', args)
    
    model_256 = UNet(n_channels=3, n_classes=3, bilinear=True)
    model_256.to(args.device)
    model_256 = load_UNET_weights(model_256, '256', args)

    eval_unet_128_256(model_128, model_256, train_loader, 'train', args)
    eval_unet_128_256(model_128, model_256, valid_loader, 'valid', args)
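
load_UNET_weights is a project helper that is not included in this excerpt. A hypothetical equivalent, assuming an args.model_path directory and a unet_<res>.pth naming scheme (both assumptions, not from the original code), might be:

def load_UNET_weights(model, res, args):
    # Load the checkpoint saved for the given resolution tag ('128' or '256')
    ckpt = torch.load(f'{args.model_path}/unet_{res}.pth', map_location=args.device)
    model.load_state_dict(ckpt)
    return model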
Example no. 11
        net.train()
        # Train batch by batch, using the configured batch_size
        for image, label in train_loader:
            optimizer.zero_grad()
            # Copy the data to the device
            image = image.to(device=device, dtype=torch.float32)
            label = label.to(device=device, dtype=torch.float32)
            # Forward pass: get the network prediction
            pred = net(image)
            # Compute the loss
            loss = criterion(pred, label)
            print('Loss/train', loss.item())
            # Save the network weights with the lowest loss so far
            if loss < best_loss:
                best_loss = loss
                torch.save(net.state_dict(), 'best_model.pth')
            # Update the parameters
            loss.backward()
            optimizer.step()


if __name__ == "__main__":
    # Select the device: use CUDA if available, otherwise fall back to CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Build the network: single-channel input, one output class
    net = UNet(n_channels=1, n_classes=1)
    # Move the network to the device
    net.to(device=device)
    # Set the training data path and start training
    data_path = "E:/AI_data/ISBI/data/train/"
    train_net(net, device, data_path)
Example no. 12
    if not args.output:
        for f in in_files:
            pathsplit = os.path.splitext(f)
            out_files.append("{}_OUT{}".format(pathsplit[0], pathsplit[1]))
    elif len(in_files) != len(args.output):
        logging.error("Input files and output files are not of the same length")
        raise SystemExit()
    else:
        out_files = args.output

    return out_files


if __name__ == '__main__':
    args = get_args()
    # out_files = get_output_filenames(args)

    net = UNet(n_channels=1, n_classes=1)

    logging.info("Loading model {}".format(args.model))

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')
    net.to(device=device)
    net.load_state_dict(torch.load(args.model, map_location=device))

    logging.info("Model loaded !")

    # Visualize layer
    plot_weights(net, 'conv12')
Example no. 13
def train_model(cfg):
    task = Task.init(project_name="Compressed Sensing")
    log.info(cfg.exp.summary)
    log.info(f'{device} detected.')
    log.debug(OmegaConf.to_yaml(cfg))
    os.chdir(hydra.utils.get_original_cwd())
    writer = SummaryWriter(log_dir=f'{cfg.logging.checkpoint_dir}')
    checkpoint_dir = Path(f'{cfg.logging.checkpoint_dir}')
    dump_config(cfg, save_path=checkpoint_dir / 'config.yaml')

    # load dataset and sensing matrix
    mat, channels, weights = load_data(cfg.dataset.panel)
    weights = weights.to(device)

    index = cfg.training.index if cfg.training.index else [i for i in range(mat.shape[1])]

    dataset = xr.load_dataarray(cfg.dataset.src).sel(channels=channels)
    val_set = dataset.sel(fovs=cfg.training.val_fovs).values.astype(np.float32)

    ###
    X_val = torch.from_numpy(dataset.sel(fovs=['Point5']).values.astype(np.float32)).to(device)
    m = torch.from_numpy(mat).float().unsqueeze(-1).unsqueeze(-1).to(device)
    Y_val = F.conv2d(X_val, m)
    X_val = X_val[:, index, :, :]
    target_val = (X_val > 0).float().squeeze()
    target_val = target_val.view(target_val.size(0), -1)  # [C, H * W]
    ###

    if not cfg.training.train_fovs:
        train_set = dataset.drop_sel(fovs=cfg.training.val_fovs).values.astype(np.float32)
    else:
        train_set = dataset.sel(fovs=cfg.training.train_fovs).values.astype(np.float32)

    train_fetcher = Data(train_set, mat, crop_size=cfg.dataset.crop_size, batch_size=cfg.dataset.batch_size,
                         dropout=cfg.dataset.dropout, index=index)
    val_fetcher = Data(val_set, mat, crop_size=cfg.dataset.crop_size, batch_size=cfg.dataset.batch_size, train=False,
                       index=index)

    # set seed
    if cfg.exp.seed:
        random.seed(cfg.exp.seed)
        torch.manual_seed(cfg.exp.seed)
        np.random.seed(cfg.exp.seed)

    model = torch.nn.DataParallel(UNet(**cfg.model.net))
    model = model.to(device)
    model.train()

    optimizer = optim.Adam(model.parameters(), lr=cfg.training.optimizer.lr)

    # define loss functions
    # recon_fn = LossWrapper(nn.MSELoss(reduction='none'), k=cfg.training.top_k,
    #                        weights=weights, use_positive_weights=cfg.training.use_positive_weights)
    # class_fn = LossWrapper(nn.BCELoss(reduction='none'), k=cfg.training.top_k,
    #                        weights=weights, use_positive_weights=cfg.training.use_positive_weights)
    class_fn = BinaryFocalLossWithLogits(**cfg.training.bce)

    if cfg.training.resume:
        log.info("Resume checkpoint from: {}:".format(cfg.training.resume))
        resume_path = utils.to_absolute_path(cfg.training.resume)
        checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        global_step = checkpoint["step"]
    else:
        global_step = 0

    for step in range(global_step + 1, cfg.training.n_steps + 1):

        x, y = train_fetcher.get()
        x = x.to(device=device, dtype=torch.float32)
        y = y.to(device=device, dtype=torch.float32)

        optimizer.zero_grad()

        logits, x_hat = model(y)

        binary_target = (x > 0).float()

        pred = (torch.sigmoid(logits) > 0.5).float()
        f1 = 2 * torch.true_divide((binary_target * pred).sum(), (binary_target.sum() + pred.sum()))

        classification_loss = class_fn(logits, binary_target)
        # classification_loss = F.binary_cross_entropy_with_logits(logits, binary_target)
        # classification_loss = class_fn(torch.sigmoid(logits), binary_target)

        # y_hat = F.conv2d(x_hat, train_fetcher.m.to(device))
        # ls_error = F.mse_loss(y_hat, y)
        # recon_loss = F.mse_loss(x_hat, x)
        # recon_loss = recon_fn(x_hat, x)
        # cov_loss = cov_fn(x_hat, x)

        # loss = cfg.training.recon * recon_loss + cfg.training.cov * cov_loss + cfg.training.ls * ls_error
        # loss = cfg.training.recon * recon_loss + cfg.training.ls * ls_error + cfg.training.cl * classification_loss
        loss = classification_loss
        loss.backward()

        if cfg.training.grad_clip > 0:
            torch.nn.utils.clip_grad_value_(model.parameters(), cfg.training.grad_clip)

        optimizer.step()

        # log.info('[{} / {}] | TRAIN loss: {:.2E} | mse: {:.2E} | bce: {:.2E} | f1: {:.2E} | ls: {:.2E}'.format(step,
        #                                                                                                        cfg.training.n_steps,
        #                                                                                                        loss.item(),
        #                                                                                                        recon_loss.item(),
        #                                                                                                        classification_loss.item(),
        #                                                                                                        f1.item(),
        #                                                                                                        ls_error.item()))
        log.info('[{} / {}] | TRAIN loss: {:.2E} | bce: {:.2E} | f1: {:.2E}'.format(step,
                                                                                    cfg.training.n_steps,
                                                                                    loss.item(),
                                                                                    classification_loss.item(),
                                                                                    f1.item()))

        # writer.add_scalar('TRAIN/mse', recon_loss.item(), step)
        writer.add_scalar('TRAIN_LOSS/bce', classification_loss.item(), step)
        writer.add_scalar('TRAIN_ACCURACY/f1', f1.item(), step)
        # writer.add_scalar('TRAIN/ls', ls_error.item(), step)
        # writer.add_scalar('TRAIN/loss_total', loss.item(), step)
        writer.add_scalar('Model/LR', optimizer.param_groups[0]['lr'], step)

        if step % cfg.logging.eval_interval == 0 or step == cfg.training.n_steps - 1 or step == 1:
            val_dice, val_bce = eval_net(model, val_fetcher, class_fn, device)
            log.info('[{} / {}] | VAL bce: {:.2E} | f1: {:.2E}'.format(step, cfg.training.n_steps,
                                                                       val_bce.item(),
                                                                       val_dice.item()))
            # writer.add_scalar('VAL/mse', val_mse.item(), step)
            writer.add_scalar('VAL_LOSS/bce', val_bce.item(), step)
            writer.add_scalar('VAL_ACCURACY/f1', val_dice.item(), step)

            ###
            model.eval()
            with torch.no_grad():
                logits, x_hat = model(Y_val)
            pred = (torch.sigmoid(logits) > 0.5).float().squeeze()  # [C, H, W]
            pred = pred.view(pred.size(0), -1)  # [C, H * W]
            f1 = 2 * ((target_val * pred).sum(dim=1) / (target_val + pred).sum(dim=1))  # [C,]
            for i in index:
                writer.add_scalar(f'Point5/{channels[i]}', f1[i].sum().item(), step)
                log.info('{} \t {:.2E}'.format(channels[i], f1[i].sum().item()))
            # for score, channel in zip(f1, channels):
            #     writer.add_scalar(f'Point5/{channel}', score.sum().item(), step)
            #     log.info('{} \t {:.2E}'.format(channel, score.sum().item()))
            model.train()
            ###

        if step % cfg.logging.checkpoint_interval == 0 or step == cfg.training.n_steps - 1 or step == 1:
            save_checkpoint(log, model, optimizer, step, checkpoint_dir)
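
eval_net is not shown in this example; from the call site it returns a (dice, bce) pair of scalar tensors averaged over some validation batches. A hypothetical sketch consistent with that usage (the helper exists in the project, but its body and the n_batches parameter here are assumptions):

def eval_net(model, fetcher, loss_fn, device, n_batches=10):
    model.eval()
    bce_sum, dice_sum = 0.0, 0.0
    with torch.no_grad():
        for _ in range(n_batches):
            x, y = fetcher.get()
            x = x.to(device=device, dtype=torch.float32)
            y = y.to(device=device, dtype=torch.float32)
            logits, x_hat = model(y)
            target = (x > 0).float()
            bce_sum += loss_fn(logits, target).item()
            pred = (torch.sigmoid(logits) > 0.5).float()
            dice_sum += (2 * (target * pred).sum() /
                         (target.sum() + pred.sum() + 1e-8)).item()
    model.train()
    return torch.tensor(dice_sum / n_batches), torch.tensor(bce_sum / n_batches)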
Example no. 14
    # load datasets
    train_dataset, test_dataset, _ = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifarTF(args=args)
        elif args.dataset == 'brats2018':
            from unet.unet_model import UNet
            global_model = UNet(n_channels=1, n_classes=1, bilinear=True)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in,
                           dim_hidden=64,
                           dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
Example no. 15
import glob
import numpy as np
import torch
import os
import cv2
from unet.unet_model import UNet

if __name__ == "__main__":
    # Select the device: use CUDA if available, otherwise fall back to CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Build the network: single-channel input, one output class
    net = UNet(n_channels=1, n_classes=1)
    # Move the network to the device
    net.to(device=device)
    # Load the trained weights
    net.load_state_dict(torch.load('best_model.pth', map_location=device))
    # Switch to evaluation mode
    net.eval()
    # Collect all test image paths
    tests_path = glob.glob('E:/AI_data/ISBI/data/test/*.png')
    # Iterate over all test images
    for test_path in tests_path:
        # Path where the result will be saved
        save_res_path = test_path.split('.')[0] + '_res.png'
        # Read the image
        img = cv2.imread(test_path)
        # Convert to grayscale
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        # Reshape to batch size 1, 1 channel, 512x512
        img = img.reshape(1, 1, img.shape[0], img.shape[1])
        # Convert to a tensor
Example no. 16
                      exclude_subjects=test_folders)
#                        target_transform=target_transform)
nfdataset_test = NFDataset("/home/michael/nf_dataset",
                           data_transform=data_transform,
                           exclude_subjects=train_folders)
#
sampler = torch.utils.data.sampler.SubsetRandomSampler(
    nfdataset.positive_counts)

dataloader = torch.utils.data.DataLoader(nfdataset,
                                         sampler=sampler,
                                         batch_size=32)

from unet.unet_model import UNet

unet = UNet(1, 1)

n_epochs = 10

n_samples_per_epoch = 100000

all_epoch_avg_losses = []

unet = unet.cuda()

optimizer = torch.optim.Adam(unet.parameters())
import sys

import numpy as np

for e in range(n_epochs):
Example no. 17
                                          batch_size=args.batchsize,
                                          shuffle=True,
                                          num_workers=4)
testset = HandDataset(args.testjson, transform=transforms)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=args.batchsize,
                                         shuffle=True,
                                         num_workers=4)

torch.manual_seed(1)
if args.gpu is not None:
    torch.cuda.manual_seed(1)
    cudnn.deterministic = True
    cudnn.benchmark = True

model = UNet(3, 31)
print("loaded model!")

if args.gpu is not None:
    model = model.cuda(args.gpu)
    print("model to gpu")
if os.path.isfile(args.checkpoint):
    checkpoint = torch.load(args.checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    print("loaded checkpoint '{}'".format(args.checkpoint))

#criterion = nn.MSELoss()
criterion = nn.L1Loss()
optimizer = torch.optim.SGD(model.parameters(),
                            args.lr,
                            momentum=args.momentum,
Example no. 18
def main():
    net = UNet(3, n_classes=3)
    #net.load_state_dict(torch.load("./MODEL.pth"))
    #print("Model loaded.")
    if len(args['snapshot']) == 0:
        # net.load_state_dict(torch.load(os.path.join(ckpt_path, 'cityscapes (coarse)-psp_net', 'xx.pth')))
        curr_epoch = 1
        args['best_record'] = {
            'epoch': 0,
            'iter': 0,
            'val_loss': 1e10,
            'acc': 0,
            'acc_cls': 0,
            'mean_iu': 0,
            'fwavacc': 0
        }
    else:
        print('training resumes from ' + args['snapshot'])
        net.load_state_dict(
            torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
        split_snapshot = args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        args['best_record'] = {
            'epoch': int(split_snapshot[1]),
            'iter': int(split_snapshot[3]),
            'val_loss': float(split_snapshot[5]),
            'acc': float(split_snapshot[7]),
            'acc_cls': float(split_snapshot[9]),
            'mean_iu': float(split_snapshot[11]),
            'fwavacc': float(split_snapshot[13])
        }
    net.cuda().train()

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    train_joint_transform = joint_transforms.Compose([
        joint_transforms.Scale(args['longer_size']),
        joint_transforms.RandomRotate(10),
        joint_transforms.RandomHorizontallyFlip()
    ])
    sliding_crop = joint_transforms.SlidingCrop(args['crop_size'],
                                                args['stride_rate'],
                                                ignore_label)
    train_input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    val_input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    target_transform = extended_transforms.MaskToTensor()
    visualize = standard_transforms.Compose([
        standard_transforms.Scale(args['val_img_display_size']),
        standard_transforms.ToTensor()
    ])

    train_set = Retinaimages('training',
                             joint_transform=train_joint_transform,
                             sliding_crop=sliding_crop,
                             transform=train_input_transform,
                             target_transform=target_transform)
    train_loader = DataLoader(train_set,
                              batch_size=args['train_batch_size'],
                              num_workers=2,
                              shuffle=True)
    val_set = Retinaimages('validate',
                           transform=val_input_transform,
                           sliding_crop=sliding_crop,
                           target_transform=target_transform)
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            num_workers=2,
                            shuffle=False)

    criterion = CrossEntropyLoss2d(size_average=True).cuda()

    optimizer = optim.SGD([{
        'params': [
            param
            for name, param in net.named_parameters() if name[-4:] == 'bias'
        ],
        'lr':
        2 * args['lr']
    }, {
        'params': [
            param
            for name, param in net.named_parameters() if name[-4:] != 'bias'
        ],
        'lr':
        args['lr'],
        'weight_decay':
        args['weight_decay']
    }],
                          momentum=args['momentum'],
                          nesterov=True)

    if len(args['snapshot']) > 0:
        optimizer.load_state_dict(
            torch.load(
                os.path.join(ckpt_path, exp_name, 'opt_' + args['snapshot'])))
        optimizer.param_groups[0]['lr'] = 2 * args['lr']
        optimizer.param_groups[1]['lr'] = args['lr']

    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    open(os.path.join(ckpt_path, exp_name, "_1" + '.txt'),
         'w').write(str(args) + '\n\n')

    train(train_loader, net, criterion, optimizer, curr_epoch, args,
          val_loader, visualize, val_set)
Example no. 19
if __name__ == '__main__':

    args = get_args()
    print(args)

    have_gpu = torch.cuda.is_available()
    print('Have GPU?:{}'.format(have_gpu))

    writer = SummaryWriter(args.tensorboard)

    # --------------------------- using pre-trained params ---------------------------------- #

    # (1) get param from pre-trained model
    # from unet_3up_ab_toge.unet.unet_model import UNet as UNet_old
    from unet.unet_model import UNet as UNet_old
    net_old = UNet_old(n_channels=3, n_classes=1)
    net_old.load_state_dict(
        torch.load(
            '../load_model_from_step2_add_bd_branch/load_model_from_2ab_fixa/CPxx.pth'
        ))
    net_old_dict = net_old.state_dict()

    # (2) our new model
    net = UNet(n_channels=3, n_classes=1)
    net_dict = net.state_dict()

    # # (3) apply pre-trained params in new model
    net_old_dict = {k: v for k, v in net_old_dict.items() if k in net_dict}
    net_dict.update(net_old_dict)  # update params using pre-trained model
    net.load_state_dict(net_dict)  # update the model
Example no. 20
            epoch_loss += loss.item()
            itr += 1
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_value_(net.parameters(), 0.1)
            optimizer.step()
        epoch_loss = epoch_loss / itr
        val_loss = validate(net, device, criterion, val_loader)  # validate
        train_list.append(epoch_loss)
        valid_list.append(val_loss)
        print('Epoch:%d, train_loss:%.3f, val_loss:%.3f' % (epoch, epoch_loss, val_loss))
    plot(train_list, valid_list)




if __name__ == '__main__':

    net = UNet(1, 1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net.to(device)

    train(net, device, '/content/drive/My Drive/dcm_mini',
          epochs=20,
          batch_size=16,
          lr=0.001,
          val_percent=0.2
    )
    test(net, device, '/content/drive/My Drive/dcm_mini')

Example no. 21
    writer = SummaryWriter(args.tensorboard)


    # --------------------------- using pre-trained params ---------------------------------- #

    # (1) get param from pre-trained model
    # from unet_3up_ab_toge.unet.unet_model import UNet as UNet_old
    # from unet_3up_ab.unet_model import UNet as UNet_old
    from step2_add_bd_branch.unet.unet_model import UNet as UNet_old
    net_old = UNet_old(n_channels=3, n_classes=1)
    net_old.load_state_dict(torch.load('../step2_add_bd_branch/step2_checkpoints/CP196.pth'))
    net_old_dict = net_old.state_dict()

    # (2) our new model
    net = UNet(n_channels=3, n_classes=1)
    net_dict = net.state_dict()

    # # (3) apply pre-trained params in new model
    net_old_dict = {k: v for k, v in net_old_dict.items() if k in net_dict}
    net_dict.update(net_old_dict)  # update params using pre-trained model
    net.load_state_dict(net_dict)  # update the model

    if have_gpu and args.gpu:
        print('Using GPU !')
        net = net.cuda()

    try:
        train_net(image_dir=args.imagedir,
                  label_dir=args.gt,
                  boundary_dir=args.bd,
Example no. 22
test_data = test_data_amp

test_label_mask = data_amp['test_label_instance']
num_test_instances = len(test_data)

test_data = torch.from_numpy(test_data).type(torch.FloatTensor)
test_label = torch.from_numpy(test_label_mask).type(torch.LongTensor)
# test_data = test_data.view(num_test_instances, 1, -1)
# test_label = test_label.view(num_test_instances, 2)

test_dataset = TensorDataset(test_data, test_label)
test_data_loader = DataLoader(dataset=test_dataset,
                              batch_size=batch_size,
                              shuffle=False)

unet = UNet(n_classes=7)
unet = unet.cuda()

criterion = nn.CrossEntropyLoss(reduction='sum').cuda()
optimizer = torch.optim.Adam(unet.parameters(), lr=0.005)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=[
                                                     10, 20, 30, 40, 60, 70,
                                                     80, 90, 100, 110, 120,
                                                     130, 140, 150, 160, 170,
                                                     180, 190, 200, 250, 300
                                                 ],
                                                 gamma=0.5)
train_loss = np.zeros([num_epochs, 1])
test_loss = np.zeros([num_epochs, 1])
train_acc = np.zeros([num_epochs, 1])
Example no. 23
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Create validation dataset

    valid_data = dset.ImageFolder(root=args.valid_dir,
                                  transform=default_transform)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    model = UNet(n_channels=3, n_classes=3, bilinear=True)
    model.to(args.device)
    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=args.lr,
                                 weight_decay=0)

    state_dict = {'itr': 0}

    if args.resume:
        print('Loading weights & resuming from iteration {}'.format(
            args.checkpoint))
        model, optimizer, logger = load_UNET_checkpoint(
            model, optimizer, '256', args)
        state_dict['itr'] = args.checkpoint

    for epoch in range(args.num_epochs):
Example no. 24
    writer = SummaryWriter(args.tensorboard)

    # --------------------------- using pre-trained params ---------------------------------- #

    # (1) get param from pre-trained model
    # from unet_3up_area.unet.unet_model import UNet as UNet_old
    # from unet_3up_area.unet_model import UNet as UNet_old
    from step1_area_branch_with_sknet.unet.unet_model import UNet as UNet_old
    net_old = UNet_old(n_channels=3, n_classes=1)
    net_old.load_state_dict(
        torch.load(
            '../step1_area_branch_with_sknet/step1_checkpoints/CP149.pth'))
    net_old_dict = net_old.state_dict()

    # (2) our new model
    net = UNet(n_channels=3, n_classes=1)
    net_dict = net.state_dict()

    # # (3) apply pre-trained params in new model
    net_old_dict = {k: v for k, v in net_old_dict.items() if k in net_dict}
    net_dict.update(net_old_dict)  # update params using pre-trained model
    net.load_state_dict(net_dict)  # update the model

    # for name, param in net.named_parameters():
    #     if param.requires_grad:
    #         print(name)

    # -------------------------- fix parameters related area ---------------------------------- #

    # whether fix parameters related area
    for name, param in net.named_parameters():
Example no. 25
                                 std=[0.229, 0.224, 0.225])

img_transforms = transforms.Compose([
    transforms.Resize((320, 320)),
    transforms.ToTensor(),
    normalize,
])

gpu = 0
checkpoint_path = "3/checkpoint_80.pth"

torch.manual_seed(1)
if gpu is not None:
    torch.cuda.manual_seed(1)

model = UNet(3, 31)
print("loaded model!")

if gpu is not None:
    model = model.cuda(gpu)
    print("model to gpu")
if os.path.isfile(checkpoint_path):
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['state_dict'])
    print("loaded checkpoint {}".format(checkpoint_path))


def main():
    print("model")
    print(model)
    img = Image.open("test/zjf_7.jpg").convert('RGB')
Example no. 26
def main():
    """
    Main training loop.
    """
    parser = ArgumentParser()

    parser = UNet.add_model_specific_args(parser)
    parser = Trainer.add_argparse_args(parser)

    args = parser.parse_args()

    prod = bool(os.getenv("PROD"))
    logging.getLogger(__name__).setLevel(logging.INFO)

    if prod:
        logging.info(
            "Training in production mode, disabling all debugging APIs")
        torch.autograd.set_detect_anomaly(False)
        torch.autograd.profiler.profile(enabled=False)
        torch.autograd.profiler.emit_nvtx(enabled=False)
    else:
        logging.info("Training in development mode, debugging APIs active.")
        torch.autograd.set_detect_anomaly(True)
        torch.autograd.profiler.profile(enabled=True,
                                        use_cuda=True,
                                        record_shapes=True,
                                        profile_memory=True)
        torch.autograd.profiler.emit_nvtx(enabled=True, record_shapes=True)

    model = UNet(**vars(args))

    logging.info(
        f"Network:\n"
        f"\t{model.hparams.n_channels} input channels\n"
        f"\t{model.hparams.n_classes} output channels (classes)\n"
        f'\t{"Bilinear" if model.hparams.bilinear else "Transposed conv"} upscaling'
    )

    cudnn.benchmark = True  # cudnn Autotuner
    cudnn.enabled = True  # look for optimal algorithms

    early_stop_callback = EarlyStopping(
        monitor="val_loss",
        min_delta=0.00,
        mode="min",
        patience=10 if not os.getenv("EARLY_STOP") else int(
            os.getenv("EARLY_STOP")),
        verbose=True,
    )

    lr_monitor = LearningRateMonitor()

    run_name = "{}_LR{}_BS{}_IS{}".format(
        datetime.now().strftime("%d-%m-%Y-%H-%M-%S"),
        args.lr,
        args.batch_size,
        args.image_size,
    ).replace(".", "_")

    log_folder = ("./logs" if not os.getenv("DIR_ROOT_DIR") else
                  os.getenv("DIR_ROOT_DIR"))
    if not os.path.isdir(log_folder):
        os.mkdir(log_folder)
    logger = TensorBoardLogger(log_folder, name=run_name)

    checkpoint_callback = ModelCheckpoint(
        monitor='val_loss',
        dirpath='./checkpoints',
        filename='unet-{epoch:02d}-{val_loss:.2f}',
        save_top_k=3,
        mode='min',
    )

    try:
        trainer = Trainer.from_argparse_args(
            args,
            gpus=-1,
            accelerator="ddp",
            plugins=DDPPlugin(find_unused_parameters=False),
            precision=16,
            auto_lr_find="learning_rate"
            if float(os.getenv("LRN_RATE")) == 0.0 else False,
            logger=logger,
            callbacks=[early_stop_callback, lr_monitor, checkpoint_callback],
            accumulate_grad_batches=1 if not os.getenv("ACC_GRAD") else int(
                os.getenv("ACC_GRAD")),
            gradient_clip_val=0.0 if not os.getenv("GRAD_CLIP") else float(
                os.getenv("GRAD_CLIP")),
            max_epochs=100 if not os.getenv("EPOCHS") else int(
                os.getenv("EPOCHS")),
            val_check_interval=0.1 if not os.getenv("VAL_INT_PER") else float(
                os.getenv("VAL_INT_PER")),
            default_root_dir=os.getcwd()
            if not os.getenv("DIR_ROOT_DIR") else os.getenv("DIR_ROOT_DIR"),
            fast_dev_run=True
            if os.getenv("FAST_DEV_RUN") == "True" else False,
        )
        if float(os.getenv("LRN_RATE")) == 0.0:
            trainer.tune(model)
        trainer.fit(model)
        trainer.test(model)
    except KeyboardInterrupt:
        torch.save(model.state_dict(), "INTERRUPTED.pth")
        logging.info("Saved interrupt")
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
Example no. 27
    (options, args) = parser.parse_args()
    return options


if __name__ == '__main__':

    args = get_args()
    print(args)

    writer = SummaryWriter(args.tensorboard)

    have_gpu = torch.cuda.is_available()
    print('Have GPU?:{}'.format(have_gpu))

    net = UNet(n_channels=3, n_classes=1)
    net.eval()

    if have_gpu and args.gpu:
        net = net.cuda()
        print('Using GPU !')

    predict(validate_image_dir=args.imagedir,
            validate_label_dir=args.gt,
            validate_boundary_dir=args.bd,
            checkpoints_dir=args.checkpoint,
            net=net,
            batch_size=args.batchsize,
            gpu=args.gpu)

    ## tensorboard --logdir=./log* --port=8008
Example no. 28
    (options, args) = parser.parse_args()
    return options


if __name__ == '__main__':

    args = get_args()
    print(args)

    have_gpu = torch.cuda.is_available()
    print('Have GPU?:{}'.format(have_gpu))

    writer = SummaryWriter(args.tensorboard)

    net = UNet(n_channels=3, n_classes=1)

    if have_gpu and args.gpu:
        print('Using GPU !')
        net = net.cuda()

    try:
        train_net(image_dir=args.imagedir,
                  label_dir=args.gt,
                  checkpoint_dir=args.checkpoint,
                  net=net,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  lr=args.lr,
                  gpu=args.gpu
                  )
Example no. 29
from mytransformation_2inputs import ToTensor
import os
from os.path import *
import numpy as np
import scipy.misc
import matplotlib.pyplot as plt

batch_size = 1
test_image_dir = '../test/images/'
test_label_dir = '../test/labels/'
checkpoints_dir = '../checkpoints/'
# save_path = 'test_results/'
# if not exists(save_path):
#     os.mkdir(save_path)

net = UNet(n_channels=3, n_classes=1)
net.cuda()
net.eval()

for checkpoint in range(1, 31):
    net.load_state_dict(
        torch.load(checkpoints_dir + 'CP' + str(5 * checkpoint - 4) + '.pth'))

    transform1 = transforms.Compose([ToTensor()])
    test_dataset = Dataset_unet(test_image_dir,
                                test_label_dir,
                                transform=transform1)
    dataloader = DataLoader(test_dataset, batch_size=batch_size)
    dataset_sizes = len(test_dataset)
    batch_num = int(dataset_sizes / batch_size)
Example no. 30
import torch as th
import os
import cv2
import numpy as np
from unet.unet_model import UNet
import time

unet = UNet(3, 1).to('cuda')
unet.eval()
unet.load_state_dict(th.load(r'.\checkpoint\PersonMasker262.pt'))

evalImagePath = r'E:\Person_detection\Dataset\DataSets2017\u_net\image'
evalMaskPath = r'E:\Person_detection\Pytorch-UNet\eval\mask_coco'
imgs = [os.path.join(evalImagePath, i) for i in os.listdir(evalImagePath)]
for idx, img_i in enumerate(imgs):
    img = np.expand_dims(np.transpose(cv2.imread(img_i), [2, 0, 1]), 0)
    t1 = time.time()
    with th.no_grad():
        mask = unet(th.cuda.FloatTensor(img))
    t2 = time.time()
    mask = cv2.resize(
        np.transpose(np.repeat(mask.detach().cpu().numpy()[0, :, :, :], 3, 0),
                     [1, 2, 0]), (412, 412))
    background = np.zeros_like(mask)
    color = np.ones_like(mask)
    color[:, :, 0] = 150
    color[:, :, 1] = 50
    color[:, :, 2] = 170
    mask = np.where(mask > 0.5, color, background)
    img = np.transpose(img[0, :, :, :], [1, 2, 0])
    mask_img = mask + img
    cv2.imwrite(os.path.join(evalMaskPath, '{}.jpg'.format(idx)), mask_img)