Example #1
def load_model_and_dataset(model_name):
    if '3layers' in model_name:
        model = UNet(channels_in=3, channels_out=1)
        Dataset = Dataset3Layers
    else:
        model = UNet(channels_in=1, channels_out=1)
        Dataset = Dataset1Layer
    model.apply(init_weight)
    return model, Dataset
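
# train() below relies on an epoch_step helper that is not included in this
# snippet. The sketch below is only an assumption about its behaviour (one
# pass over a loader, returning the mean dice loss), not the repository's
# actual implementation.
import torch

def epoch_step(loader, model, opt, loss_fn, phase, device):
    # Switch between training and evaluation behaviour (dropout, batch norm).
    if phase == 'train':
        model.train()
    else:
        model.eval()
    total, batches = 0.0, 0
    for img, label in loader:  # assumes the dataset yields (image, label) pairs
        img, label = img.float().to(device), label.float().to(device)
        with torch.set_grad_enabled(phase == 'train'):
            pred = model(img)
            loss = loss_fn(pred, label)
            if phase == 'train':
                opt.zero_grad()
                loss.backward()
                opt.step()
        total += loss.item()
        batches += 1
    return total / max(batches, 1)
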
def train(img_path, ori_seg_path, label_path, ckpt_path, xls_path):
    wb = xlwt.Workbook()
    ws = wb.add_sheet('dice loss')

    model = UNet(channels_in=4, channels_out=1)
    model.apply(init_weight)

    train_set = Dataset4Layers(img_path, ori_seg_path, label_path, 'train')
    train_loader = DataLoader(train_set, batch_size=3, shuffle=True)
    val_set = Dataset4Layers(img_path, ori_seg_path, label_path, 'val')
    val_loader = DataLoader(val_set, batch_size=3, shuffle=False)

    opt = Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))
    sch = StepLR(opt, step_size=20, gamma=0.7)
    loss = DiceLoss()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.float().to(device)

    max_epoch = 151
    cnt = 0
    stop_count = 15
    min_dice_loss = 1.
    stop_flag = False
    for i in range(max_epoch):
        dice_loss_train = epoch_step(train_loader, model, opt, loss, 'train',
                                     device)
        dice_loss_val = epoch_step(val_loader, model, opt, loss, 'val', device)
        loss_list = [dice_loss_train, dice_loss_val]
        for j, loss_value in enumerate(loss_list):
            ws.write(i, j, loss_value)

        print(
            f'epoch {i}: train dice loss is {dice_loss_train}, val dice loss is {dice_loss_val}'
        )

        if dice_loss_val < min_dice_loss:
            min_dice_loss = dice_loss_val
            save_ckpt(ckpt_path, i, model.state_dict())
            cnt = 0
        else:
            cnt = cnt + 1
        if cnt == stop_count:
            stop_flag = True
            break
        sch.step()

    if not stop_flag:
        save_ckpt(ckpt_path, max_epoch - 1, model.state_dict())

    if not os.path.exists(xls_path):
        os.mkdir(xls_path)
    wb.save(os.path.join(xls_path, 'seg_of_rectum_unet.xls'))
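
# train() instantiates DiceLoss() without showing it. Below is a common
# soft-Dice formulation, included as an assumption about what the loss may
# look like rather than the repository's exact code.
import torch
import torch.nn as nn

class DiceLoss(nn.Module):
    def __init__(self, smooth=1e-5):
        super().__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        # pred: raw logits, target: binary mask of the same shape
        pred = torch.sigmoid(pred).reshape(pred.size(0), -1)
        target = target.reshape(target.size(0), -1)
        intersection = (pred * target).sum(dim=1)
        union = pred.sum(dim=1) + target.sum(dim=1)
        dice = (2.0 * intersection + self.smooth) / (union + self.smooth)
        return 1.0 - dice.mean()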
Example #3
def init_model(opt):
    net_g = UNet(nf=64)
    if opt.load_checkpoint_g is not None:
        net_g.load_state_dict(torch.load(opt.load_checkpoint_g))
    else:
        net_g.apply(weights_init)

    net_r = torchvision.models.resnet18(pretrained=True)
    net_r.fc = torch.nn.Linear(in_features=512, out_features=1)
    if opt.load_checkpoint_r is not None:
        net_r.load_state_dict(torch.load(opt.load_checkpoint_r))
    return net_g, net_r
def main():
    opt = get_opt()
    print(opt)
    print("Start to train stage: %s, named: %s!" % (opt.stage, opt.name))

    n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.distributed = n_gpu > 1
    local_rank = opt.local_rank

    if opt.distributed:
        torch.cuda.set_device(opt.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()

    # create dataset
    dataset = CPDataset(opt)

    # create dataloader
    loader = CPDataLoader(opt, dataset)
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=opt.batch_size, shuffle=False,
        num_workers=opt.workers, pin_memory=True, sampler=None)

    # visualization
    if not os.path.exists(opt.tensorboard_dir):
        os.makedirs(opt.tensorboard_dir)


    gmm_model = GMM(opt)
    load_checkpoint(gmm_model, "checkpoints/gmm_train_new/step_020000.pth")
    gmm_model.cuda()

    generator_model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)
    load_checkpoint(generator_model, "checkpoints/tom_train_new_2/step_070000.pth")
    generator_model.cuda()

    embedder_model = Embedder()
    load_checkpoint(embedder_model, "checkpoints/identity_train_64_dim/step_020000.pth")
    embedder_model = embedder_model.embedder_b.cuda()

    model = UNet(n_channels=4, n_classes=3)
    if opt.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.apply(utils.weights_init('kaiming'))
    model.cuda()

    if opt.checkpoint != '' and os.path.exists(opt.checkpoint):
        load_checkpoint(model, opt.checkpoint)

    test_residual(opt, data_loader, model, gmm_model, generator_model)

    print('Finished training %s, named: %s!' % (opt.stage, opt.name))
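
# load_checkpoint is called several times above but is not defined in this
# snippet. A minimal sketch of such a helper, assumed for illustration and
# not taken from the repository, could be:
import os
import torch

def load_checkpoint(model, checkpoint_path):
    # Load the saved state_dict onto the CPU first; the caller moves the
    # model to the GPU afterwards.
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(checkpoint_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(state_dict)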
Example #5
        os.makedirs(os.path.dirname(save_path))

    torch.save(model.state_dict(), save_path)


if single_gpu_flag(opt):
    board = SummaryWriter(os.path.join('runs', opt.name))

prev_model = create_model(opt)
prev_model.cuda()


model = UNet(n_channels=4, n_classes=3)
if opt.distributed:
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.apply(weights_init('kaiming'))
model.cuda()


if opt.use_gan:
    discriminator = Discriminator()
    discriminator.apply(utils.weights_init('gaussian'))
    discriminator.cuda()
    adv_embedder = resnet18(pretrained=True)
    if opt.distributed:
        adv_embedder = torch.nn.SyncBatchNorm.convert_sync_batchnorm(adv_embedder)
    adv_embedder.train()
    adv_embedder.cuda()

if opt.checkpoint != '' and os.path.exists(opt.checkpoint):
    load_checkpoint(model, opt.checkpoint)
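
# The truncated lines at the top of this snippet appear to belong to a
# checkpoint-saving helper. A self-contained sketch of that pattern follows;
# the function name is assumed for illustration.
import os
import torch

def save_model(model, save_path):
    # Create the parent directory on first save, then write the state_dict.
    save_dir = os.path.dirname(save_path)
    if save_dir and not os.path.exists(save_dir):
        os.makedirs(save_dir)
    torch.save(model.state_dict(), save_path)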
Example #6
        print("no pool")  #"no pool" means no pooling do reduce the size
        netG = UNet_nopool(2 * num_slice + 1, 1, act,
                           config.num_slice).to(device)
    else:
        print("pool")
        netG = UNet(2 * num_slice + 1, 1, act, config.num_slice).to(device)
else:
    netG = UNet_orig(2 * num_slice + 1, 1, act, config.num_slice).to(device)

# netG2 is the second generator; you can decide whether or not to stack the two generators
netG2 = UNet(1, 1, act, 0).to(device)

# Apply the weights_init function to randomly initialize all weights
#  to mean=0, stdev=0.2.
netG.apply(weights_init)
netG2.apply(weights_init)
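
# weights_init itself is not shown here. Per the comment above (mean=0,
# stdev=0.2), a minimal sketch could look like the following; it is an
# assumption for illustration, not the source's exact initializer.
import torch.nn as nn

def weights_init(m):
    # Draw convolution weights from N(0, 0.2) and zero the biases.
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        nn.init.normal_(m.weight, mean=0.0, std=0.2)
        if m.bias is not None:
            nn.init.zeros_(m.bias)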

# Print the model
# print(netG)


class Flatten(nn.Module):
    def forward(self, input):
        return input.view(input.size(0), -1)


class Discriminator(nn.Module):
    def __init__(self, nc, ndf):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
Example #7
def main():
    global use_gpu, EVENTS
    # set up some parameters
    batch_size = 2
    lr = 1e-3
    logging_path = 'logging/'
    num_epoches = 100
    epoch_to_save = 10

    # print("# of training samples: %d\n" %int(len(dataset_train)))

    # model= Nowcast(hidden_channels=16,use_gpu=use_gpu)
    model = UNet()
    # model=RadarNet(hidden_layers=16,use_gpu=True, device=0)
    print(model)
    num_params(model)
    model.apply(init_weights)
    # criterion= ComLoss()
    criterion = torch.nn.MSELoss()

    # model.load_state_dict(torch.load('../logging/newest-5_8.pth'))

    if use_gpu:
        model = model.cuda()
        criterion.cuda()

    #optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = MultiStepLR(optimizer, milestones=[40, 80], gamma=0.2)
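    # With lr=1e-3 and gamma=0.2, the learning rate drops to 2e-4 after
    # epoch 40 and again to 4e-5 after epoch 80.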

    #record
    writer = SummaryWriter(logging_path)

    #start training
    model.train()
    step = 0
    for epoch in range(num_epoches):
        start = time.time()

        for param_group in optimizer.param_groups:
            print('learning rate %f' % param_group['lr'])

        for e, event in enumerate(EVENTS[:2]):
            # ====================normal===============#
            dataset_train = DataSet(event=event)
            loader_train = DataLoader(dataset=dataset_train,
                                      num_workers=8,
                                      batch_size=batch_size,
                                      shuffle=True)
            # ====================DALI===============#
            # loader_train = get_iter_dali(event=EVENTS[0], batch_size=2,
            # num_threads=8)

            for i, data in enumerate(loader_train):
                # input size: (4,10,1,200,200)
                # target size: (4,10,1,200,200)
                # ====================normal===============#
                # input_train=Variable(torch.rand(size=(1,10,1,200,200)))
                # target_train=Variable(torch.ones(size=(1,10,1,200,200)))

                input_train = data[0].squeeze(dim=2)
                target_train = data[1].squeeze(dim=2)
                # ====================DALI===============#
                # data= data[0]
                # input_train=data['inputs']
                # target_train=data['target']
                optimizer.zero_grad()
                # if model.radarnet.predictor.Who.weight.grad is not None:
                # print('before backward gradient: ', model.radarnet.predictor.Who.weight.grad.max())

                # model.zero_grad()

                # print(input_train.size())
                input_train = normalizer(input_train)
                # target_train= normalizer(target_train)
                input_train, target_train = Variable(input_train), Variable(
                    target_train)
                if use_gpu:
                    input_train, target_train = input_train.cuda(
                    ), target_train.cuda()

                out_train = model(input_train)
                loss = criterion(target_train, out_train)

                loss.backward()
                # if model.radarnet.predictor.Who.weight.grad is not None:
                # print('after backward gradient: ', model.radarnet.predictor.Who.weight.grad.max())
                # print('gradient: ', model.predictor.U_z.weight.grad.max())
                # print('gradient: ', model.predictor.W_r.weight.grad.max())
                # print('gradient: ', model.predictor.U_r.weight.grad.max())
                # print('gradient: ', model.predictor.W_c.weight.grad.max())
                # print('gradient: ', model.predictor.U_c.weight.grad.max())

                optimizer.step()

                # output_train= torch.clamp(out_train, 0, 1)
                # ================NORMAL================ #
                print("[epoch %d/%d][event %d/%d][step %d/%d]  obj: %.4f " %
                      (epoch + 1, num_epoches, e, len(EVENTS), i + 1,
                       len(loader_train), loss.item()))
                # print("[epoch %d/%d][step %d/%d]  obj: %.4f "%(epoch+1,num_epoches,  i+1,len(loader_train),loss.item()))
                # ================DALI================ #
                # print("[epoch %d/%d][event %d/%d][step %d]  obj: %.4f "%(epoch+1,num_epoches,e, len(EVENTS), i+1,-loss.item()))

                # print(list(model.parameters()))
                if step % 10 == 0:
                    # log the raw MSE loss against the current global step
                    writer.add_scalar('loss', loss.item(), step)

                step += 1

        # save the model every epoch_to_save epochs
        if epoch % epoch_to_save == 0:
            torch.save(
                model.state_dict(),
                os.path.join(logging_path, 'net_epoch%d.pth' % (epoch + 1)))
        end = time.time()
        print('One epoch costs %.2f minutes!' % ((end - start) / 60.))

        scheduler.step(epoch)

    torch.save(model.state_dict(), os.path.join(logging_path, 'newest.pth'))
    # loss1, dmean, accmean, and f1mean are assumed to be per-epoch metric
    # lists collected during training; they are not defined in this snippet.
    draw_loss(loss1, epochs)
    draw_dice(dmean, epochs)
    draw_accuracy(accmean, epochs)
    draw_f1(f1mean, epochs)


if __name__ == '__main__':
    # log basic configurations
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    args = get_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    logging.info(f'Using device {device}')

    net = UNet(1, 1)
    net.apply(init_weights)

    if args.load:
        net.load_state_dict(torch.load(args.load, map_location=device))
        logging.info(f'Model loaded from {args.load}')
    net.to(device=device)

    train_net(net1=net,
              epochs=args.epochs,
              batch_size=args.batchsize,
              lr=args.lr,
              device1=device,
              img_scale=args.scale,
              val_percent=args.val / 100)