Example #1
def load_model(key: str):
    if key == BRAIN_DENOISING_MODEL:
        checkpoint = torch.load(BRAIN_DENOISING_MODEL)
        unet = UNet(in_channels=1, n_classes=1, depth=3, wf=8, padding=True,
                    batch_norm=False, up_mode='upconv', grid=True, bias=True)
        unet.load_state_dict(checkpoint["model_state_dict"])
        return unet

    if key == BRAIN_SEG_MODEL:
        checkpoint = torch.load(BRAIN_SEG_MODEL)
        unet = UNet(in_channels=1, n_classes=1, depth=3, wf=8, padding=True,
                    batch_norm=False, up_mode='upconv', grid=False, bias=True)
        unet.load_state_dict(checkpoint["model_state_dict"])
        return unet

    if key == ABDOM_DENOISING_MODEL:
        checkpoint = torch.load(ABDOM_DENOISING_MODEL)
        unet = UNet(in_channels=1, n_classes=1, depth=3, wf=8, padding=True,
                    batch_norm=False, up_mode='upconv', grid=True, bias=True)
        unet.load_state_dict(checkpoint["model_state_dict"])
        return unet

    if key == ABDOM_SEG_MODEL:
        checkpoint = torch.load(ABDOM_SEG_MODEL)
        unet = UNet(in_channels=1, n_classes=1, depth=3, wf=8, padding=True,
                    batch_norm=False, up_mode='upconv', grid=False, bias=True)
        unet.load_state_dict(checkpoint["model_state_dict"])
        return unet

    raise ValueError(f"unknown model key: {key}")
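The four branches above differ only in the checkpoint path (which doubles as the key) and the `grid` flag, so the function can be table-driven. A minimal sketch under that assumption, reusing the same `UNet` signature and checkpoint format:

MODEL_GRID = {
    BRAIN_DENOISING_MODEL: True,
    BRAIN_SEG_MODEL: False,
    ABDOM_DENOISING_MODEL: True,
    ABDOM_SEG_MODEL: False,
}

def load_model(key: str):
    if key not in MODEL_GRID:
        raise ValueError(f"unknown model key: {key}")
    checkpoint = torch.load(key)
    unet = UNet(in_channels=1, n_classes=1, depth=3, wf=8, padding=True,
                batch_norm=False, up_mode='upconv', grid=MODEL_GRID[key], bias=True)
    unet.load_state_dict(checkpoint["model_state_dict"])
    return unet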
Example #2
def main(conf):
    device = "cuda:0" if torch.cuda.is_available() else 'cpu'
    beta_schedule = "linear"
    beta_start = 1e-4
    beta_end = 2e-2
    n_timestep = 1000

    conf.distributed = dist.get_world_size() > 1

    transform = transforms.Compose(
        [
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
        ]
    )

    train_set = MultiResolutionDataset(
        conf.dataset.path, transform, conf.dataset.resolution
    )
    train_sampler = dist.data_sampler(
        train_set, shuffle=True, distributed=conf.distributed
    )
    train_loader = conf.training.dataloader.make(train_set, sampler=train_sampler)

    model = UNet(
        conf.model.in_channel,
        conf.model.channel,
        channel_multiplier=conf.model.channel_multiplier,
        n_res_blocks=conf.model.n_res_blocks,
        attn_strides=conf.model.attn_strides,
        dropout=conf.model.dropout,
        fold=conf.model.fold,
    )
    model = model.to(device)
    ema = UNet(
        conf.model.in_channel,
        conf.model.channel,
        channel_multiplier=conf.model.channel_multiplier,
        n_res_blocks=conf.model.n_res_blocks,
        attn_strides=conf.model.attn_strides,
        dropout=conf.model.dropout,
        fold=conf.model.fold,
    )
    ema = ema.to(device)

    if conf.distributed:
        model = nn.parallel.DistributedDataParallel(
            model,
            device_ids=[dist.get_local_rank()],
            output_device=dist.get_local_rank(),
        )

    optimizer = conf.training.optimizer.make(model.parameters())
    scheduler = conf.training.scheduler.make(optimizer)

    betas = make_beta_schedule(beta_schedule, beta_start, beta_end, n_timestep)
    diffusion = GaussianDiffusion(betas).to(device)

    train(conf, train_loader, model, ema, diffusion, optimizer, scheduler, device)
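`make_beta_schedule` is an external helper; for the "linear" schedule requested above, a minimal stand-in could look like this (an assumption, not necessarily the project's actual implementation):

import torch

def make_beta_schedule(schedule, beta_start, beta_end, n_timestep):
    # Linearly spaced betas from beta_start to beta_end, as in the original DDPM setup.
    if schedule == "linear":
        return torch.linspace(beta_start, beta_end, n_timestep, dtype=torch.float64)
    raise NotImplementedError(f"unknown beta schedule: {schedule}")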
Example #3
    def __init__(self,
                 cur_dir,
                 suffix='.tif',
                 cuda=True,
                 testBatchSize=4,
                 batchSize=4,
                 nEpochs=200,
                 lr=0.01,
                 threads=4,
                 seed=123,
                 size=256,
                 input_transform=True,
                 target_transform=True):
        #        super(TrainModel, self).__init__()

        self.data_dir = cur_dir + '/data/'
        self.suffix = suffix
        """
        training parameters are set here

        """
        self.colordim = 1
        self.cuda = cuda
        if self.cuda and not torch.cuda.is_available():
            raise Exception("No GPU found, please run without --cuda")
        self.testBatchSize = testBatchSize
        self.batchSize = batchSize
        self.nEpochs = nEpochs
        self.lr = lr
        self.threads = threads
        self.seed = seed
        self.size = size

        self.input_transform = input_transform
        self.target_transform = target_transform
        self.__check_dir = cur_dir + '/checkpoint'
        if not exists(self.__check_dir):
            os.mkdir(self.__check_dir)
        self.__epoch_dir = cur_dir + '/epoch'
        if not exists(self.__epoch_dir):
            os.mkdir(self.__epoch_dir)
        """
        initialize the model
        """

        if self.cuda:
            self.unet = UNet(self.colordim).cuda()
            self.criterion = nn.MSELoss().cuda()
        else:
            self.unet = UNet(self.colordim)
            self.criterion = nn.MSELoss()

        self.optimizer = optim.SGD(self.unet.parameters(),
                                   lr=self.lr,
                                   momentum=0.9,
                                   weight_decay=0.0001)
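A hypothetical usage of this trainer (assuming the class is named TrainModel, as the commented-out super() call suggests):

trainer = TrainModel(cur_dir=os.getcwd(), cuda=torch.cuda.is_available())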
Example #4
def get_score():
    ckpt_dir = os.listdir(
        '/media/muyun99/DownloadResource/dataset/opends-Supervisely Person Dataset/checkpoints'
    )
    Unet_dir = [
        "25_noise", "50_noise", "75_noise", "25_noise_pro", "50_noise_pro",
        "75_noise_pro", "75_noise_pro_finetune"
    ]
    UNet_Pick_dir = ["75_noise_pro_QAM", "75_noise_pro_QAM_finetune"]
    UNet_Pick_cbam_dir = ["75_noise_pro_QAM_cbam_finetune"]
    for ckpt_name in ckpt_dir:
        print(ckpt_name)
        flag = "unet_pick"
        if ckpt_name in Unet_dir:
            net = UNet(n_classes=1, n_channels=3)
            flag = "unet"
        elif ckpt_name in UNet_Pick_dir:
            net = UNet_Pick(n_classes=1, n_channels=3)
        elif ckpt_name in UNet_Pick_cbam_dir:
            net = UNet_Pick_cbam(n_classes=1, n_channels=3)
        else:
            continue
        true_dir = os.path.join(
            '/media/muyun99/DownloadResource/dataset/opends-Supervisely Person Dataset/checkpoints',
            ckpt_name)
        try:
            best_ckpt_path = get_best_checkpoint(net, true_dir, flag)
            test_score = get_test_score(net, best_ckpt_path, flag)
            print(f'{ckpt_name} best test score is {test_score}')
        except Exception as ex:
            print(f'{ckpt_name} error')
            print(f'Exception: {ex}')
            continue
Example #5
def train(network_specs,
          training_params,
          image_path,
          save_path,
          ckpt_path,
          epoch=10):

    print('creating datapipe...')
    # create images DataPipeline
    datapipe = DataPipeline(image_path=image_path,
                            training_params=training_params)

    print('creating network model...')
    # create the UNet model
    model = UNet(network_specs=network_specs,
                 datapipe=datapipe,
                 training_params=training_params)

    # train the model
    # save_config is flexible
    print('''
=============
 HERE WE GO
=============
''')
    model.train(save_path=save_path, ckpt_path=ckpt_path, epoch=epoch)
Example #6
def main(args):
    train_dataloader, test_dataloader = dataloader.load_datasets(
                                         batch_size=args.batch_size,
                                         image_resize=args.image_resize,
                                         train_dataset_size=args.train_data_size,
                                         test_dataset_size=args.test_data_size,
                                         download=args.download_dataset
                                         )
    
    model = UNet(out_channels=21)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    ce_weight = utils.get_weight(train_dataloader.dataset)
    if len(ce_weight) < 21:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.CrossEntropyLoss(weight=ce_weight)

    print(f'Start training for {args.epochs} epochs')
    train(model=model,
          dataloader=train_dataloader,
          epochs=args.epochs,
          optimizer=optimizer,
          criterion=criterion,
          save_output_every=1,
          )

    print('Training finished')
    print(f'Start evaluating with {len(test_dataloader.dataset)} images')

    eval(model, test_dataloader)

    print('All done')
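`utils.get_weight` is defined elsewhere; one plausible implementation returns inverse-frequency class weights computed over the mask pixels of a (image, mask) dataset (an assumption, shown only as a sketch):

import torch

def get_weight(dataset, n_classes=21):
    # Hypothetical: count mask pixels per class, weight by inverse frequency.
    counts = torch.zeros(n_classes)
    for _, mask in dataset:
        counts += torch.bincount(mask.flatten().long(), minlength=n_classes)[:n_classes].float()
    freq = counts / counts.sum()
    return 1.0 / (freq + 1e-6)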
Example #7
def main():

    FLAGS = parser.parse_args()

    # Loading train and test data.
    # load_data function exists in file utils.py
    print("Loading dataset.")
    X_train, y_train = load_data(FLAGS.dataset_dir + '/train', FLAGS.img_size,
                                 FLAGS.augment_data)
    X_test, y_test = load_data(FLAGS.dataset_dir + '/test', FLAGS.img_size,
                               FLAGS.augment_data)

    # Making sure that the data was loaded successfully.
    print("Train set image size : ", X_train.shape)
    print("Train set label size : ", y_train.shape)
    print("Test set image size : ", X_test.shape)
    print("Test set label size : ", y_test.shape)
    print("Dataset loaded successfully.")

    # Creating a unet object.
    # class UNet exists in file model.py
    unet = UNet(FLAGS.img_size)

    # Training the network, printing the loss value for every epoch,
    # and the accuracy on the test set after the training is complete
    train_loss_values, test_loss_values = unet.train(X_train, y_train, X_test,
                                                     y_test, FLAGS.num_epochs,
                                                     FLAGS.learning_rate,
                                                     FLAGS.model_save_dir)

    # Plotting loss values on train and test set, and saving it as an image Loss.png
    # plot_loss exists in file utils.py
    plot_loss(train_loss_values, test_loss_values, 'Loss.png')
Example #8
    def __init__(self, opt):
        self.opt = opt
        if opt.inference:
            self.testset = TestImageDataset(fdir=opt.impaths['test'],
                                            imsize=opt.imsize)
        else:
            self.trainset = ImageDataset(fdir=opt.impaths['train'],
                                         bdir=opt.impaths['btrain'],
                                         imsize=opt.imsize,
                                         mode='train',
                                         aug_prob=opt.aug_prob,
                                         prefetch=opt.prefetch)
            self.valset = ImageDataset(fdir=opt.impaths['val'],
                                       bdir=opt.impaths['bval'],
                                       imsize=opt.imsize,
                                       mode='val',
                                       aug_prob=opt.aug_prob,
                                       prefetch=opt.prefetch)
        self.model = UNet(n_channels=3,
                          n_classes=1,
                          bilinear=self.opt.use_bilinear)
        if opt.checkpoint:
            self.model.load_state_dict(
                torch.load('./state_dict/{:s}'.format(opt.checkpoint),
                           map_location=self.opt.device))
            print('checkpoint {:s} has been loaded'.format(opt.checkpoint))
        if opt.multi_gpu == 'on':
            self.model = torch.nn.DataParallel(self.model)
        self.model = self.model.to(opt.device)
        self._print_args()
Example #9
def main():
    args = parser.parse_args()
    
    dataset = SyntheticCellDataset(args.img_dir, args.mask_dir)
    
    indices = torch.randperm(len(dataset)).tolist()
    sr = int(args.split_ratio * len(dataset))
    train_set = torch.utils.data.Subset(dataset, indices[:-sr])
    val_set = torch.utils.data.Subset(dataset, indices[-sr:])
    
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, pin_memory=True)
    
    device = torch.device("cpu" if not args.use_cuda else "cuda:0")
    
    model = UNet()
    model.to(device)
    
    dsc_loss = DiceLoss()
    
    optimizer = torch.optim.Adam(model.parameters(), args.lr)
    
    val_overall = 1000
    for epoch in range(args.N_epoch):
        model, train_loss, optimizer = train(model, train_loader, device, optimizer)
        val_loss = validate(model, val_loader, device)
        
        if val_loss < val_overall:
            save_checkpoint(args.model_save_dir + '/epoch_'+str(epoch+1), model, train_loss, val_loss, epoch)
            val_overall = val_loss
            
        print('[{}/{}] train loss: {} val loss: {}'.format(epoch+1, args.N_epoch, train_loss, val_loss))
    print('Training completed')
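`DiceLoss` is imported from elsewhere; a common formulation for binary masks, given as a hedged sketch rather than the project's actual class:

import torch
import torch.nn as nn

class DiceLoss(nn.Module):
    # Hypothetical implementation: 1 - soft Dice coefficient.
    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        pred = torch.sigmoid(pred).flatten(1)
        target = target.flatten(1)
        inter = (pred * target).sum(dim=1)
        union = pred.sum(dim=1) + target.sum(dim=1)
        return 1.0 - ((2.0 * inter + self.smooth) / (union + self.smooth)).mean()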
Example #10
def load_model(data, model_path, cuda=True):

    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    unet = UNet()

    if cuda:
        unet = unet.cuda()

    if not cuda:
        unet.load_state_dict(
            torch.load(model_path, map_location=lambda storage, loc: storage))
    else:
        unet.load_state_dict(torch.load(model_path))

    if cuda:
        data = Variable(data.cuda())
    else:
        data = Variable(data)
    data = torch.unsqueeze(data, 0)

    unet.eval()
    with torch.no_grad():
        output = unet(data)

    return output
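On modern PyTorch, `Variable` is a no-op wrapper and the CPU/GPU branches can collapse into one path via `map_location`. A device-agnostic sketch of the same routine (assumed equivalent behavior):

def load_model(data, model_path, cuda=True):
    device = torch.device("cuda" if cuda and torch.cuda.is_available() else "cpu")
    unet = UNet().to(device)
    unet.load_state_dict(torch.load(model_path, map_location=device))
    unet.eval()
    with torch.no_grad():
        # Add the batch dimension before the forward pass.
        return unet(data.unsqueeze(0).to(device))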
Example #11
def main():
    width_in = 284
    height_in = 284
    width_out = 196
    height_out = 196
    PATH = './unet.pt'
    x_train, y_train, x_val, y_val = get_dataset(width_in, height_in, width_out, height_out)
    print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)

    batch_size = 3
    epochs = 1
    epoch_lapse = 50
    threshold = 0.5
    learning_rate = 0.01
    use_gpu = torch.cuda.is_available()
    unet = UNet(in_channel=1, out_channel=2)
    if use_gpu:
        unet = unet.cuda()
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(unet.parameters(), lr=learning_rate, momentum=0.99)
    if sys.argv[1] == 'train':
        train(unet, batch_size, epochs, epoch_lapse, threshold, learning_rate,
              criterion, optimizer, x_train, y_train, x_val, y_val, width_out,
              height_out)
    else:
        if use_gpu:
            unet.load_state_dict(torch.load(PATH))
        else:
            unet.load_state_dict(torch.load(PATH, map_location='cpu'))
        print(unet.eval())
Example #12
def train(epochs, batch_size, learning_rate):

    train_loader = torch.utils.data.DataLoader(SegThorDataset(
        "data",
        phase='train',
        transform=transforms.Compose([Rescale(0.25),
                                      Normalize(),
                                      ToTensor()]),
        target_transform=transforms.Compose([Rescale(0.25),
                                             ToTensor()])),
                                               batch_size=batch_size,
                                               shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    model.apply(weight_init)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.95)
    #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)

        running_loss = 0.0
        loss_seg = np.zeros(5)

        for batch_idx, (train_data, labels) in enumerate(train_loader):
            train_data, labels = train_data.to(
                device, dtype=torch.float), labels.to(device,
                                                      dtype=torch.uint8)

            print("train data size", train_data.size())
            print("label size", labels.size())
            optimizer.zero_grad()
            output = model(train_data)

            print("output: {} and taget: {}".format(output.size(),
                                                    labels.size()))
            loss_label, loss = dice_loss(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            for i in range(5):
                loss_seg[i] += loss_label[i]

        print("Length: ", len(train_loader))
        epoch_loss = running_loss / len(train_loader)
        epoch_loss_class = np.true_divide(loss_seg, len(train_loader))
        print(
            "Dice per class: Background = {:.4f} Eusophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
            .format(epoch_loss_class[0], epoch_loss_class[1],
                    epoch_loss_class[2], epoch_loss_class[3],
                    epoch_loss_class[4]))
        print("Total Dice Loss: {:.4f}\n".format(epoch_loss))

    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
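Saving the whole module with `torch.save(model, ...)` pickles the class definition along with the weights; a state_dict checkpoint is more portable across code changes (a suggested alternative, not what the original does):

    os.makedirs("models", exist_ok=True)
    torch.save(model.state_dict(), "models/model_state.pt")
    # Later: model = UNet(); model.load_state_dict(torch.load("models/model_state.pt"))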
Example #13
def main():
    train_root_dir = '/content/drive/My Drive/DDSM/train/CBIS-DDSM'
    test_root_dir = '/content/drive/My Drive/DDSM/test/CBIS-DDSM'
    path_weights = '/content/drive/My Drive/Cv/weights'
    batch_size = 3
    valid_size = 0.2
    nb_epochs = 20
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # data loaders
    loaders = dataloaders(train_root_dir, combined_transform, batch_size, valid_size)

    model = UNet(in_channels=3, out_channels=1)
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.3)

    model = train(model, optimizer, exp_lr_scheduler, loaders, nb_epochs, device, path_weights)
    # from torchsummary import summary
    #
    # summary(model, input_size=(3, 224, 224))

    # test_transform = transforms.Compose([
    #     transforms.ToTensor(),
    #     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    # ])
    test_loader = DataLoader(
        MassSegmentationDataset(test_root_dir, combined_transform),
        batch_size=batch_size,
        num_workers=0
    )

    test(model, test_loader, device)
Example #14
def train(network_specs, training_params, image_path, save_path, ckpt_path):

    print('creating datapipe...')
    # create images DataPipeline
    datapipe = DataPipeline(image_path=image_path,
                            training_params=training_params)

    print('creating network model...')
    # create the UNet model in evaluating mode
    model = UNet(network_specs=network_specs,
                 datapipe=datapipe,
                 training_params=training_params,
                 mode='evaluating')

    # evaluate the model from the given checkpoint
    print('''
=============
 HERE WE GO
=============
''')
    images, labels, preds = model.evaluate(ckpt_path=ckpt_path)

    for i in range(labels.shape[0]):
        # plt.subplots(figsize=[16,12])
        for j in range(3):
            plt.subplot(2, 6, 2 * j + 1)
            plt.imshow(labels[i, :, :, j])
            plt.subplot(2, 6, 2 * j + 2)
            plt.imshow(preds[i, :, :, j])

        plt.subplot(2, 6, 7)
        plt.imshow(np.squeeze(images[i]))
        plt.show()
Example #15
def main():

    FLAGS = parser.parse_args()

    # Calculate the predictions for all the images in the input_img_dir.
    for img_name in os.listdir(FLAGS.input_img_dir):

        if img_name.endswith('.png'):
            original_img = load_img(
                FLAGS.input_img_dir + '/' +
                img_name)  # load_img function exists in file utils.py

            # Resizing image because of the small memory size
            input_img = tl.prepro.imresize(original_img,
                                           [FLAGS.img_size, FLAGS.img_size])
            input_img = np.reshape(input_img,
                                   [1, FLAGS.img_size, FLAGS.img_size, 3])

            unet = UNet(FLAGS.img_size)

            # The output is an array of the size(img_size * img_size, 1)
            prediction = unet.predict(input_img, FLAGS.model_save_dir)

            # Saving the image given the probabilities
            # save_img function exists in file utils.py
            save_img(
                prediction, original_img, FLAGS.img_size, FLAGS.input_img_dir +
                '/' + img_name.split('.')[0] + '_pred.png')
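Note that `UNet(FLAGS.img_size)` is rebuilt for every image in the loop; constructing it once before the loop avoids repeated model setup (a suggested restructuring):

    unet = UNet(FLAGS.img_size)
    for img_name in os.listdir(FLAGS.input_img_dir):
        # ... resize, predict, and save as above ...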
Example #16
def train():
    ex = wandb.init(project="PQRST-segmentation")
    ex.config.setdefaults(wandb_config)

    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)s: %(message)s")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')

    net = UNet(in_ch=1, out_ch=4)
    net.to(device)

    try:
        train_model(net=net,
                    device=device,
                    batch_size=wandb.config.batch_size,
                    lr=wandb.config.lr,
                    epochs=wandb.config.epochs)
    except KeyboardInterrupt:
        try:
            save = input("save?(y/n)")
            if save == "y":
                torch.save(net.state_dict(), 'net_params.pkl')
            sys.exit(0)
        except SystemExit:
            os._exit(0)
Example #17
def train():
    # Load the data sets
    train_dataset = NucleusDataset(
        "data",
        train=True,
        transform=Compose([Rescale(256), ToTensor()]),
        target_transform=Compose([Rescale(256), ToTensor()]))

    # Use cuda if available
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Set model to GPU/CPU
    if args.from_checkpoint:
        model = UNet.load(args.from_checkpoint)
    else:
        model = UNet()
    model.to(device)

    # Initialize optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)

    # Initialize trainer
    trainer = Trainer(dataset=train_dataset,
                      model=model,
                      optimizer=optimizer,
                      batch_size=args.batch_size,
                      device=device,
                      output_dir=output_dir)

    # Run the training
    trainer.run_train_loop(epochs=args.epochs)
Example #18
def see_results(n_channels, n_classes, load_weights, dir_img, dir_cmp, savedir,
                title):
    # Use GPU or not
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Create the model
    net = UNet(n_channels, n_classes).to(device)
    net = torch.nn.DataParallel(
        net, device_ids=list(range(torch.cuda.device_count()))).to(device)

    # Load old weights
    checkpoint = torch.load(load_weights, map_location='cpu')
    net.load_state_dict(checkpoint['state_dict'])

    # Load the dataset
    loader = get_dataloader_show(dir_img, dir_cmp)

    # If savedir does not exists make folder
    if not os.path.exists(savedir):
        os.makedirs(savedir)

    net.eval()
    with torch.no_grad():
        for (data, gt) in loader:
            # Use GPU or not
            data, gt = data.to(device), gt.to(device)

            # Forward
            predictions = net(data)

            save_image(predictions, os.path.join(savedir, title + "_pred.png"))
            save_image(gt, os.path.join(savedir, title + "_gt.png"))
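As written, every batch in the loop writes to the same two filenames, so only the last batch survives; indexing the filenames preserves all outputs (a suggested change):

        for idx, (data, gt) in enumerate(loader):
            # ... forward pass as above ...
            save_image(predictions, os.path.join(savedir, f"{title}_{idx}_pred.png"))
            save_image(gt, os.path.join(savedir, f"{title}_{idx}_gt.png"))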
Example #19
    def __init__(self, n_channels, n_classes):
        super(UNet_Pick_cbam, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.Unet = UNet(n_classes=n_classes, n_channels=n_channels)

        self.QAM = implement.QAM_cbam()
        self.OCM = implement.OCM()
Example #20
    def build_model(self):
        """Create a model"""
        self.model = UNet(self.in_dim, self.out_dim, self.num_filters)
        self.model = self.model.float()
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          self.lr, [self.beta1, self.beta2],
                                          weight_decay=self.weight_decay)
        self.print_network(self.model, 'unet')
        self.model.to(self.device)
Example #21
def main():
    """
    Training.
    """
    global start_epoch, epoch, checkpoint

    # Initialize model or load checkpoint
    if checkpoint is None:
        model = UNet(in_channels, out_channels)
        # Initialize the optimizer
        optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad,
                                                   model.parameters()),
                                     lr=lr)
    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = nn.L1Loss().to(device)

    # Custom dataloaders
    train_loader = torch.utils.data.DataLoader(TripletDataset(
        train_folder, crop_size, scale),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=workers,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(TripletDataset(
        test_folder, crop_size, scale),
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=workers,
                                              pin_memory=True)

    # Total number of epochs to train for
    epochs = int(iterations // len(train_loader) + 1)

    # Epochs
    for epoch in range(start_epoch, epochs):
        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch,
              epochs=epochs)
        test(test_loader=test_loader, model=model, criterion=criterion)

        # Save checkpoint (make sure the target directory exists)
        os.makedirs('checkpoints', exist_ok=True)
        torch.save({
            'epoch': epoch,
            'model': model,
            'optimizer': optimizer
        }, f'checkpoints/checkpoint_unet_{epoch}.pth.tar')
Example #22
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("--action",
                        type=str,
                        default='train',
                        help="train or test")
    args = parser.parse_args()

    config = load_config()

    # Use TensorBoard
    time_now = datetime.now().isoformat()

    if not os.path.exists(config.RUN_PATH):
        os.mkdir(config.RUN_PATH)
    writer = SummaryWriter(log_dir=config.RUN_PATH)

    # Random seeds
    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    # INIT GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    net = UNet(2).to(config.DEVICE)
    # Debug print: inspect the last block of a torchvision ResNet-18.
    print(list(torchvision.models.resnet18(False).children())[7])

    optimizer = optim.Adam(net.parameters(), betas=(0.5, 0.999), lr=config.LR)
    loss = nn.L1Loss()

    # Load the dataset
    if args.action == 'train':

        train_dataset = LABDataset(config, config.TRAIN_PATH)
        len_train = len(train_dataset)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=config.BATCH_SIZE, shuffle=True)
        iter_per_epoch = len(train_loader)
        train_(config, train_loader, net, optimizer, loss, len_train,
               iter_per_epoch, writer)

    if args.action == "test":

        test_dataset = LABDataset(config, config.TEST_PATH)
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  shuffle=False)
        test(config, test_loader, net, loss)
Example #23
def main():
    train_transform = A.Compose([
        A.Resize(height=config.IMAGE_HEIGHT, width=config.IMAGE_WIDTH),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.Normalize(mean=[0.0, 0.0, 0.0],
                    std=[1.0, 1.0, 1.0],
                    max_pixel_value=255.0),
        ToTensorV2(),
    ])

    val_transform = A.Compose([
        A.Resize(height=config.IMAGE_HEIGHT, width=config.IMAGE_WIDTH),
        A.Normalize(mean=[0.0, 0.0, 0.0],
                    std=[1.0, 1.0, 1.0],
                    max_pixel_value=255.0),
        ToTensorV2(),
    ])

    model = UNet(in_channels=3, out_channels=1).to(config.DEVICE)
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=config.LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        config.TRAIN_IMAGE_DIR,
        config.TRAIN_MASK_DIR,
        config.VAL_IMG_DIR,
        config.VAL_MASK_DIR,
        config.BATCH_SIZE,
        train_transform,
        val_transform,
    )

    if config.LOAD_MODEL:
        load_checkpoint(torch.load('my_checkpoint.pth.tar'), model)

    scaler = torch.cuda.amp.GradScaler()
    for epoch in range(config.NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler)

        # save model
        checkpoint = {
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        save_checkpoint(checkpoint)

        # check acc
        check_accuracy(val_loader, model, device=config.DEVICE)

        # print some examples to a folder
        save_predictions_as_imgs(val_loader,
                                 model,
                                 folder='saved_images',
                                 device=config.DEVICE)
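`train_fn` is defined elsewhere; with the `GradScaler` created above, one mixed-precision training epoch typically looks like this (a sketch under the assumption of binary masks and `BCEWithLogitsLoss`):

def train_fn(loader, model, optimizer, loss_fn, scaler):
    for data, targets in loader:
        data = data.to(config.DEVICE)
        targets = targets.float().unsqueeze(1).to(config.DEVICE)

        # Forward pass under autocast for mixed precision.
        with torch.cuda.amp.autocast():
            predictions = model(data)
            loss = loss_fn(predictions, targets)

        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()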
Example #24
def main(tocsv=False, save=False, mask=False, valid_train=False, toiou=False):
    model_name = config['param']['model']
    resize = not config['valid'].getboolean('pred_orig_size')

    if model_name == 'unet_vgg16':
        model = UNetVgg16(3, 1, fixed_vgg=True)
    elif model_name == 'dcan':
        model = DCAN(3, 1)
    elif model_name == 'caunet':
        model = CAUNet()
    elif model_name == 'camunet':
        model = CAMUNet()
    else:
        model = UNet()

    if torch.cuda.is_available():
        model = model.cuda()
        # model = torch.nn.DataParallel(model).cuda()

    # Sets the model in evaluation mode.
    model.eval()

    epoch = load_ckpt(model)
    if epoch == 0:
        print("Aborted: checkpoint not found!")
        return

    # prepare dataset
    compose = Compose(augment=False, resize=resize)
    data_dir = 'data/stage1_train' if valid_train else 'data/stage1_test'
    dataset = KaggleDataset(data_dir, transform=compose)
    results = predict(model, dataset, compose, resize)

    if tocsv:
        with open('result.csv', 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['ImageId', 'EncodedPixels'])
            for uid, _, y, y_c, y_m, _, _, _, _ in results:
                for rle in prob_to_rles(y, y_c, y_m):
                    writer.writerow([uid, ' '.join([str(i) for i in rle])])
    elif toiou and valid_train:
        with open('iou.csv', 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['ImageId', 'IoU'])
            for uid, _, y, y_c, y_m, gt, _, _, _ in tqdm(results):
                iou = get_iou(y, y_c, y_m, gt)
                writer.writerow([uid, iou])
    else:
        for uid, x, y, y_c, y_m, gt, gt_s, gt_c, gt_m in tqdm(results):
            if valid_train:
                show_groundtruth(uid, x, y, y_c, y_m, gt, gt_s, gt_c, gt_m,
                                 save)
            elif mask:
                save_mask(uid, y, y_c, y_m)
            else:
                show(uid, x, y, y_c, y_m, save)
Example #25
    def __init__(self, model):
        
        self.model = model

        if self.model == 'U-Net':
            self.network = UNet()
        elif self.model == 'R2U-Net':
            self.network = R2UNet()
        elif self.model == 'IterNet':
            self.network = IterNet()
        else:
            raise ValueError('unknown model: {}'.format(self.model))
Example #26
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--file_paths', default="data/files.txt")
    parser.add_argument('--landmark_paths', default="data/landmarks.txt")
    parser.add_argument('--landmark', type=int, default=0)
    parser.add_argument('--save_path')
    parser.add_argument('--num_epochs', type=int, default=int(1e9))
    parser.add_argument('--log_freq', type=int, default=100)
    parser.add_argument('--separator', default=",")
    parser.add_argument('--batch_size', type=int, default=8)
    args = parser.parse_args()

    file_paths = args.file_paths
    landmark_paths = args.landmark_paths
    landmark_wanted = args.landmark
    num_epochs = args.num_epochs
    log_freq = args.log_freq
    save_path = args.save_path

    x, y = get_data(file_paths,
                    landmark_paths,
                    landmark_wanted,
                    separator=args.separator)
    print(f"Got {len(x)} images with {len(y)} landmarks")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("device", device)

    dataset = TensorDataset(torch.Tensor(x), torch.Tensor(y))
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    unet = UNet(in_dim=1, out_dim=6, num_filters=4)
    criterion = torch.nn.CrossEntropyLoss(weight=get_weigths(y).to(device))
    optimizer = optim.SGD(unet.parameters(), lr=0.001, momentum=0.9)

    unet.to(device)

    for epoch in range(num_epochs):
        running_loss = 0.0
        for i, data in enumerate(dataloader):
            inputs, labels = data
            # Move the batch to the same device as the network.
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()

            outputs = unet(inputs)
            loss = criterion(outputs, labels.long())
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
        print(f"[{epoch+1}/{num_epochs}] loss: {running_loss}")
        if epoch % log_freq == log_freq - 1:
            if save_path is not None:
                torch.save(unet.state_dict(),
                           os.path.join(save_path, f"unet-{epoch}.pt"))
Example #27
def init_model(param):

    fusion = UNet(n_channels=1, n_classes=1, bilinear=True).to(param['device'])
    model = FrontStaticModel().to(param['device'])
    # torch.load of a full module replaces the fresh instance above.
    model = torch.load('./static_camerabased_20', map_location=param['device'])
    print('*** Model loads successfully ***')

    for param_ in model.parameters():
        param_.requires_grad = False

    param['model'] = (model, fusion)
Example #28
def main(argv):
    data = get_data(FLAGS.train_data, FLAGS.num_classes)
    train_data = data.batch(16, drop_remainder=True)

    model = UNet(num_classes=FLAGS.num_classes)
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss='binary_crossentropy')
    model.fit(train_data, epochs=25)

    for index, (image, label) in enumerate(data.batch(1).take(5)):
        prediction = model.predict(image)
        plot_result(f'results/{index}.png', image, label, prediction)
Example #29
def main_UNet_II():
    # TODO: get these through CLI args
    # Step 01: Get Input Resources and Model Configuration
    parser = app_argparse()
    args = parser.parse_args()
    # print(args)

    use_gpu = args.use_gpu
    # tile_size = args.tile_size
    tile_size = (200, 200)

    INPUT_IMAGE_PATH = args.input_RGB
    LABEL_IMAGE_PATH = args.input_GT
    # WEIGHTS_FILE_PATH = args.output_model_path
    WEIGHTS_FILE_PATH = "weights/Adam.UNet.weights.II.pt"
    OUTPUT_IMAGE_PATH = args.output_images

    # Step 02: Get Input Resources and Model Configuration
    device = utils.device(use_gpu=use_gpu)
    model = UNet()
    # model = utils.load_weights_from_disk(model)
    model = utils.load_entire_model(model, WEIGHTS_FILE_PATH, use_gpu)
    print("use pretrained model!")
    # print(model)
    # summary(model, (3, tile_size[0], tile_size[1]))

    # FIXME: this loader is a known issue
    loader = dataset.full_image_loader(
        INPUT_IMAGE_PATH, LABEL_IMAGE_PATH, tile_size=tile_size)

    prediction = predict(model, loader, device=device,
                         class_label=ClassLabel.house)

    # Step 03: save the output
    input_image = utils.input_image(INPUT_IMAGE_PATH)
    pred_image, mask_image = utils.overlay_class_prediction(
        input_image, prediction)

    pred_image_path = OUTPUT_IMAGE_PATH + "/prediction.png"
    pred_image.save(pred_image_path)

    pred_mask_path = OUTPUT_IMAGE_PATH + "/mask.png"
    mask_image.save(pred_mask_path)

    print("(i)    Prediction and Mask image saved at {}".format(pred_image_path))
    print("(ii)   Mask image saved at {}".format(pred_mask_path))

    # Step 04: Check the metrics

    img_gt = np.array(Image.open(LABEL_IMAGE_PATH), dtype=np.int32)
    img_mask = np.array(Image.open(pred_mask_path), dtype=np.int32)

    metricComputation(img_gt, img_mask)
Example #30
def main():
    unet = UNet(inifile='setting.ini')
    ds = dataset.Dataset()

    fiji_path = unet.fiji_dir
    ij = imagej.init(fiji_path, headless=False)
    print(ij.getVersion())
    ij.batchmode = True

    if not os.path.exists(unet.output_path):
        os.mkdir(unet.output_path)

    Process3DOC(ij, unet)