Example #1
def main():

    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpus)
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong GPU id, please run without --cuda")

    print("===> Loading datasets")

    dataset = BasicDataset(dir_img,
                           dir_mask,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                    std=[0.229, 0.224, 0.225])
                           ]))
    training_data_loader = DataLoader(dataset=dataset,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    netContent = None  # only built when --vgg_loss is set; passed through to train()
    if opt.vgg_loss:
        print("===> Loading VGG model")
        netVGG = models.vgg19()
        netVGG.load_state_dict(
            model_zoo.load_url(
                'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'))

        class _content_model(nn.Module):
            def __init__(self):
                super(_content_model, self).__init__()
                self.feature = nn.Sequential(
                    *list(netVGG.features.children())[:-1])

            def forward(self, x):
                out = self.feature(x)
                return out

        netContent = _content_model()
    print("===> Building model")
    model = NetG()
    criterion = nn.MSELoss()

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        try:
            os.makedirs('Output/{}'.format(epoch))
        except Exception as e:
            print(e)

        train(training_data_loader, optimizer, model, criterion, epoch, opt,
              netContent)
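
A minimal sketch of the train() helper this snippet assumes (hypothetical; the batch layout and logging cadence are assumptions, not the repository's actual code):

def train(training_data_loader, optimizer, model, criterion, epoch, opt, netContent):
    model.train()
    for iteration, batch in enumerate(training_data_loader, 1):
        lr_input, target = batch[0], batch[1]  # batch layout is an assumption
        if opt.cuda:
            lr_input, target = lr_input.cuda(), target.cuda()
        output = model(lr_input)
        loss = criterion(output, target)
        if opt.vgg_loss and netContent is not None:
            # add a VGG feature-space (content) loss term
            loss = loss + criterion(netContent(output), netContent(target).detach())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if iteration % 100 == 0:
            print("===> Epoch[{}]({}/{}): Loss: {:.5f}".format(
                epoch, iteration, len(training_data_loader), loss.item()))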
Example #2
    def __init__(self, model: nn.Module, checkpoint, image_net, random_samples,
                 indices, images_dir, use_cpu, name, image_resize, **kwargs):
        self.checkpoint = checkpoint
        self.image_net = image_net
        self.random_samples = random_samples
        self.indices = indices
        self.image_resize = image_resize

        self.device = common.get_device(cpu_force=use_cpu)
        self.model = model
        self.images_dir = images_dir
        self.use_cpu = use_cpu
        self.name = name

        ch = get_checkpoint(checkpoint, image_net)
        if ch is not None:
            model.load_state_dict(ch, strict=False)
        else:
            print('Using random weights!')

        self.dataset = BasicDataset(
            labels_csv=dataset_path(prop('datasets.test')),
            transforms=self.get_image_transforms(),
            img_dir=self.images_dir)

        self.mask_loader = MaskLoader(preset_attr=self.dataset.labels(),
                                      ts=self.get_image_transforms())

        self.label_to_index_total = {
            label: i for i, label in enumerate(self.dataset.labels())
        }

        if self.random_samples is None and self.indices is None:
            raise AttributeError(
                'Expected one of `indices` or `random_samples`')

        if self.indices is None and self.random_samples is not None:
            # np.random.random_integers is deprecated; randint's high bound is exclusive
            self.indices = np.random.randint(low=0,
                                             high=len(self.dataset),
                                             size=self.random_samples)
Example #3
def segment_img(img_path, net, device, scale_factor=1, out_threshold=0.5):
    """ 
    Segement out the mask (target vein region) of the input image (specified by image path) 
    """
    print(f"\nPerforming segmentation on ---> {img_path} ...")

    img = torch.from_numpy(BasicDataset.preprocess(Image.open(img_path).convert('L'), scale_factor))
    img = img.unsqueeze(0) # add dimension
    img = img.to(device=device, dtype=torch.float32)

    begin_time = time.time()
    with torch.no_grad():
        output = net(img)
    end_time = time.time()
    inference_time = end_time - begin_time
    print(f'inference_time: {inference_time}s')

    # transform to image numpy array
    if net.n_classes > 1:
        probs = F.softmax(output, dim=1)
    else:
        probs = torch.sigmoid(output)
    probs = probs.squeeze(0)  # still on the inference device

    # with the Resize commented out, this PIL round-trip is effectively a no-op
    tf = transforms.Compose([
        transforms.ToPILImage(),
        # transforms.Resize(full_img.size[1]),
        transforms.ToTensor()
    ])
    probs = tf(probs.cpu())
    full_mask = probs.squeeze().cpu().numpy()
    if np.count_nonzero(full_mask) == 0:
        print("No veins segmented out on this image!")

    return full_mask > out_threshold, inference_time
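
A hedged usage sketch (the UNet class and checkpoint path are assumptions, not part of this snippet):

net = UNet(n_channels=1, n_classes=1)  # hypothetical model matching the 'L' input
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.to(device=device)
net.load_state_dict(torch.load('checkpoints/CP_epoch30.pth', map_location=device))
net.eval()

mask, dt = segment_img('data/imgs/sample.jpg', net, device, scale_factor=0.5)
Image.fromarray((mask * 255).astype(np.uint8)).save('sample_mask.png')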
Example #4
from dataset import BasicDataset
from torch.utils.data import DataLoader, random_split
# the next import shadows dataset.BasicDataset with the dynamic_dataloader version
from dynamic_dataloader import RestrictedDataset, BasicDataset
from fetch_data_for_next_phase import get_pool_data

# ailab
dir_img = '/data.local/all/hangd/src_code_3/Pytorch-UNet/data/imgs/'
dir_mask = '/data.local/all/hangd/src_code_3/Pytorch-UNet/data/masks/'

dir_img_test = '/data.local/all/hangd/src_code_3/Pytorch-UNet/data_test/imgs/'
dir_mask_test = '/data.local/all/hangd/src_code_3/Pytorch-UNet/data_test/masks/'

dir_img_draft = '/DATA/hangd/cardi/RobustSegmentation/data_draft/imgs/'
dir_mask_draft = '/DATA/hangd/cardi/RobustSegmentation/data_draft/masks/'

pool_data = get_pool_data("data_one32nd_category.json")
# dataset = RestrictedDataset(dir_img, dir_mask, pool_data, True)
dataset = BasicDataset(dir_img, dir_mask, pool_data)
pool_loader = DataLoader(dataset,
                         batch_size=16,
                         shuffle=True,
                         num_workers=1,
                         pin_memory=True)

batch = next(iter(pool_loader))
img = batch['image']
mask = batch['mask']
sample_id = batch['id']  # avoids shadowing the built-in id()
print("train_loader: ", img.shape, mask.shape)
print("id: ", sample_id)
Example #5
# fix the random seed when a GPU is available
if device == 'cuda':
    torch.cuda.manual_seed_all(777)
    
learning_rate = 1e-4
training_epochs = 500
batch_size = 10

os.chdir('C:/Users/CSD/Desktop/CPR_project/cnn')

from dataset import BasicDataset

mitdb_train = BasicDataset(root_dir='./data_mV', train=True)
data_loader = torch.utils.data.DataLoader(dataset=mitdb_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
mitdb_test = BasicDataset(root_dir='./data_mV', train=False)
data_loader_test = torch.utils.data.DataLoader(dataset=mitdb_test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               drop_last=True)

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__() 
        self.layer = nn.Sequential(
            nn.Conv1d(1, 3, 5),
Example #6
from dataset import BasicDataset
from torch.utils.data import DataLoader, random_split

dir_img = r"D:\BaiduNetdiskDownload\gen_img"
dir_mask = r"D:\BaiduNetdiskDownload\gen_mask"
dir_checkpoint = r"D:\PycharmsProjects\UNET+BLR\checkPoints"

classes = 5
epochs = 5
global_step = 0
batch_size = 4
lr = 0.001

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

dataset = BasicDataset(dir_img, dir_mask)
n_val = int(len(dataset) * 0.1)
n_train = len(dataset) - n_val
train, val = random_split(dataset, [n_train, n_val])
train_loader = DataLoader(train, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val, batch_size=batch_size, shuffle=False)

# writer = SummaryWriter(comment=f'LR_{lr}_BS_{batch_size}_SCALE_{1}')

net = UNet(n_channels=3, n_classes=classes, bilinear=True)
net = net.to(device)  # use the device selected above instead of assuming CUDA

optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=1e-8)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, 'min' if net.n_classes > 1 else 'max', patience=2)
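
The snippet ends before the training loop; a minimal sketch of how the pieces above would typically be driven, assuming the usual torch imports and batch keys matching the other BasicDataset examples on this page (the evaluate helper is hypothetical):

criterion = nn.CrossEntropyLoss()  # multi-class masks (classes = 5)

for epoch in range(epochs):
    net.train()
    for batch in train_loader:
        imgs = batch['image'].to(device=device, dtype=torch.float32)
        true_masks = batch['mask'].to(device=device, dtype=torch.long)
        loss = criterion(net(imgs), true_masks)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    val_loss = evaluate(net, val_loader)  # hypothetical validation helper
    scheduler.step(val_loss)  # ReduceLROnPlateau steps on a monitored metric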
Example #7
        return [image, mask]


# use same transform for train/val for this example
trans = transforms.Compose(
    [transforms.Resize((768, 768)),
     transforms.ToTensor()])

# train_set = SimDataset(2000, transform=trans)
# val_set = SimDataset(200, transform=trans)

dir_img = 'data/imgs/'
dir_mask = 'data/masks/'

dataset = BasicDataset(dir_img, dir_mask, trans, scale=0.5)
val_percent = 0.1
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train_set, val_set = random_split(dataset, [n_train, n_val])
batch_size = 1

image_datasets = {'train': train_set, 'val': val_set}

dataloaders = {
    'train': DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0),
    # shuffling the validation loader is harmless here but not required
    'val': DataLoader(val_set, batch_size=batch_size, shuffle=True, num_workers=0),
}
Example #8
def train_net(
    args,
    net,
    device,
    epochs=5,
    batch_size=1,
    lr=0.1,
    val_percent=0.1,
    save_cp=True,
    img_scale=0.5,
    n_classes=2,
    class_weights=[1, 1],
):

    dataset = BasicDataset(dir_img, dir_mask, img_scale, n_classes)
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True)
    val_loader = DataLoader(val,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=8,
                            pin_memory=True)

    writer = SummaryWriter(
        comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
    global_step = 0

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Training size:   {n_train}
        Validation size: {n_val}
        Checkpoints:     {save_cp}
        Device:          {device.type}
        Images scaling:  {img_scale}
    ''')

    args.training_size = n_train
    args.validation_size = n_val

    argparse_dict = vars(args)
    with open('./runs/config.list', 'w') as f:
        f.write(json.dumps(argparse_dict, indent=4, sort_keys=True))

    # optimizer = optim.RMSprop(net.parameters(), lr=lr, weight_decay=1e-8)
    optimizer = optim.Adam(net.parameters(), lr=lr)
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss(weight=torch.Tensor(class_weights).to(
            device=device))
    else:
        criterion = nn.BCEWithLogitsLoss()

    if os.path.exists('./runs/log.csv'):
        os.remove('./runs/log.csv')
    f = open('./runs/log.csv', 'a')

    for epoch in range(epochs):
        net.train()

        epoch_loss = 0
        with tqdm(total=n_train,
                  desc=f'Epoch {epoch + 1}/{epochs}',
                  unit='img') as pbar:
            for batch in train_loader:
                imgs = batch['image']
                true_masks = batch['mask']
                assert imgs.shape[1] == net.n_channels, \
                    f'Network has been defined with {net.n_channels} input channels, ' \
                    f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
                    'the images are loaded correctly.'

                imgs = imgs.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if net.n_classes == 1 else torch.long
                true_masks = true_masks.to(device=device, dtype=mask_type)

                masks_pred = net(imgs)

                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()
                writer.add_scalar('Loss/train', loss.item(), global_step)

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                tot = 0.
                for true_mask, pred in zip(true_masks, masks_pred):
                    # note: thresholds raw logits at 0.5; applying sigmoid first
                    # (or thresholding logits at 0) is the usual convention
                    pred = (pred > 0.5).float()
                    tot += dice_coeff(pred, true_mask.squeeze(dim=1)).item()
                acc = tot / len(imgs)

                writer.add_scalar('Acc/train', acc, global_step)

                log = [epoch] + [loss.item()] + [acc] + [' ']
                log = map(str, log)
                f.write(','.join(log) + '\n')

                pbar.update(imgs.shape[0])
                global_step += 1

                if global_step % max(1, len(dataset) // (10 * batch_size)) == 0:
                    val_score = eval_net(net, val_loader, device, n_val)
                    if net.n_classes > 1:
                        logging.info(
                            'Validation cross entropy: {}'.format(val_score))
                        writer.add_scalar('Loss/val', val_score, global_step)

                    else:
                        logging.info(
                            'Validation Dice Coeff: {}'.format(val_score))
                        writer.add_scalar('Acc/val', val_score, global_step)

                    writer.add_images('images', imgs, global_step)
                    if net.n_classes == 1:
                        writer.add_images('masks/true', true_masks,
                                          global_step)
                        writer.add_images('masks/pred',
                                          torch.sigmoid(masks_pred) > 0.5,
                                          global_step)

                    log = [epoch] + [' '] + [' '] + [val_score]
                    log = map(str, log)
                    f.write(','.join(log) + '\n')

                    net.train()

        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
            torch.save(net.state_dict(),
                       dir_checkpoint + f'CP_epoch{epoch + 1}.pth')
            logging.info(f'Checkpoint {epoch + 1} saved!')

    f.close()
    writer.close()
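
eval_net is not shown here; a plausible sketch consistent with the call above, assuming torch.nn.functional is imported as F (an assumption, not the repository's actual code):

def eval_net(net, loader, device, n_val):
    net.eval()
    tot = 0.0
    with torch.no_grad():
        for batch in loader:
            imgs = batch['image'].to(device=device, dtype=torch.float32)
            true_masks = batch['mask'].to(device=device)
            masks_pred = net(imgs)
            if net.n_classes > 1:
                tot += F.cross_entropy(masks_pred, true_masks.long()).item()
            else:
                pred = (torch.sigmoid(masks_pred) > 0.5).float()
                tot += dice_coeff(pred, true_masks.float()).item()
    net.train()
    return tot / n_val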
Example #9
def get_data_statistics(data_path,
                        model,
                        dims=DIMS,
                        cuda=True,
                        batch_size=BATCH_SIZE):

    # Build the cache filename for this dataset's statistics

    tag = get_tag(data_path, dims)
    fname = os.path.join(STATISTICS_DIR, tag + '.npz')

    # Check if statistics exist

    if os.path.exists(fname):

        stats = np.load(fname)

        m, s, num = stats['m'], stats['s'], stats['num_samples']

    else:
        temp_path = os.path.join(TEMP_DIR, tag) + '/'

        if not os.path.exists(temp_path):
            os.mkdir(temp_path)

            # Dump all images to disk as 0-255 .png files (rescaled from [-1, 1])

            dataset = BasicDataset(model_dir=data_path)
            dataloader = torch.utils.data.DataLoader(dataset,
                                                     batch_size=batch_size,
                                                     num_workers=4,
                                                     shuffle=False)

            num = 0

            for batch in dataloader:

                batch = batch.detach().cpu().numpy()
                batch = ((batch + 1) / 2 * 255).astype(np.uint8)

                for i in range(batch.shape[0]):

                    img = np.tile(batch[i].transpose(1, 2, 0), (1, 1, 3))
                    imsave(os.path.join(temp_path, f'img_{num}.png'), img)

                    num += 1
        else:
            num = len(os.listdir(temp_path))

        if cuda:
            model.cuda()

        m, s = fid_score._compute_statistics_of_path(temp_path,
                                                     model,
                                                     batch_size=batch_size,
                                                     cuda=cuda,
                                                     dims=dims)

        # Remove dump folder
        rmtree(temp_path)

        np.savez(fname, m=m, s=s, num_samples=num)

    return m, s, num
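
A hedged usage sketch (the paths and the InceptionV3 model instance are assumptions; calculate_frechet_distance is pytorch-fid's standard helper):

m1, s1, n1 = get_data_statistics('data/real_models', inception_model)
m2, s2, n2 = get_data_statistics('data/generated_models', inception_model)
fid = fid_score.calculate_frechet_distance(m1, s1, m2, s2)
print(f'FID: {fid:.2f} ({n1} real vs {n2} generated samples)')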
Example #10
    net = UNet(n_channels=1, n_classes=1).to(
        device)  # we are using gray images and only have one labelled class
    if args.pretrained_weights:
        net.load_state_dict(torch.load(args.pretrained_weights))
        print(f'Pretrained weights loaded from {args.pretrained_weights}')
    print(f' Using device {device}\n'
          f' Network:\n'
          f' \t{net.n_channels} input channels\n'
          f' \t{net.n_classes} output channels (classes)\n'
          f' \t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling')
    # faster convolutions, but more memory
    # cudnn.benchmark = True

    ## --- Set up data
    imgs_dir = 'data/imgs/' + args.dataset_name
    masks_dir = 'data/masks/' + args.dataset_name
    print(f'imgs_dir: {imgs_dir} masks_dir: {masks_dir}')
    dataset = BasicDataset(imgs_dir, masks_dir, args.down_scale)
    n_val = int(len(dataset) * args.valid_ratio)
    n_train = len(dataset) - n_val
    print(f'n_val: {n_val} n_train: {n_train}')
    train, val = random_split(
        dataset, [n_train, n_val])  # split into train and validation datasets
    train_loader = DataLoader(train,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=1,
                              pin_memory=True)
    val_loader = DataLoader(val,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=1,
                            pin_memory=True)
Example #11
            net = ClassifierModule10_2path(backbone10=args.backbone_10, backbone3=args.backbone_10, fcnn=False)
        if args.setup == 'two_path_fcnn':
            net = ClassifierModule10_2path(backbone10=args.backbone_10, backbone3=args.backbone_10, fcnn=True)
        if args.setup == 'single_path':
            net = ClassifierModule10(backbone=args.backbone_10)
    # net = BaseLine()
    net.to(device=device)
    weights = torch.load(args.weights, map_location=device)
    if args.n_classes == 3:
        weights, total_size_new = quantize_weights(weights, 4)
        # weights = pruning(weights, 0.009, total_size_new)
    net.load_state_dict(weights)
    # torch.quantization.quantize_dynamic(
    #     net,  # the original model
    #     {torch.nn.Linear},  # a set of layers to dynamically quantize
    #     dtype=torch.qint8)  # the target dtype for quantized weights
    net.eval()
    # test data loader
    dataset_test = BasicDataset(args.data_dir, args.features_dir, test_df, args.n_classes, test=True,
                                augmentations='without')
    test_loader = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False, num_workers=8, pin_memory=True,
                             drop_last=False)
    prediction, labels = get_prediction(test_loader, net, device, args.setup)
    plot_results(prediction, labels, args.output_dir, rec_device='all', n_classes=args.n_classes)
    rec_devices = test_df.apply(lambda x: x['filename'].split('/')[1].split('-')[-1].replace('.wav', ''), axis=1)
    for rec_device in rec_devices.unique():
        prediction_partial = prediction[rec_devices == rec_device]
        labels_partial = labels[rec_devices == rec_device]
        plot_results(prediction_partial, labels_partial, args.output_dir, rec_device=rec_device,
                     n_classes=args.n_classes)
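
quantize_weights is not shown; a rough sketch, under the assumption that it performs uniform k-bit quantization of each float tensor in the state dict and also reports the quantized size (the real implementation may differ):

def quantize_weights(state_dict, bits):
    quantized, total_bits = {}, 0
    for key, w in state_dict.items():
        if w.is_floating_point():
            levels = 2 ** bits - 1
            w_min, w_max = w.min(), w.max()
            scale = (w_max - w_min) / levels if w_max > w_min else 1.0
            codes = torch.round((w - w_min) / scale)  # integer codes in [0, levels]
            quantized[key] = codes * scale + w_min    # dequantized tensor
            total_bits += w.numel() * bits
        else:
            quantized[key] = w
            total_bits += w.numel() * w.element_size() * 8
    return quantized, total_bits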
Example #12
class BaseTestModel:
    def __init__(self, model: nn.Module, checkpoint, image_net, random_samples,
                 indices, images_dir, use_cpu, name, image_resize, **kwargs):
        self.checkpoint = checkpoint
        self.image_net = image_net
        self.random_samples = random_samples
        self.indices = indices
        self.image_resize = image_resize

        self.device = common.get_device(cpu_force=use_cpu)
        self.model = model
        self.images_dir = images_dir
        self.use_cpu = use_cpu
        self.name = name

        ch = get_checkpoint(checkpoint, image_net)
        if ch is not None:
            model.load_state_dict(ch, strict=False)
        else:
            print('Using random weights!')

        self.dataset = BasicDataset(
            labels_csv=dataset_path(prop('datasets.test')),
            transforms=self.get_image_transforms(),
            img_dir=self.images_dir)

        self.mask_loader = MaskLoader(preset_attr=self.dataset.labels(),
                                      ts=self.get_image_transforms())

        self.label_to_index_total = {
            label: i for i, label in enumerate(self.dataset.labels())
        }

        if self.random_samples is None and self.indices is None:
            raise AttributeError(
                'Expected one of `indices` or `random_samples`')

        if self.indices is None and self.random_samples is not None:
            # np.random.random_integers is deprecated; randint's high bound is exclusive
            self.indices = np.random.randint(low=0,
                                             high=len(self.dataset),
                                             size=self.random_samples)

    def get_image_transforms(self):
        return transforms.Compose([
            transforms.Resize((self.image_resize, self.image_resize),
                              interpolation=Image.NEAREST),
            transforms.ToTensor()
        ])

    def process_one(self, image, target, model_output_all, masks, image_index,
                    in_batch_index):
        raise NotImplementedError

    def run(self):
        self.model = self.model.to(self.device)
        self.model.eval()

        images_batch, target_batch, img_indices_batch = batch_by_indices(
            ds=self.dataset, indices=self.indices)
        masks = self.mask_loader[img_indices_batch]

        model_out = self.model(images_batch.to(self.device))

        for i, image_index in enumerate(img_indices_batch):
            self.process_one(image=images_batch[i],
                             target=target_batch[i],
                             model_output_all=model_out,
                             masks=masks[i],
                             image_index=image_index,
                             in_batch_index=i)
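
A minimal sketch of a concrete subclass (hypothetical; real test models implement richer logic in process_one, and indexing into labels() assumes it returns a sequence):

class PrintTopLabels(BaseTestModel):
    def process_one(self, image, target, model_output_all, masks, image_index,
                    in_batch_index):
        scores = torch.sigmoid(model_output_all[in_batch_index])
        top5 = scores.argsort(descending=True)[:5]
        labels = self.dataset.labels()
        print(image_index, [labels[i] for i in top5])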
Example #13
def train_net(dir_checkpoint,
              n_classes,
              bilinear,
              n_channels,
              device,
              epochs=30,
              val_percent=0.1,
              save_cp=True,
              img_scale=1):

    global best_val_iou_score
    global best_test_iou_score

    net = PAN()
    # net = smp.Unet(
    #     encoder_name='timm-regnety_120',  # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
    #     encoder_weights=None,  # use `imagenet` pretrained weights for encoder initialization
    #     in_channels=3,  # model input channels (1 for grayscale images, 3 for RGB, etc.)
    #     classes=1,  # model output channels (number of classes in your dataset)
    # )
    net.to(device=device)

    dataset = BasicDataset(dir_img, dir_mask, img_scale)
    data_test = BasicDataset(imgs_dir=dir_img_test,
                             masks_dir=dir_mask_test,
                             train=False,
                             scale=img_scale)

    batch_size = 4
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)
    test_loader = DataLoader(data_test,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=2,
                             pin_memory=True,
                             drop_last=True)
    lr = 1e-5
    writer = SummaryWriter(
        comment=
        f'_{net.__class__.__name__}_LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
    global_step = 0

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Checkpoints:     {save_cp}
        Device:          {device.type}
        Images scaling:  {img_scale}
    ''')

    optimizer = optim.RMSprop(net.parameters(),
                              lr=lr,
                              weight_decay=1e-8,
                              momentum=0.9)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min' if n_classes > 1 else 'max', patience=2)
    # note: the scheduler is created here but never stepped in this snippet
    if n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()

    for epoch in range(epochs):
        net.train()
        epoch_loss = 0
        n_train = len(dataset)
        with tqdm(total=n_train,
                  desc=f'Epoch {epoch + 1}/{epochs}',
                  unit='img') as pbar:
            for batch in train_loader:
                imgs = batch['image']
                true_masks = batch['mask']
                assert imgs.shape[1] == n_channels, \
                    f'Network has been defined with {n_channels} input channels, ' \
                    f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
                    'the images are loaded correctly.'

                imgs = imgs.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if n_classes == 1 else torch.long

                true_masks = true_masks.to(device=device, dtype=mask_type)
                masks_pred = net(imgs)  # BCHW, e.g. 4x1x256x256
                # debugging leftover: a second forward pass to compare outputs
                # _tem = net(imgs)
                # print("IS DIFFERENT OR NOT: ", torch.sum(masks_pred - _tem))

                true_masks = true_masks[:, :1, :, :]
                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()
                # writer.add_scalar('Loss/train', loss.item(), global_step)

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                loss.backward()
                nn.utils.clip_grad_value_(net.parameters(), 0.1)
                optimizer.step()

                pbar.update(imgs.shape[0])
                global_step += 1

        # Compute Dice and IoU on the test set and log them to TensorBoard
        test_score_dice, test_score_iou = eval_net(net, test_loader, n_classes,
                                                   device)
        if test_score_iou > best_test_iou_score:
            best_test_iou_score = test_score_iou
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
            torch.save(net.state_dict(),
                       dir_checkpoint + f'best_CP_epoch{epoch + 1}.pth')
            logging.info(f'Checkpoint {epoch + 1} saved!')
        logging.info('Test Dice Coeff: {}'.format(test_score_dice))
        print('Test Dice Coeff: {}'.format(test_score_dice))
        writer.add_scalar('Dice/test', test_score_dice, epoch)

        logging.info('Test IOU : {}'.format(test_score_iou))
        print('Test IOU : {}'.format(test_score_iou))
        writer.add_scalar('IOU/test', test_score_iou, epoch)
    print("best iou: ", best_test_iou_score)
Example #14
def train_net(dir_checkpoint,
              n_classes,
              bilinear,
              n_channels,
              device,
              epochs=30,
              val_percent=0.1,
              save_cp=True,
              img_scale=1):

    global best_val_iou_score
    global best_test_iou_score

    net = PAN()
    ckpt_path = "/data.local/all/hangd/v1/uncertainty1/best_CP_epoch29_one32th_.pth"
    net.to(device=device)
    net.load_state_dict(torch.load(ckpt_path, map_location=device))
    writer = SummaryWriter(
        comment=f'_{net.__class__.__name__}_ece_one32nd_training_set')

    logging.info(f'Model loaded from {ckpt_path}')
    batch_size = 4

    dataset = BasicDataset(dir_img, dir_mask, True)
    pool_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4,
                             pin_memory=True)

    # data_test = BasicDataset(imgs_dir=dir_img_test, masks_dir=dir_mask_test, train=False, scale=img_scale)
    # test_loader = DataLoader(data_test, batch_size=16, shuffle=False, num_workers=2, pin_memory=True,drop_last=True)

    logging.info(f'''Starting selecting in pool:
        Device:          {device.type}
    ''')

    epochs = 1
    # test_score_dice, test_score_iou = eval_net(net, test_loader, n_classes, device)
    # print(f"TEST iou = {test_score_iou}, dice = {test_score_dice} ")
    std = []
    for epoch in range(epochs):
        net.eval()
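        # note: eval() disables dropout, so the GAUSS_ITERATION forward passes
        # below are deterministic; MC-dropout sampling usually keeps dropout
        # layers in train mode (compare net.train() in the next example)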
        epoch_loss = 0
        n_pool = len(dataset)
        with tqdm(total=n_pool,
                  desc='ECE calculating',
                  unit='batch',
                  leave=False) as pbar:
            for ind, batch in enumerate(tqdm(pool_loader)):
                imgs, true_masks = batch['image'], batch['mask']
                imgs = imgs.to(device=device, dtype=torch.float32)
                true_masks = true_masks.to(device=device,
                                           dtype=torch.float32)  # BCHW
                true_masks = true_masks[:, :1, :, :]

                y_pred_samples = []
                for i in range(GAUSS_ITERATION):
                    with torch.no_grad():
                        logits = net(imgs)
                        y_pred = torch.sigmoid(logits)
                        # y_pred = (y_pred > 0.5).float()
                        y_pred = y_pred[:, :1, :, :]
                        y_pred_samples.append(
                            y_pred[:, 0, :, :]
                        )  # y_pred_samples: list of (batch, H, W) tensors
                y_pred_samples = torch.stack(y_pred_samples, dim=0)
                y_pred_samples = y_pred_samples.type(torch.FloatTensor)
                mean_y_pred = y_pred_samples.mean(dim=0)  # shape: batch, H, W
                std_y_pred = y_pred_samples.std(dim=0)  # shape: batch, H, W
                grid = torchvision.utils.make_grid(mean_y_pred.unsqueeze(1))
                writer.add_image('images', grid, ind)
                _std = get_segmentation_mask_uncertainty(std_y_pred)
                std.extend(_std)

    for ind, val in enumerate(std):
        writer.add_scalar("std in pool - case one32nd data", val, ind)

    writer.add_histogram("Histogram std corresponding", np.array(std), 1)
    std = torch.tensor(std, device=device)  # avoids a hard dependency on CUDA
    mean = std.mean()
    _std = std.std()
    writer.add_scalar("Mean std", mean, 1)
    writer.add_scalar("STD std", _std, 1)
    print("Mean: ", mean)
    print("std: ", _std)
Example #15
def train_net(dir_checkpoint,
              n_classes,
              bilinear,
              n_channels,
              device,
              epochs=30,
              val_percent=0.1,
              save_cp=True,
              img_scale=1):

    global best_val_iou_score
    global best_test_iou_score

    net = PAN()
    ckpt_path = "/data.local/all/hangd/v1/uncertainty1/best_CP_epoch15_test_iou_85_with_25_percent_original_training_dataset.pth"
    net.to(device=device)
    net.load_state_dict(torch.load(ckpt_path, map_location=device))
    logging.info(f'Model loaded from {ckpt_path}')

    dataset = BasicDataset(dir_img, dir_mask, img_scale)
    data_test = BasicDataset(imgs_dir=dir_img_test,
                             masks_dir=dir_mask_test,
                             train=False,
                             scale=img_scale)

    batch_size = 4
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)
    lr = 1e-5
    writer = SummaryWriter(
        comment=f'_{net.__class__.__name__}_ECE_on_25percentage_data')
    global_step = 0

    logging.info(f'''Starting training:
        Device:          {device.type}
    ''')

    epochs = 1
    ece_values = []
    for epoch in range(epochs):
        net.train()  # train mode keeps dropout active, so repeated MC samples differ
        epoch_loss = 0
        n_train = len(dataset)
        with tqdm(total=n_train,
                  desc='Validation round',
                  unit='batch',
                  leave=False) as pbar:
            for batch in train_loader:
                imgs, true_masks = batch['image'], batch['mask']
                imgs = imgs.to(device=device, dtype=torch.float32)
                true_masks = true_masks.to(device=device,
                                           dtype=torch.float32)  # BCHW
                true_masks = true_masks[:, :1, :, :]

                y_pred_samples = []
                for i in range(GAUSS_ITERATION):
                    with torch.no_grad():
                        logits = net(imgs)
                        y_pred = torch.sigmoid(logits)
                        # y_pred = (y_pred > 0.5).float()
                        y_pred = y_pred[:, :1, :, :]
                        y_pred_samples.append(
                            y_pred[:, 0, :, :]
                        )  # y_pred_samples: list of (batch, H, W) tensors
                y_pred_samples = torch.stack(y_pred_samples, dim=0)
                y_pred_samples = y_pred_samples.type(torch.FloatTensor)
                mean_y_pred = y_pred_samples.mean(dim=0)  # shape: batch, H, W
                ece_values.extend(
                    get_segmentation_mask_uncertainty(mean_y_pred, true_masks))
                pbar.update()

    for inx, ece_val in enumerate(ece_values):
        writer.add_scalar("ECE_on_quarter_of_training_set", ece_val, inx)
Example #16
def train_net(dir_checkpoint,
              n_classes,
              n_channels,
              device,
              epochs=30,
              save_cp=True,
              img_scale=1):
    global best_val_iou_score
    global best_test_iou_score

    net = PAN()
    net.to(device=device)
    batch_size = 4
    lr = 1e-5
    writer = SummaryWriter(
        comment=
        f'_{net.__class__.__name__}_LR_{lr}_BS_{batch_size}_categoryFirstEntropy_ACQUISITION'
    )
    global_step = 0

    logging.basicConfig(
        filename="./logging_one32nd_category.txt",
        filemode='a',
        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.DEBUG)

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Checkpoints:     {save_cp}
        Device:          {device.type}
    ''')

    optimizer = optim.RMSprop(net.parameters(),
                              lr=lr,
                              weight_decay=1e-8,
                              momentum=0.9)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min' if n_classes > 1 else 'max', patience=2)
    if n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()
    num_phases = 25  # 2689 images total; each phase adds 100 images to the training pool
    training_pool_ids_path = "data_one32nd_category.json"
    all_training_data = "data_all.json"

    for phase in range(num_phases):
        # Within a phase: save the best-epoch checkpoint (highest test IoU) and log
        # its test IoU to TensorBoard; also resume from the previous phase's checkpoint.
        selected_images = get_pool_data(training_pool_ids_path)
        data_train = RestrictedDataset(dir_img, dir_mask, selected_images)
        data_test = BasicDataset(imgs_dir=dir_img_test,
                                 masks_dir=dir_mask_test,
                                 train=False,
                                 scale=img_scale)

        train_loader = DataLoader(data_train,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True)
        test_loader = DataLoader(data_test,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=2,
                                 pin_memory=True,
                                 drop_last=True)
        prev_ckpt_path = Path(dir_checkpoint + 'ckpt.pth')
        if prev_ckpt_path.is_file():
            net.load_state_dict(
                torch.load(dir_checkpoint + 'ckpt.pth', map_location=device))
        for epoch in range(epochs):
            net.train()
            epoch_loss = 0
            n_train = len(data_train)
            with tqdm(total=n_train,
                      desc=f'Epoch {epoch + 1}/{epochs}',
                      unit='img') as pbar:
                for batch in train_loader:
                    imgs = batch['image']
                    true_masks = batch['mask']
                    assert imgs.shape[1] == n_channels, \
                        f'Network has been defined with {n_channels} input channels, ' \
                        f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
                        'the images are loaded correctly.'

                    imgs = imgs.to(device=device, dtype=torch.float32)
                    mask_type = torch.float32 if n_classes == 1 else torch.long

                    true_masks = true_masks.to(device=device, dtype=mask_type)
                    masks_pred = net(imgs)  # BCHW, e.g. 4x1x256x256
                    # debugging leftover: a second forward pass to compare outputs
                    # _tem = net(imgs)
                    # print("IS DIFFERENT OR NOT: ", torch.sum(masks_pred - _tem))
                    true_masks = true_masks[:, :1, :, :]
                    loss = criterion(masks_pred, true_masks)
                    epoch_loss += loss.item()
                    # writer.add_scalar('Loss/train', loss.item(), global_step)
                    pbar.set_postfix(**{'loss (batch)': loss.item()})
                    optimizer.zero_grad()
                    loss.backward()
                    nn.utils.clip_grad_value_(net.parameters(), 0.1)
                    optimizer.step()
                    pbar.update(imgs.shape[0])
                    global_step += 1
            # Compute Dice and IoU on the test set and log them to TensorBoard
            test_score_dice, test_score_iou = eval_net(net, test_loader,
                                                       n_classes, device)
            if test_score_iou > best_test_iou_score:
                best_test_iou_score = test_score_iou
                try:
                    os.mkdir(dir_checkpoint)
                    logging.info('Created checkpoint directory')
                except OSError:
                    pass
                torch.save(
                    net.state_dict(),
                    dir_checkpoint + f'best_CP_epoch{epoch + 1}_one32th_.pth')
                logging.info(f'Checkpoint {epoch + 1} saved!')
            logging.info('Test Dice Coeff: {}'.format(test_score_dice))
            print('Test Dice Coeff: {}'.format(test_score_dice))
            writer.add_scalar(f'Phase_{phase}_Dice/test', test_score_dice,
                              epoch)

            logging.info('Test IOU : {}'.format(test_score_iou))
            print('Test IOU : {}'.format(test_score_iou))
            writer.add_scalar(f'Phase_{phase}_IOU/test', test_score_iou, epoch)
        print(f"Phase_{phase}_best iou: ", best_test_iou_score)
        torch.save(net.state_dict(), dir_checkpoint + 'ckpt.pth')
        writer.add_scalar('Phase_IOU/test', best_test_iou_score, phase)
        # Fetching data for next phase - Update pooling images.
        update_training_pool_ids_2(net,
                                   training_pool_ids_path,
                                   all_training_data,
                                   device,
                                   acquisition_func="cfe")

    writer.close()
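
update_training_pool_ids_2 is not shown; a hedged sketch of the acquisition step it appears to perform (the JSON layout and the score_image helper are assumptions):

import json

def update_training_pool_ids_2(net, pool_ids_path, all_data_path, device,
                               acquisition_func="cfe", n_fetch=100):
    with open(pool_ids_path) as f:
        selected = set(json.load(f))
    with open(all_data_path) as f:
        candidates = [i for i in json.load(f) if i not in selected]
    # score every unlabeled image (e.g. by predictive entropy) and keep the top n_fetch
    scores = {i: score_image(net, i, device, acquisition_func) for i in candidates}
    top = sorted(candidates, key=scores.get, reverse=True)[:n_fetch]
    with open(pool_ids_path, 'w') as f:
        json.dump(sorted(selected | set(top)), f)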
Example #17
    default='checkpoints/CP_epoch100.pth')  # checkpoints/CP_epoch51.pth

logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

# using TensorBoard: tensorboard --logdir=runs

if __name__ == "__main__":
    opt = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = UNet(n_channels=3, n_classes=2)
    if opt.load_cp != '':
        net.load_state_dict(torch.load(opt.load_cp, map_location=device))
    net.to(device=device)
    # faster convolutions, but more memory
    # cudnn.benchmark = True
    dataset = BasicDataset(opt.dir_patch)
    n_val = int(len(dataset) * opt.val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=int(opt.workers),
                              pin_memory=True)
    val_loader = DataLoader(val,
                            batch_size=opt.batch_size,
                            shuffle=False,
                            num_workers=int(opt.workers),
                            pin_memory=True)
    writer = SummaryWriter(
        comment=f'_lr{opt.learning_rate}_bs{opt.batch_size}')
Example #18
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 13:05:03 2020

@author: mooselumph
"""

import torch.utils.data
from dataset import BasicDataset

dataroot = 'C:/Users/mooselumph/code/data/velocity/'
dataset = BasicDataset(model_dir=dataroot)

dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=10,
                                         shuffle=False,
                                         num_workers=1)

for batch in dataloader:
    print(batch.shape)
Example #19
    image_folder = args.image_folder + '/' + args.dataset_name
    for i, fn in enumerate(os.listdir(image_folder)):
        if not fn.endswith('.jpg'):
            continue
        count += 1
        # input image
        img_path = os.path.join(image_folder, fn)
        target_path = img_path.replace('imgs', 'masks')
        
        # single image prediction
        mask, inference_time = segment_img(img_path, net, device, args.scale, args.mask_threshold)

        # target mask
        # tensor type for the requested device (could be hoisted above the loop)
        # FloatTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
        FloatTensor = torch.cuda.FloatTensor if args.device == 'cuda' else torch.FloatTensor
        target = BasicDataset.preprocess(Image.open(target_path), args.scale)

        # validation score computation
        val_score = dice_coeff(FloatTensor(mask), FloatTensor(target).squeeze(0)).item()
        print(f'validation_score (dice_coeff): {val_score}')
        val_score_total += val_score

        # prediction speed
        inference_time_total += inference_time

        if args.save_results:
            output_path = os.path.join(output_dir, fn)
            mask_to_image(mask).save(output_path)
            print(f"saved predicted mask to ---> {output_path}")
                
        if args.viz: