Example #1
    def __init__(self, path, model, model_copy, img_save_path):
        self.path = path
        self.model = model
        self.model_copy = model_copy
        self.img_save_path = img_save_path
        # device to use
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        # the network
        self.net = unet.UNet().to(self.device)

        self.opt = torch.optim.Adam(self.net.parameters(), lr=0.00001)
        self.loss_func = nn.BCELoss()

        self.loader = DataLoader(dataset=dataset.Datasets(path),
                                 batch_size=4,
                                 shuffle=True,
                                 num_workers=4)

        if os.path.exists(self.model):
            self.net.load_state_dict(torch.load(model))
            print(f'loaded {model}!')
        else:
            print('No Param!')
        os.makedirs(img_save_path, exist_ok=True)
Example #2
    def __init__(self, path, model, model_copy, img_save_path):
        self.path = path
        self.model = model
        self.model_copy = model_copy
        self.img_save_path = img_save_path
        # device to use
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        # the network
        self.net = unet.UNet().to(self.device)
        # optimizer; Adam is used here so training runs a bit faster
        self.opt = torch.optim.Adam(self.net.parameters())
        # plain binary cross-entropy is used for training here; the results may not be ideal
        # other losses such as DiceLoss or FocalLoss could be used instead
        self.loss_func = nn.BCELoss()
        # with better hardware, batch_size and num_workers can be increased
        self.loader = DataLoader(dataset.Datasets(path),
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=0)

        # check whether a saved model exists
        if os.path.exists(self.model):
            self.net.load_state_dict(torch.load(model))
            print(f"Loaded{model}!")
        else:
            print("No Param!")
        os.makedirs(img_save_path, exist_ok=True)
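As the comments above note, DiceLoss or FocalLoss may work better than plain BCELoss for segmentation. A minimal soft-Dice loss sketch (an illustration added here, not part of the original snippet) that could stand in for self.loss_func:

import torch
import torch.nn as nn

class DiceLoss(nn.Module):
    """Soft Dice loss for binary masks (illustrative sketch)."""
    def __init__(self, eps=1e-6):
        super().__init__()
        self.eps = eps

    def forward(self, pred, target):
        # pred and target share a shape; pred holds probabilities in [0, 1]
        pred = pred.reshape(pred.size(0), -1)
        target = target.reshape(target.size(0), -1)
        intersection = (pred * target).sum(dim=1)
        union = pred.sum(dim=1) + target.sum(dim=1)
        dice = (2 * intersection + self.eps) / (union + self.eps)
        return 1 - dice.mean()

With this in scope, self.loss_func = DiceLoss() would be a drop-in replacement for the nn.BCELoss() line.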
Example #3
 def __init__(self, shape1, shape2):
     self.network = unet.UNet()  #(shape1, shape2)
     self.network.cuda()
     self.loss_func = torch.nn.L1Loss(reduction='mean')
     self.mssim_loss = MSSSIM(window_size=3, size_average=True)
     self.crossengropy = torch.nn.CrossEntropyLoss()
     self.loss_func_MSE = torch.nn.MSELoss()
     self.model_num = 0
Example #4
    def __init__(self, model_path=None, with_target=False, num_classes=110):
        mean_arr = [0.5, 0.5, 0.5]
        stddev_arr = [0.5, 0.5, 0.5]
        normalize = tv.transforms.Normalize(mean=mean_arr, std=stddev_arr)

        model_dimension = 224
        center_crop = 224
        self.data_transform = tv.transforms.Compose([
            #             tv.transforms.ToPILImage(),
            tv.transforms.Resize(model_dimension),
            tv.transforms.CenterCrop(center_crop),
            tv.transforms.ToTensor(),
            normalize,
        ])
        if with_target:
            self.model = unet.UNet(3, 3 * num_classes, batch_norm=True).cuda()
        else:
            self.model = unet.UNet(3, 3, batch_norm=True).cuda()

        if model_path is not None:
            self.model.load_state_dict(torch.load(model_path))
        self.model.eval()
Example #5
def test():
    model = unet.UNet(1, 1)
    model.load_state_dict(torch.load(args.weight, map_location='cpu'))
    liver_dataset = LiverDataset("data/test/", transform=x_transform, target_transform=y_transform)
    dataloaders = DataLoader(liver_dataset)  # batch_size defaults to 1
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()
Example #6
def train():
    model = unet.UNet(1, 1).to(device)
    batch_size = args.batch_size
    # loss function
    criterion = torch.nn.BCELoss()
    # gradient-descent optimizer
    optimizer = optim.Adam(model.parameters())  # model.parameters(): returns an iterator over module parameters
    # load the dataset
    liver_dataset = LiverDataset("data/train/", transform=x_transform, target_transform=y_transform)
    dataloader = DataLoader(liver_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    # DataLoader: wraps the output of a custom dataset (or a built-in PyTorch one) into Tensors of batch_size samples
    # batch_size: how many samples per minibatch to load; here it is 4, and the dataset has 400 samples, so there are 100 minibatches
    # shuffle: reshuffles the data every epoch (10 epochs here); usually enabled for training data
    # num_workers: number of worker processes used to load data, which speeds up loading
    train_model(model, criterion, optimizer, dataloader)
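train_model is called above but not defined in this snippet; a minimal sketch of what such a loop might look like (an assumption, not the original implementation):

def train_model(model, criterion, optimizer, dataloader, num_epochs=10):
    # hypothetical training loop; `device` is the global used when building the model
    model.train()
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for x, y in dataloader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        print('epoch %d, mean loss %.4f' % (epoch, epoch_loss / len(dataloader)))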
Example #7
def test_sun_model(model_folder, depth=False):
    print('Evaluating folder: {}'.format(model_folder))
    unet = un.UNetRGBD(14) if depth else un.UNet(14)
    unet.load_state_dict(
        torch.load(os.path.join(model_folder, 'converted_clean_model.pth')))
    if on_gpu:
        unet.cuda()
    unet.eval()
    all_preds = []
    all_gt = []
    for i in range(1, 5051):
        img_id = str(i).zfill(4)
        try:
            scaled_rgb = np.load('sunrgbd_data/{}_rgb.npy'.format(img_id))
            scaled_depth = np.load('sunrgbd_data/{}_depth.npy'.format(img_id))
            gt = cv2.imread(
                os.path.join(
                    'sunrgbd_data/13_class_labels/{}_label.png'.format(
                        img_id)), cv2.IMREAD_ANYDEPTH)
        except FileNotFoundError:  # skip ids with missing data files
            continue
        scaled_rgb = np.expand_dims(scaled_rgb, 0)
        scaled_depth = np.expand_dims(scaled_depth, 0)
        torch_rgb = torch.tensor(scaled_rgb, dtype=torch.float32)
        torch_depth = torch.tensor(scaled_depth, dtype=torch.float32)
        if on_gpu:
            if depth:
                pred = unet.forward((torch_rgb.cuda(), torch_depth.cuda()))
            else:
                pred = unet.forward(torch_rgb.cuda())
            pred_numpy = pred.cpu().detach().numpy()
        else:
            if depth:
                pred = unet.forward((torch_rgb, torch_depth))
            else:
                pred = unet.forward(torch_rgb)
            pred_numpy = pred.detach().numpy()
        new_pred = np.argmax(pred_numpy[0], axis=0)
        all_preds.append(new_pred)
        all_gt.append(gt)
    segmentation_evaluation.evaluate_lists(all_preds, all_gt)
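segmentation_evaluation.evaluate_lists is not included in the snippet; a rough sketch of the per-class IoU evaluation it presumably performs (an assumption, requiring matching prediction and label shapes):

import numpy as np

def evaluate_lists(all_preds, all_gt, num_classes=14):
    # flatten all predictions and labels, then compute IoU per class
    pred = np.concatenate([p.ravel() for p in all_preds])
    gt = np.concatenate([g.ravel() for g in all_gt])
    ious = []
    for c in range(num_classes):
        union = np.sum((pred == c) | (gt == c))
        if union > 0:
            ious.append(np.sum((pred == c) & (gt == c)) / union)
    print('mIoU: {:.4f}'.format(np.mean(ious)))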
Example #8
from tensorflow.keras.models import load_model
import unet
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from utils import assign_paths  # assumed source of assign_paths (cf. Example #12)

model_file = 'unet.h5'

# DEFINE THE BASE DIRECTORY
base_dir = '/Users/tdincer/ML/NN_exercises/UNET'
train_im_folder = base_dir + '/data/train'

# FEED THE PATHS
_, _, val_im, val_seg = assign_paths(train_im_folder,
                                     file_format='.png',
                                     split_no=5)

net = unet.UNet()
net.model = load_model(model_file)

net.process_val(val_im, val_seg)
img, seg = next(iter(net.valset.shuffle(1)))
res = net.model.predict(tf.reshape(img, [1, 128, 128, 1]))

fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
x1 = ax1.imshow(img.numpy().reshape(128, 128))
colorbar(x1)
x2 = ax2.imshow(np.argmax(seg, -1).reshape(128, 128))
colorbar(x2)
x3 = ax3.imshow(np.argmax(res, -1).reshape(128, 128))
colorbar(x3)
ax1.set_title('Input')
ax2.set_title('Ground Truth')
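colorbar() is used above but never defined or imported; it is likely a small helper around the common make_axes_locatable pattern, along these lines (an assumption):

from mpl_toolkits.axes_grid1 import make_axes_locatable

def colorbar(mappable):
    # attach a colorbar sized to the axes that own the image
    ax = mappable.axes
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    return ax.figure.colorbar(mappable, cax=cax)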
Example #9
        loss = criterion(pred, lbl)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch %d, step %d, loss %.4f' % (epoch, i, loss.data.item()))
        # print('best epoch %d, miou %.4f'%(logs['best_ep'], logs['best']))
        # visualize
        # writer.add_scalar('M_global', loss.data.item(), epoch*n_batch+i)


if __name__ == '__main__':

    print('start choosing the base net!')
    if netflag == 'unet':
        net = unet.UNet(n_classes=num_classes,
                        padding=True,
                        up_mode='upsample')
    else:
        net = fcn(num_classes)
    net = net.cuda()
    print('set up net correctly')

    # optimizer = optim.SGD(net.parameters(), lr=1e-2, weight_decay=1e-4)

    # instantiate the datasets

    voc_train = VOCSegDataset(True, input_shape, img_transforms)
    voc_test = VOCSegDataset(False, input_shape, img_transforms)

    train_loader = DataLoader(voc_train,
                              batch_size=8,
Example #10
        normalize,
    ])

    test_transform = transforms.Compose([
        transforms.Resize(model_dimension),
        transforms.CenterCrop(center_crop),
        transforms.ToTensor(),
        normalize,
    ])

    # move the pretrained model to the GPU
    pretrained_model = pretrained_model.cuda()
    pretrained_model.eval()
    pretrained_model.volatile = True  # volatile is deprecated; torch.no_grad() is the modern equivalent
    if with_target:
        attack_net = unet.UNet(3, 3 * num_classes, batch_norm=True).cuda()
    else:
        attack_net = unet.UNet(3, 3, batch_norm=True).cuda()

    train_dataset = image_from_json.ImageDataSet(
        '/home/jinlukang/IJCAI/fast_attack/Attack-master/data/IJCAI_2019_AAAC_train/info.json',
        transform=train_transform)
    train_sampler = torch.utils.data.sampler.RandomSampler(
        train_dataset, True, num_classes * 5)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               num_workers=16,
                                               batch_size=batch_size,
                                               sampler=train_sampler)

    test_dataset = image_list_folder.ImageListFolder(
        root='/home/jinlukang/IJCAI/fast_attack/Attack-master/data/dev_data/',
Example #11
test_sa = helpers.load_images(files_sa_test, IMG_DIM, 2*sum(nr_slices_test))
test_sa = helpers.preprocess_test(test_sa)
test_sa = test_sa[np.newaxis,...]
test_sa = np.transpose(test_sa, (3, 0, 1, 2)) # rearrange axes for testing

test_gt = helpers.load_images(files_gt_test, IMG_DIM, 2*sum(nr_slices_test))
test_gt = test_gt[np.newaxis,...]
test_gt = np.transpose(test_gt, (3, 0, 1, 2)) # rearrange axes for testing

# add test data to pytorch data set
test_dataset = TensorDataset( torch.Tensor(test_sa), torch.Tensor(test_gt) )
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

# load model
model = unet.UNet(num_classes=NUM_CLASSES) # create instance of unet class
model.load_state_dict(torch.load('unet_conv_net_model3_550.ckpt')) # load weights of pretrained model
model.eval()
dice_score = np.zeros(test_sa.shape[0])

with torch.no_grad():
    for i, (image, label) in enumerate(test_dataloader):
        prediction = model(image) 
        segmented_img = torch.argmax(prediction,1) # for each pixel choose class with highest probability
        label_img = torch.squeeze(label)

        # dice score between the predicted image and label
        for j in range(image.shape[0]):
            curr_img = segmented_img[j,:,:]
            curr_lab = label_img[j,:,:]
            dice_score[i*BATCH_SIZE + j] = helpers.dice_metric(curr_lab,curr_img,NUM_CLASSES)
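helpers.dice_metric is not shown in the snippet; a plausible multi-class Dice computation matching its call signature (a sketch under that assumption):

import torch

def dice_metric(label, pred, num_classes, eps=1e-6):
    # average Dice over the foreground classes
    dices = []
    for c in range(1, num_classes):
        p = (pred == c).float()
        l = (label == c).float()
        denom = p.sum() + l.sum()
        if denom > 0:
            dices.append((2 * (p * l).sum() + eps) / (denom + eps))
    return torch.stack(dices).mean().item() if dices else 1.0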
Example #12
import unet
import utils
import tensorflow as tf
import matplotlib.pyplot as plt
from augmenter import augment_batch

# INPUT CHARACTERISTICS
OUTPUT_CHANNELS = 2
IMG_WIDTH = 128
IMG_HEIGHT = 128

# LOAD THE MODEL
AUTOTUNE = 1  # tf.data.experimental.AUTOTUNE would let TF tune this automatically
BATCH_SIZE = 4
net = unet.UNet(IMG_WIDTH, IMG_HEIGHT, OUTPUT_CHANNELS)
net.get_unet()

# DEFINE THE BASE DIRECTORY
base_dir = '/Users/tdincer/ML/NN_exercises/UNET'
train_im_folder = base_dir + '/data/train'

# MAKE THE PATHS
train_im, train_seg, validation_im, validation_seg = utils.assign_paths(
    train_im_folder, file_format='.png', split_no=5)

# DEFINE THE TRAIN DATA FLOW
tf_trainset = tf.data.Dataset.from_tensor_slices((train_im, train_seg))
tf_trainset = tf_trainset.map(utils.process_path, num_parallel_calls=AUTOTUNE)
tf_trainset = tf_trainset.map(augment_batch, num_parallel_calls=AUTOTUNE)
tf_trainset = tf_trainset.map(utils.one_hot_label, num_parallel_calls=AUTOTUNE)
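The snippet stops after mapping the dataset; a plausible continuation (an assumption, reusing the net.model attribute seen in Example #8) would batch the pipeline and train:

# hypothetical continuation of the pipeline above
tf_trainset = tf_trainset.shuffle(100).batch(BATCH_SIZE).prefetch(AUTOTUNE)
net.model.compile(optimizer='adam', loss='categorical_crossentropy')
net.model.fit(tf_trainset, epochs=10)
net.model.save('unet.h5')  # the file Example #8 later loads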
Example #13
    trainloader = data.DataLoader(trainset,
                                  batch_size=10,
                                  shuffle=True,
                                  num_workers=12)
    testloader = data.DataLoader(testset,
                                 batch_size=5,
                                 shuffle=False,
                                 num_workers=12)

    a = "cuda:" + str(args.cuda)
    device = torch.device(a if torch.cuda.is_available() else "cpu")
    criterion1 = nn.BCEWithLogitsLoss().to(device)
    #criterion1 = nn.Sigmoid().to(device)
    #criterion2 = FocalLoss().to(device)
    if args.mul:
        net1 = unet.UNet() if args.ver == 1 else unet_6.UNet()
    else:
        net = unet.run_cnn() if args.ver == 1 else unet_6.run_cnn()
    vall = False
    if args.pre is not None:
        checkpoint = torch.load(args.pre)
        if args.mul:
            net1.load_state_dict(checkpoint["net"])
            for child in net1.children():
                for param in child.parameters():
                    param.requires_grad = True
            net2 = run_cnn2()
            for child in net2.children():
                for param in child.parameters():
                    param.requires_grad = True
            with torch.no_grad():
Example #14
def main(trainDir, valDir, modelDir):
    trainDataset = FrankaSegmentationDataset(trainDir)
    trainDataLoader = DataLoader(trainDataset)

    valDataset = FrankaSegmentationDataset(valDir)
    valDataLoader = DataLoader(valDataset,
                               batch_size=2,
                               shuffle=False,
                               num_workers=5)

    if not os.path.exists(modelDir):
        os.makedirs(modelDir)

    model = unet.UNet(n_channels=3, n_classes=1)
    model = model.float()
    model = model.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.001,
                                momentum=0.9,
                                weight_decay=5e-4)
    criterion = torch.nn.BCELoss()

    trainWriter = SummaryWriter("logs/train")
    valWriter = SummaryWriter("logs/val")

    trainIter = 1
    valIter = 1
    globalIter = 1
    for epoch in range(31):
        epochTrainLoss = 0
        model.train()
        print("training started")
        for batch, (image, mask_true) in enumerate(trainDataLoader):

            image = image.cuda()
            mask_true = mask_true.cuda().float()
            mask_pred = model(image.float())

            loss = criterion(mask_pred, mask_true)
            epochTrainLoss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("Epoch: %d\tBatch: %d\tTraining Loss: %f" %
                  (epoch, batch, loss.detach().cpu().item()))
            trainWriter.add_scalar('train_loss',
                                   loss.detach().cpu().item(), trainIter)
            trainWriter.add_scalar('train_val_loss_combined',
                                   loss.detach().cpu().item(), globalIter)
            trainIter += 1
            globalIter += 1

        trainWriter.add_scalar('epoch_train_loss',
                               epochTrainLoss / (batch + 1), epoch)
        if (epoch % 10 == 0):
            torch.save(
                model.state_dict(), modelDir + "/pytorchmodel_epoch" +
                str(epoch) + time.strftime("_%Y%m%d_%H_%M_%S"))

        print("Validation started...")
        epochValLoss = 0
        model.eval()
        with torch.no_grad():
            for batch, (image, mask_true) in enumerate(valDataLoader):

                image = image.cuda()
                mask_true = mask_true.cuda().float()
                mask_pred = model(image.float())

                loss = criterion(mask_pred, mask_true)
                epochValLoss += loss.item()

                print("Epoch: %d\tBatch: %d\tValidation Loss: %f" %
                      (epoch, batch, loss.detach().cpu().item()))
                valWriter.add_scalar('val_loss',
                                     loss.detach().cpu().item(), valIter)
                valWriter.add_scalar('train_val_loss_combined',
                                     loss.detach().cpu().item(), globalIter)
                valIter += 1
                globalIter += 1

            valWriter.add_scalar('epoch_val_loss', epochValLoss / (batch + 1),
                                 epoch)

        trainWriter.close()
        valWriter.close()
Example #15
def Trainer(opt):
    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # Loss functions
    # criterion_L1 = torch.nn.L1Loss().cuda()
    # criterion = MS_SSIM_Loss(data_range=1.0, size_average=True, channel=3).cuda()
    criterion = nn.MSELoss().cuda()

    # Initialize model
    if opt.model == 'SGN':
        model = utils.create_generator(opt)
    elif opt.model == 'UNet':
        if opt.load:
            model = torch.load(opt.load).module
            print(f'Model loaded from {opt.load}')
        else:
            model = unet.UNet(opt.in_channels, opt.out_channels,
                              opt.start_channels)
    else:
        raise NotImplementedError(opt.model + ' is not implemented')

    dir_checkpoint = 'checkpoints/'
    try:
        os.mkdir(dir_checkpoint)
        print('Created checkpoint directory')
    except OSError:
        pass

    writer = SummaryWriter(
        comment=f'_{opt.model}_LR_{opt.lr}_BS_{opt.batch_size}')

    # To device
    if opt.multi_gpu:
        model = nn.DataParallel(model)
        model = model.cuda()
    else:
        model = model.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(model.parameters(),
                                   lr=opt.lr,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(opt, iteration, optimizer):
        # Set the learning rate to the specific value
        if iteration >= opt.iter_decreased:
            for param_group in optimizer.param_groups:
                param_group['lr'] = opt.lr_decreased

    # Save the model and run validation at the configured intervals
    def save_model(opt, epoch, iteration, len_dataset, network):
        """Save the model at "checkpoint_interval" and its multiple"""
        if (epoch % opt.save_interval == 0) and (iteration % len_dataset == 0):
            torch.save(
                network, dir_checkpoint + '%s_epoch%d_bs%d_mu%d_sigma%d.pth' %
                (opt.model, epoch, opt.batch_size, opt.mu, opt.sigma))
            print('The trained model is successfully saved at epoch %d' %
                  (epoch))

        if (epoch % opt.validate_interval == 0) and (iteration % len_dataset
                                                     == 0):
            psnr = validation.validate(network, opt)
            print('validate PSNR:', psnr)
            writer.add_scalar('PSNR/validate', psnr, iteration)

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Define the dataset
    opt.dataroot = opt.baseroot + 'DIV2K_train_HR'
    trainset = dataset.DenoisingDataset(opt)
    print('The overall number of images:', len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # For loop training
    for epoch in range(opt.epochs):
        for i, (noisy_img, img) in enumerate(dataloader):
            # To device
            noisy_img = noisy_img.cuda()
            img = img.cuda()

            # Train model
            optimizer_G.zero_grad()

            # Forward propagation
            recon_img = model(noisy_img)
            loss = criterion(recon_img, img)

            # Overall Loss and optimize
            loss.backward()
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(dataloader) + i
            iters_left = opt.epochs * len(dataloader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Recon Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(dataloader), loss.item(),
                   time_left))

            writer.add_scalar('Loss/train', loss.item(), iters_done)

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader),
                       model)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (iters_done + 1), optimizer_G)
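validation.validate is not shown above; the PSNR it reports is presumably computed along these lines (a sketch, assuming images normalized to [0, 1]):

import torch

def psnr(pred, target, max_val=1.0):
    mse = torch.mean((pred - target) ** 2)
    return (10 * torch.log10(max_val ** 2 / mse)).item()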
Example #16
test_sa = helpers.load_images(files_sa_test, IMG_DIM, 2*sum(nr_slices_test))

train_gt = helpers.load_images(files_gt_train, IMG_DIM, 2*sum(nr_slices_train))
validate_gt = helpers.load_images(files_gt_validate, IMG_DIM, 2*sum(nr_slices_validate))
test_gt = helpers.load_images(files_gt_test, IMG_DIM, 2*sum(nr_slices_test))

# data augmentation
train_sa_augmented, train_gt_augmented = helpers.data_augmentation(train_sa, train_gt, 10)

# add training data to pytorch dataset
train_dataset = TensorDataset( torch.Tensor(train_sa_augmented), torch.Tensor(train_gt_augmented) )
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)

      
# train network
unet_model = unet.UNet(num_classes=4) # create an instance of the unet class
loss_function = nn.CrossEntropyLoss() 
optimizer = optim.Adam(unet_model.parameters(), lr=LEARNING_RATE)

running_loss = 0 
printfreq = 1
savefreq = 2
for epoch in range(NUM_EPOCHS):
    for i, data in enumerate(train_dataloader):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = unet_model(inputs)  # forward pass
        labels = labels.type(torch.LongTensor)
        loss = loss_function(outputs, labels)
        loss.backward() # backpropagate loss
        optimizer.step() # update weights
Example #17
def main(testDir, modelDir):
    testDataset = FrankaSegmentationDataset(testDir)
    testDataLoader = DataLoader(testDataset,
                                batch_size=1,
                                shuffle=False,
                                num_workers=5)

    model = unet.UNet(n_channels=3, n_classes=1)
    model = model.float()
    model = model.cuda()

    criterion = torch.nn.BCELoss()
    model.load_state_dict(torch.load(modelDir))

    backSub = cv2.createBackgroundSubtractorMOG2()

    print("Testing started")
    counter = 0
    robotThreshold = 2

    ### Use eval() and no_grad() so no gradients are tracked and batch norm/dropout run in inference mode ###
    model.eval()
    with torch.no_grad():
        for batch, (image, mask_true) in enumerate(testDataLoader):
            image = image.cuda()
            mask_true = mask_true.cuda().float()
            mask_pred = model(image.float())

            loss = criterion(mask_pred, mask_true)
            print("Batch: %d\tTesting Loss: %f" %
                  (batch, loss.detach().cpu().item()))

            maskBatch = mask_pred.detach().cpu().numpy()
            maskTrueBatch = mask_true.detach().cpu().numpy()

            for i in range(maskBatch.shape[0]):
                mask = maskBatch[i].squeeze()
                maskTrue = maskTrueBatch[i].squeeze()
                tempImage = image[i]
                tempImage = np.rollaxis(tempImage.detach().cpu().numpy(), 0, 3)
                fgMask = backSub.apply(tempImage)

                kernel = np.ones((5, 5), np.uint8)
                maskOpened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

                masked_image_pred = copy.deepcopy(tempImage)
                masked_image_pred_opened = copy.deepcopy(tempImage)
                masked_image_true = copy.deepcopy(tempImage)

                masked_image_pred[mask < 0.9] = 0
                masked_image_pred_opened[maskOpened < 0.2] = 0
                masked_image_true[maskTrue < 0.4] = 0
                stacked_image = np.hstack(
                    [tempImage, masked_image_pred, masked_image_pred_opened])
                counter += 1

                maskCopy = copy.deepcopy(mask) * 255
                maskCopy[maskCopy < robotThreshold] = 0
                maskedImage = applyMask(tempImage, maskCopy, 200)

                cv2.imshow("maskedImage", maskedImage)
                cv2.imshow("rgbImage", tempImage)
                cv2.waitKey()
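applyMask is not defined in this snippet; a plausible helper that paints the pixels selected by the mask with a fixed intensity (an assumption):

import numpy as np

def applyMask(image, mask, value):
    # set masked pixels to `value` across all channels
    out = image.copy()
    out[mask > 0] = value
    return out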
Example #18
    model.eval()
    model.load_state_dict(torch.load(args.load, map_location='cuda'))
    test_dataset = MyDataset("data/test/imgs", "data/test/masks")
    test_dataloader = DataLoader(test_dataset)
    with torch.no_grad():
        k = 0
        for x, _ in test_dataloader:
            y = model(x.to(device))
            img_y = torch.squeeze(y).to('cpu').numpy() > 0.5
            plt.imsave(str(k) + '.png', img_y, format='png', cmap='gray')
            k = k + 1


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = unet.UNet(1, 1).to(device)
    parser = argparse.ArgumentParser()
    parser.add_argument('--type',
                        dest='type',
                        type=str,
                        default='train',
                        help='train or test')
    parser.add_argument('--batch_size',
                        dest='batch_size',
                        type=int,
                        default=8,
                        help='batch_size')
    parser.add_argument('--load',
                        dest='load',
                        type=str,
                        help='the path of the .pth file')
Example #19
# target image transform
tgt_trans = tsf.Compose([
    tsf.ToPILImage(),
    tsf.Resize((128, 128), interpolation=PIL.Image.NEAREST),
    tsf.ToTensor(),
])

# load the train dataset
train_dataset = nuclei.NUCLEI(train_data, src_trans, tgt_trans)
train_loader = t.utils.data.DataLoader(dataset=train_dataset,
                                       num_workers=num_workers,
                                       batch_size=train_batch_size,
                                       shuffle=True)

# load the model
model = unet.UNet(3, 1)  #.cuda()
optimizer = t.optim.Adam(model.parameters(), lr=learning_rate)

# =============
# checkpoint
# ===========
checkpoint_file = THIS_DIR + '/data/checkpoint-' + str(
    uuid.uuid4()) + '.pth.tar'

# iterate over the train data set for training
best_loss = 99999
for epoch in range(num_epochs):
    train_loss = Average()
    model.train()
    for i, (x_train, y_train) in enumerate(train_loader):
        x_train = t.autograd.Variable(x_train)  #.cuda())
Example #20
def main(args):
    # Loading the dataset
    trans = transforms.Compose([
        RandomCrop_Segmentation(640),
        Flip_Segmentation(),
        Rotate_Segmentation()
    ])
    val_trans = RandomCrop_Segmentation(640)

    train_dataset = Numpy_SegmentationDataset(
        os.path.join(args.image_dir_path, 'train'),
        os.path.join(args.GT_dir_path, 'train'),
        transform=trans)
    val_dataset = Numpy_SegmentationDataset(
        os.path.join(args.image_dir_path, 'val'),
        os.path.join(args.GT_dir_path, 'val'),
        transform=val_trans)

    train_loader = data_utils.DataLoader(train_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=2)
    val_loader = data_utils.DataLoader(val_dataset,
                                       batch_size=1,
                                       shuffle=True,
                                       num_workers=1)
    loaders = {'train': train_loader, 'val': val_loader}
    dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}

    print("Complete the preparing dataset")

    # Loading the class balancing weight
    if args.weight is not None:
        weight = np.load(args.weight)
        weight = torch.from_numpy(weight)
    else:
        weight = None

    # Setting the network
    model = args.model
    if model == 'segnet':
        net = segnet.SegNet(args.band_num, args.class_num, args.dropout_ratio)
    elif model == 'fcn32s':
        net = fcn.FCN32(args.band_num, args.class_num)
    elif model == 'fcn8s':
        net = fcn.FCN8(args.band_num, args.class_num)
    elif model == 'unet':
        net = unet.UNet(args.class_num, args.band_num)
    else:
        print('The model is not supported; please implement and add it')
        sys.exit()

    # load the model parameter
    if args.pretrained_model_path:
        print('load the pretrained model.')
        th = torch.load(args.pretrained_model_path)
        net.load_state_dict(th)
    net.cuda()

    # Define a Loss function and optimizer
    criterion = FocalLoss2d(gamma=args.gamma, weight=weight).cuda()
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 90)

    # initialize the best accuracy and best model weights
    best_model_wts = net.state_dict()
    best_acc = 0.0

    # initialize the loss and accuracy history
    loss_history = {"train": [], "val": []}
    acc_history = {"train": [], "val": []}

    # Train the network
    start_time = time.time()
    for epoch in range(args.epochs):
        scheduler.step()
        print('* ' * 20)
        print('Epoch {}/{}'.format(epoch + 1, args.epochs))
        print('* ' * 20)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train(True)
            else:
                net.train(False)

            # initialize the running loss and corrects
            running_loss = 0.0
            running_corrects = 0

            for i, data in enumerate(loaders[phase]):
                # get the input
                inputs, labels = data
                inputs = inputs.float()
                inputs = inputs / 255.0

                # wrap the inputs in Variables
                if phase == 'train':
                    inputs, labels = Variable(inputs.cuda()), Variable(
                        labels.long().cuda())
                else:
                    inputs = Variable(inputs.cuda(), volatile=True)
                    labels = Variable(labels.long().cuda())

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                outputs = net(inputs)
                _, preds = torch.max(outputs.data, 1)
                n, c, h, w = labels.size()
                labels = labels.view(n, h, w)
                loss = criterion(outputs, labels)

                # backward + optimize if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                # statistics (on PyTorch >= 0.4, use loss.item() instead of loss.data[0])
                running_loss += loss.data[0]
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase] / (640 * 640)
            loss_history[phase].append(epoch_loss)
            acc_history[phase].append(epoch_acc)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss,
                                                       epoch_acc))

            # copy the best model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = net.state_dict()

    elapsed_time = time.time() - start_time
    print('Training complete in {:.0f}m {:.0f}s'.format(
        elapsed_time // 60, elapsed_time % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    net.load_state_dict(best_model_wts)
    return net, loss_history, acc_history
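FocalLoss2d is not defined in the snippet; a common 2-D focal-loss formulation matching its usage (a sketch, not the original class):

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss2d(nn.Module):
    def __init__(self, gamma=2.0, weight=None):
        super().__init__()
        self.gamma = gamma
        self.weight = weight

    def forward(self, outputs, targets):
        # outputs: (N, C, H, W) logits; targets: (N, H, W) class indices
        logp = F.log_softmax(outputs, dim=1)
        ce = F.nll_loss(logp, targets, weight=self.weight, reduction='none')
        p = torch.exp(-ce)
        return ((1 - p) ** self.gamma * ce).mean()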
Example #21
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-train", "--train", type=bool, default = True,
    #     help="Training set")
    # ap.add_argument("-test", "--test", type=bool, default=False,
    #     help="Testing set")
    # args = vars(ap.parse_args())

    # # grab the number of GPUs and store it in a convenience variable
    # train = args["train"]
    # test = args["test"]

    train = args.train
    test = args.test
    pred = args.predict
    input_img = Input((im_height, im_width, 1), name='img')
    model = unet.UNet(input_img)

    # save_path=cwd + '/weights/weight.h5'

    # loss = "binary_crossentropy"

    # Train
    if train:
        train_model(model, save_path=save_path, loss=loss)

    # Test
    if test:
        weight_path = save_path
        threshold = 0.5
        test_model(weight_path, threshold)
    if pred: