示例#1
0
def main():
    """Parse hyperparameters, train the denoising U-Net, and log metrics.

    Hyperparameters are read from the DKube tuning-info JSON file when it is
    available; otherwise module-level defaults (BATCH_SIZE, EPOCHS, lr=1e-3)
    are used. Command-line flags may override any of them.
    """
    try:
        # DKube writes tuned hyperparameters to this JSON file. The 'None'
        # default makes open() fail, which falls through to the defaults.
        with open(os.getenv('DKUBE_JOB_HP_TUNING_INFO_FILE', 'None'), 'r') as fp:
            hyperparams = json.loads(fp.read())
        hyperparams['num_epochs'] = EPOCHS
    except Exception:
        # No (or unreadable) tuning file: fall back to built-in defaults.
        # Was a bare `except: ... pass`; narrowed to Exception so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        hyperparams = {"batch_size": BATCH_SIZE, "num_epochs": EPOCHS, "learning_rate": 1e-3}
    parser = argparse.ArgumentParser(description='Image Denoising Example')
    parser.add_argument('--learning_rate', type=float,
                        default=float(hyperparams['learning_rate']),
                        help='Learning rate for training.')
    parser.add_argument('--batch_size', type=int, default=int(hyperparams['batch_size']),
                        help='input batch size for training (default: 1024)')
    parser.add_argument('--num_epochs', type=int, default=int(hyperparams['num_epochs']),
                        help='number of epochs to train (default: 10)')
    global FLAGS
    FLAGS, unparsed = parser.parse_known_args()
    my_train_data, my_val_data = get_data('train_data')
    #### Start Training ####
    net = UNet(1, depth=3)
    trainHist, valHist = training.trainNetwork(net=net, trainData=my_train_data,
                                               valData=my_val_data, postfix=nameModel,
                                               directory=MODEL_DIR, noiseModel=None,
                                               numOfEpochs=FLAGS.num_epochs,
                                               stepsPerEpoch=STEPS, device=device,
                                               virtualBatchSize=20,
                                               batchSize=FLAGS.batch_size,
                                               learningRate=FLAGS.learning_rate,
                                               supervised=True)
    ### Copying the Class File to Model Directory ###
    shutil.copyfile(CLASS_FILE, os.path.join(MODEL_DIR, CLASS_FILE.split('/')[-1]))
    #### Metric Evaluation ####
    dataTest, dataTestGT = get_data('test_data')
    metric_evaluation(dataTest, dataTestGT)
    #### Logging the metrics #####
    # Log per-epoch validation losses first, then training losses, using one
    # monotonically increasing step counter across both series.
    step = 1
    for i in range(0, FLAGS.num_epochs):
        log_metrics('Validation_Loss', valHist[i], i + 1, step)
        step = step + 1

    for i in range(0, FLAGS.num_epochs):
        log_metrics('Training_Loss', trainHist[i], i + 1, step)
        step = step + 1
示例#2
0
def metric_evaluation(dataTest, dataTestGT):
    """Evaluate the trained model on a few test images and log the average PSNR.

    Args:
        dataTest: array of noisy test images; the first 5 are evaluated.
        dataTestGT: array of ground-truth images; only index 0 is used since
            the ground truth is the same for all test images.

    Side effects: prints the average PSNR (+- 2 SEM) and logs the metric
    "Avg_PSNR_CARE" to mlflow.
    """
    # Removed unused `resultImgs`/`inputImgs` accumulators that were filled
    # but never read.
    careRes = []
    model = UNet(1, depth=3)
    model.load_state_dict(torch.load(MODEL_DIR + '/model.pt'))
    model.eval()
    ## Testing on 5 images for demo purpose
    for index in range(5):
        im = dataTest[index]
        gt = dataTestGT[0]  # The ground truth is the same for all images
        careResult = prediction.tiledPredict(im, model, ps=256, overlap=48,
                                             device=device, noiseModel=None)
        # PSNR is computed against the ground-truth dynamic range.
        rangePSNR = np.max(gt) - np.min(gt)
        carePrior = PSNR(gt, careResult, rangePSNR)
        careRes.append(carePrior)
    careArr = np.array(careRes)
    print("Avg PSNR CARE:", np.mean(careArr), '+-(2SEM)',
          2 * np.std(careArr) / np.sqrt(float(len(careRes))))
    #### Logging the metric using mlflow #######
    mlflow.log_metric("Avg_PSNR_CARE", np.mean(careArr))
示例#3
0
    
   
    print ('save_file_name : ', save_file_name)

# # Load the training data

# Noisy training data from HDF5: first 22000 slices train, the rest validate.
data = h5py.File(tr_data_dir, "r")
tr_source = data["noisy_images"][:22000,:,:]
val_source = data["noisy_images"][22000:,:,:]

# Test set provides both clean targets and noisy sources.
data = h5py.File(te_data_dir, "r")
te_target = data["clean_images"][:,:,:]
te_source = data["noisy_images"][:,:,:]


# tr_target = data["clean"][:,:,:]


# The N2V network requires only a single output unit per pixel
net = UNet(1, depth=3)

# Split training and validation data.
# NOTE(review): these two copies are not used below — trainNetwork receives
# tr_source/val_source directly; presumably leftover from an earlier
# revision. Confirm before removing.
my_train_data=tr_source.copy()
my_val_data_source=te_source.copy()

# Start training.
trainHist, valHist = training.trainNetwork(net=net, trainData=tr_source, valData=val_source, te_Data_target = te_target, te_Data_source = te_source,
                                           postfix='conv_N2V', directory=None, noiseModel=None,
                                           device=device, numOfEpochs= 200, stepsPerEpoch=10, 
                                           virtualBatchSize=20, batchSize=1, learningRate=1e-3, save_file_name = save_file_name)
示例#4
0
from unet.model import UNet
from matplotlib import pyplot
from unet.loader import ISBI_Loader, Test_Loader
from unet.loader import im_to_tensor, im_trans

if __name__ == "__main__":
    # Build a single-image test loader over the OCT dataset.
    data_path = "/home/nonari/Documentos/tfgdata/tfgoct"
    isbi_dataset = Test_Loader(data_path, 0)
    train_loader = torch.utils.data.DataLoader(dataset=isbi_dataset,
                                               batch_size=1,
                                               shuffle=True)
    criterion = nn.BCEWithLogitsLoss()
    # NOTE(review): both branches of this ternary select 'cpu', so CUDA is
    # never used even when available — likely 'cuda' was intended; confirm.
    device = torch.device('cpu' if torch.cuda.is_available() else 'cpu')
    # Load the network: single-channel input, 9 output classes.
    net = UNet(n_channels=1, n_classes=9)
    # Copy the network to the device
    net.to(device=device)
    # Load model parameters
    net.load_state_dict(
        torch.load('/home/nonari/Descargas/best_model_v1.pth',
                   map_location=device))
    # Test mode (inference-time behavior for dropout/batch-norm layers)
    net.eval()

    # Read one image and its segmentation label for prediction.
    img = cv2.imread("/home/nonari/Documentos/tfgdata/tfgoct/img_9_20.png")
    lab = cv2.imread("/home/nonari/Documentos/tfgdata/tfgoct/seg_9_20.png")
    # Convert to grayscale
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    lab = cv2.cvtColor(lab, cv2.COLOR_BGR2GRAY)
    # Convert to batch as 1, channel as 1, size 512*512 array
示例#5
0
                        help="Scale factor for the input images",
                        default=0.5)

    return parser.parse_args()


def mask_to_image(mask):
    """Convert a mask array with values in [0, 1] into an 8-bit PIL image."""
    scaled = (mask * 255).astype(np.uint8)
    return Image.fromarray(scaled)


if __name__ == "__main__":

    args = get_args()
    in_files = os.listdir(args.source)

    # Two independent U-Nets are loaded: one "all" model and one "vertical"
    # model, each with its own checkpoint.
    net_all = UNet(n_channels=3, n_classes=1)
    net_vertical = UNet(n_channels=3, n_classes=1)

    print(f'Loading model {all_best_model}, {vertical_best_model}')

    # NOTE(review): the `False and ...` guard forces CPU even when CUDA is
    # available — presumably deliberate for this script, but confirm.
    device = torch.device(
        'cuda' if False and torch.cuda.is_available() else 'cpu')
    print(f'Using device {device}')
    net_all.to(device=device)
    net_vertical.to(device=device)
    # Restore both checkpoints onto the selected device.
    net_all.load_state_dict(torch.load(all_best_model, map_location=device))
    net_vertical.load_state_dict(
        torch.load(vertical_best_model, map_location=device))
    print("Model loaded !")

    os.makedirs(dir_output_all, exist_ok=True)
示例#6
0
        default=20,
        help='Percent of the data that is used as validation (0-100)')

    return parser.parse_args()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    args = get_args()
    # NOTE(review): `and False` forces CPU even when CUDA is available —
    # presumably a temporary debugging override; confirm before shipping.
    device = torch.device(
        'cuda' if torch.cuda.is_available() and False else 'cpu')
    logging.info(f'Using device {device}')

    # Resume bookkeeping: epoch to start from and an optional checkpoint path.
    start_epoch, model_path = get_epoch_model()
    net = UNet(n_channels=n_channels, n_classes=n_classes)
    logging.info(
        f'Network:\n'
        f'\t{net.n_channels} input channels\n'
        f'\t{net.n_classes} output channels (classes)\n'
        f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling')

    # Restore weights when a previous checkpoint exists.
    if model_path is not None:
        net.load_state_dict(torch.load(model_path, map_location=device))
        logging.info(f'Model loaded from {model_path}')

    if not os.path.exists(dir_checkpoint):
        os.mkdir(dir_checkpoint)

    net.to(device=device)
示例#7
0
# #### Noisy Data (Input to network)

# In[4]:

# The CARE network requires only a single output unit per pixel
import os, glob
os.makedirs(options.dir, exist_ok=True)
nameModel=options.jobid

# Resume from the last checkpoint when requested and the file exists;
# otherwise build a fresh U-Net of the configured depth.
model_file = os.path.join(options.dir, "last.net")
if options.load_model and os.path.exists(model_file):
    net=torch.load(model_file)
    print('Model loaded from', model_file)
else:
    net = UNet(1, depth=options.unet_depth)

# On multi-GPU machines wrap in DataParallel: `pmodel` is the object moved
# to the device, while `model` keeps a handle to the unwrapped module.
if nGPU > 1:
    print("Using", nGPU, "GPUs")
    pmodel = torch.nn.DataParallel(net)
    model = pmodel.module
else:
    pmodel = net
pmodel.to(device)

# Inference-only mode: run prediction and stop the script.
if options.test_only:
    predict(net, options.data)
    exit()

# data=imread(path+fileName)
# nameModel=dataName+'_care'
示例#8
0
File: train.py  Project: Onojimi/src
                      help='load file model')
    parser.add_option('-s',
                      '--scale',
                      dest='scale',
                      default=1,
                      type=float,
                      help='downscaling factor of the images')

    (options, args) = parser.parse_args()
    return options


if __name__ == '__main__':
    # get_args() returns the parsed optparse options object.
    args = get_args()
    #     os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    net = UNet(input_channels=3, nclasses=1)

    #     net.cuda()
    #     import pdb
    #     from torchsummary import summary
    #     summary(net, (3,1000,1000))
    #     pdb.set_trace()
    # Optionally resume from a saved state dict.
    if args.load:
        net.load_state_dict(torch.load(args.load))
        print('Model loaded from {}'.format(args.load))

    # Move to GPU when requested; use DataParallel across multiple devices.
    if args.gpu:
        if torch.cuda.device_count() > 1:
            net = nn.DataParallel(net)
        net.cuda()
示例#9
0
def main(args):
    """Train a binary segmentation network (U-Net or FC-DenseNet).

    Args:
        args: parsed CLI namespace. Fields used: results, model, device,
            aug_scale, aug_angle, aug_flip, image_size, val_percent,
            batch_size, workers, logs, lr, epochs, vis_images.

    Raises:
        ValueError: if ``args.model`` is neither 'u-net' nor 'fcd-net'.

    Side effects: saves checkpoints and a scalars JSON under ``args.results``
    and TensorBoard summaries under ``args.logs``.
    """
    presentParameters(vars(args))
    results_path = args.results
    if not os.path.exists(results_path):
        os.makedirs(results_path)

    # Persist the run configuration next to the results.
    save_args(args, modelpath=results_path)

    device = torch.device(args.device)
    if args.model == 'u-net':
        from unet.model import UNet
        model = UNet(in_channels=3, n_classes=1).to(device)
    elif args.model == 'fcd-net':
        from tiramisu.model import FCDenseNet
        # select model architecture so it can be trained in 16gb ram GPU
        model = FCDenseNet(in_channels=3,
                           n_classes=1,
                           n_filter_first_conv=48,
                           n_pool=4,
                           growth_rate=8,
                           n_layers_per_block=3,
                           dropout_p=0.2).to(device)
    else:
        # Fail fast: the original only printed a message and then crashed
        # with a NameError on the undefined `model` below.
        raise ValueError(
            'Parsed model argument "{}" invalid. Possible choices are "u-net" or "fcd-net"'
            .format(args.model))

    # Init weights for model
    model = model.apply(weights_init)

    transforms = my_transforms(scale=args.aug_scale,
                               angle=args.aug_angle,
                               flip_prob=args.aug_flip)
    print('Trainable parameters for model {}: {}'.format(
        args.model, get_number_params(model)))

    # create pytorch dataset from pre-exported numpy arrays
    dataset = DataSetfromNumpy(
        image_npy_path='data/train_img_{}x{}.npy'.format(
            args.image_size, args.image_size),
        mask_npy_path='data/train_mask_{}x{}.npy'.format(
            args.image_size, args.image_size),
        transform=transforms)

    # create training and validation set
    n_val = int(len(dataset) * args.val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])

    ## hacky solution: only add CustomToTensor transform in validation
    ## NOTE(review): random_split subsets share one underlying dataset, so
    ## this assignment also changes the transform seen by `train` — confirm
    ## that is intended.
    from utils.transform import CustomToTensor
    val.dataset.transform = CustomToTensor()

    print('Training the model with n_train: {} and n_val: {} images/masks'.
          format(n_train, n_val))
    train_loader = DataLoader(train,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers)
    val_loader = DataLoader(val,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers)

    dc_loss = DiceLoss()
    writer = SummaryWriter(log_dir=os.path.join(args.logs, args.model))
    optimizer = Adam(params=model.parameters(), lr=args.lr)
    # Learning rate scheduler: shrink LR when validation loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='min',
                                                           factor=0.9,
                                                           patience=5)

    # Per-epoch loss histories, written to JSON at the end.
    loss_train = []
    loss_valid = []

    # training loop:
    global_step = 0
    for epoch in range(args.epochs):
        eval_count = 0
        epoch_start_time = datetime.datetime.now().replace(microsecond=0)
        # set model into train mode
        model = model.train()
        train_epoch_loss = 0
        valid_epoch_loss = 0
        # tqdm progress bar
        with tqdm(total=n_train,
                  desc=f'Epoch {epoch + 1}/{args.epochs}',
                  unit='img') as pbar:
            for batch in train_loader:
                # retrieve images and masks and send to pytorch device
                imgs = batch['image'].to(device=device, dtype=torch.float32)
                true_masks = batch['mask'].to(
                    device=device,
                    dtype=torch.float32
                    if model.n_classes == 1 else torch.long)

                # compute prediction masks (sigmoid for binary, softmax
                # over channels otherwise)
                predicted_masks = model(imgs)
                if model.n_classes == 1:
                    predicted_masks = torch.sigmoid(predicted_masks)
                elif model.n_classes > 1:
                    predicted_masks = F.softmax(predicted_masks, dim=1)

                # compute dice loss
                loss = dc_loss(y_true=true_masks, y_pred=predicted_masks)
                train_epoch_loss += loss.item()
                # update model network weights
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # logging
                writer.add_scalar('Loss/train', loss.item(), global_step)
                # update progress bar
                pbar.update(imgs.shape[0])
                # Do evaluation every 25 training steps
                if global_step % 25 == 0:
                    eval_count += 1
                    val_loss = np.mean(
                        eval_net(model, val_loader, device, dc_loss))
                    valid_epoch_loss += val_loss
                    writer.add_scalar('Loss/validation', val_loss, global_step)
                    if model.n_classes > 1:
                        pbar.set_postfix(
                            **{
                                'Training CE loss (batch)': loss.item(),
                                'Validation CE (val set)': val_loss
                            })
                    else:
                        pbar.set_postfix(
                            **{
                                'Training dice loss (batch)': loss.item(),
                                'Validation dice loss (val set)': val_loss
                            })

                global_step += 1
                # save images as well as true + predicted masks into writer
                if global_step % args.vis_images == 0:
                    writer.add_images('images', imgs, global_step)
                    if model.n_classes == 1:
                        writer.add_images('masks/true', true_masks,
                                          global_step)
                        writer.add_images('masks/pred', predicted_masks > 0.5,
                                          global_step)

            # Get estimation of training and validation loss for entire epoch.
            # Guard against epochs where no evaluation step fired: eval_count
            # can be 0 when no global_step in the epoch hit a multiple of 25,
            # which previously raised ZeroDivisionError.
            valid_epoch_loss /= max(eval_count, 1)
            train_epoch_loss /= len(train_loader)

            # Apply learning rate scheduler per epoch
            scheduler.step(valid_epoch_loss)
            # Only save the model in case the validation metric is best. For the first epoch, directly save
            if epoch > 0:
                best_model_bool = [valid_epoch_loss < l for l in loss_valid]
                best_model_bool = np.all(best_model_bool)
            else:
                best_model_bool = True

            # append epoch-level losses for the scalars JSON written below
            loss_train.append(train_epoch_loss)
            loss_valid.append(valid_epoch_loss)

            if best_model_bool:
                print(
                    '\nSaving model and optimizers at epoch {} with best validation loss of {}'
                    .format(epoch, valid_epoch_loss))
                torch.save(obj={
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'lr_scheduler': scheduler.state_dict(),
                },
                           f=results_path +
                           '/model_epoch-{}_val_loss-{}.pth'.format(
                               epoch, np.round(valid_epoch_loss, 4)))
                # NOTE(review): the epoch-duration print only runs when a new
                # best model is saved — confirm that is intended.
                epoch_time_difference = datetime.datetime.now().replace(
                    microsecond=0) - epoch_start_time
                print('Epoch: {:3d} time execution: {}'.format(
                    epoch, epoch_time_difference))

    print(
        'Finished training the segmentation model.\nAll results can be found at: {}'
        .format(results_path))
    # save scalars dictionary as json file
    scalars = {'loss_train': loss_train, 'loss_valid': loss_valid}
    with open('{}/all_scalars.json'.format(results_path), 'w') as fp:
        json.dump(scalars, fp)

    print('Logging file for tensorboard is stored at {}'.format(args.logs))
    writer.close()
示例#10
0
    p = (size_of_psf - 1) // 2
    filt[p, p] = 1
    filt = torch.tensor(
        gaussian_filter(filt,
                        std_gauss).reshape(1, 1, size_of_psf,
                                           size_of_psf).astype(np.float32))
    filt = filt / torch.sum(filt)
    return filt


psf_tensor = artificial_psf()

####################################################
#           CREATE AND TRAIN NETWORK
####################################################
net = UNet(1, depth=args.netDepth)
# Attach the point-spread-function tensor to the network on the target device.
net.psf = psf_tensor.to(device)
# Split training and validation data
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the exact drop-in replacement (np.int was an alias for it).
splitter = int(data.shape[0] * args.validationFraction / 100.)
print("splitter = ", splitter)
# NOTE(review): if splitter is 0, data[:-0] is an empty slice — assumes
# validationFraction yields at least one validation sample; confirm.
my_train_data = data[:-splitter].copy()
my_val_data = data[-splitter:].copy()

# Start training
trainHist, valHist = training.trainNetwork(
    net=net,
    trainData=my_train_data,
    valData=my_val_data,
    postfix=args.name,
    directory=path,
    noiseModel=None,
示例#11
0
####################################################
#           PREPARE Noise Model
####################################################

# Load the precomputed noise histogram from disk.
histogram=np.load(path+args.histogram)

# Create a NoiseModel object from the histogram.
noiseModel=histNoiseModel.NoiseModel(histogram, device=device)


####################################################
#           CREATE AND TRAIN NETWORK
####################################################

# U-Net with 800 output channels (matches the other scripts in this project,
# where the outputs are interpreted as samples from the prior).
net = UNet(800, depth=args.netDepth)

# Split training and validation data.
# The last 5 images are held out for validation; both splits are shuffled
# in place.
my_train_data=data[:-5].copy()
np.random.shuffle(my_train_data)
my_val_data=data[-5:].copy()
np.random.shuffle(my_val_data)

# Start training.
trainHist, valHist = training.trainNetwork(net=net, trainData=my_train_data, valData=my_val_data,
                                           postfix=args.name, directory=path, noiseModel=noiseModel,
                                           device=device, numOfEpochs= args.epochs, stepsPerEpoch=args.stepsPerEpoch,
                                           virtualBatchSize=args.virtualBatchSize, batchSize=args.batchSize,
                                           learningRate=args.learningRate,
                                           augment=False)
示例#12
0
                                  None])

    return np.stack(val_crops, axis=0)


# Build the fixed validation set of cropped sinograms.
val_sinograms = load_validation_sinograms(100, crop_size)
print('Validation set size: %d' % len(val_sinograms))
print('Validation shape: %s' % (val_sinograms.shape, ))

# %% [markdown]
# ### Create the Network and Train it
# This can take a while.

# %%
# Create a network with 800 output channels that are interpreted as samples from the prior.
net = UNet(800, depth=3)

# Split training and validation data.
# my_train_data=data[:-5].copy()
# my_val_data=data[-5:].copy()

# Start training.
os.makedirs("output", exist_ok=True)
trainHist, valHist = training.trainNetwork(
    net=net,
    trainData=train_generator,
    valData=val_sinograms,
    postfix='conv',
    directory="output",
    #noiseModel=noiseModel,
    patchSize=args.crop,