Example #1
import os

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets import STL10

# opt, transform_train, DnCNN, weights_init_kaiming, and batch_PSNR are
# assumed to be defined elsewhere in the source project.

def main():
    # Load data
    print("=> Load data...")
    data = STL10(opt.path_data, split='train', transform=transform_train, download=True)
    loader_train = torch.utils.data.DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=2)
    # Build model
    print("=> Build model...")
    # channels=1 assumes transform_train converts STL10's RGB images to grayscale
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    if opt.resume and os.path.exists(os.path.join(opt.outf, 'net.pth')):
        print("Resuming training.")
        net.load_state_dict(torch.load(os.path.join(opt.outf, 'net.pth')))
    else:
        print("Training from scratch.")
        net.apply(weights_init_kaiming)
    # Loss: sum-reduced MSE, normalized per batch in the training loop
    criterion = nn.MSELoss(reduction='sum')
    # Optimizer
    optimizer = optim.Adam(net.parameters(), lr=opt.lr)
    # Training
    step = 0
    print("=> Begin training...")
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 5.
        # Set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('Learning rate %f' % current_lr)
        # Train
        for i, (img_train, imgn_train) in enumerate(loader_train, 0):
            # training step; the loader is assumed to yield (clean, noisy) image pairs
            net.train()
            optimizer.zero_grad()
            out_train = net(imgn_train.float())
            loss = criterion(out_train, img_train) / (imgn_train.size(0) * 2)
            loss.backward()
            optimizer.step()
            # Results (detach so PSNR is computed outside the autograd graph)
            net.eval()
            out_train = torch.clamp(out_train.detach(), 0., 1.)
            psnr_train = batch_PSNR(out_train, img_train, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            step += 1

        # Save model
        torch.save(net.state_dict(), os.path.join(opt.outf, 'net.pth'))
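
batch_PSNR is not defined in this excerpt. A minimal sketch consistent with how it is called above (mean per-image PSNR over the batch, with a data range of 1.0) might look like the following; the actual helper in the source project may differ:

import numpy as np

def batch_PSNR(img, imclean, data_range):
    # Hypothetical stand-in for the project's batch_PSNR helper.
    # Averages per-image PSNR over a batch of shape (N, C, H, W).
    img = img.detach().cpu().numpy().astype(np.float32)
    imclean = imclean.detach().cpu().numpy().astype(np.float32)
    psnr = 0.0
    for i in range(img.shape[0]):
        mse = np.mean((img[i] - imclean[i]) ** 2)
        psnr += 10.0 * np.log10((data_range ** 2) / (mse + 1e-12))
    return psnr / img.shape[0]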
Example #2
import os

import numpy as np
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader

# args, RootDataset, DnCNN, init_weights, PatchLoss, get_all_histograms,
# get_bin_weights, and add_noise are assumed to come from the surrounding project.

def main():

    # creat_readme()
    # Run on the GPU when available, otherwise fall back to the CPU
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('Loading dataset...')
    dataset_train = RootDataset(root_file=args.trainfile, sigma=args.sigma)
    loader_train = DataLoader(dataset=dataset_train, batch_size=args.batchSize)
    dataset_val = RootDataset(root_file=args.valfile, sigma=args.sigma)
    loader_val = DataLoader(dataset=dataset_val, batch_size=args.batchSize)

    # Build model
    model = DnCNN(channels=1,
                  num_of_layers=args.num_of_layers,
                  ker_size=args.kernelSize,
                  o_k_size=args.outKerSize).to(device=args.device)
    if args.model is None:
        model.apply(init_weights)
        print("Creating new model")
    else:
        print("Loading model from file " + args.model)
        model.load_state_dict(torch.load(args.model))
        model.eval()

    # Loss function
    criterion = PatchLoss()
    criterion.to(device=args.device)

    #Optimizer
    MyOptim = optim.Adam(model.parameters(), lr=args.lr)
    MyScheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=MyOptim,
                                                       factor=0.1,
                                                       patience=10,
                                                       verbose=True)
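    # ReduceLROnPlateau lowers the learning rate by `factor` once the metric
    # passed to MyScheduler.step() has not improved for `patience` epochs.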

    # training and validation
    training_losses = np.zeros(args.epochs)
    validation_losses = np.zeros(args.epochs)
    for epoch in range(args.epochs):
        print("Epoch #" + str(epoch))
        # training
        train_loss = 0
        model.train()
        for i, data in enumerate(loader_train, 0):
            MyOptim.zero_grad()
            truth, noise = data
            noise = noise.unsqueeze(1)
            output = model(noise.float().to(args.device), args.outKerSize)
            batch_loss = criterion(
                output.squeeze(1).to(args.device), truth.to(args.device),
                args.patchSize).to(args.device)
            batch_loss.backward()
            MyOptim.step()
            train_loss += batch_loss.item()
        training_losses[epoch] = train_loss
        print("Training Loss: " + str(train_loss))

        # validation (no gradient tracking needed)
        model.eval()
        val_loss = 0
        with torch.no_grad():
            for i, data in enumerate(loader_val, 0):
                val_truth, val_noise = data
                val_output = model(
                    val_noise.unsqueeze(1).float().to(args.device),
                    args.outKerSize)
                output_loss = criterion(
                    val_output.squeeze(1).to(args.device),
                    val_truth.to(args.device), args.patchSize).to(args.device)
                val_loss += output_loss.item()
        MyScheduler.step(val_loss)
        validation_losses[epoch] = val_loss
        print("Validation Loss: " + str(val_loss))
        # save the model
        torch.save(model.state_dict(), os.path.join(args.outf, 'net.pth'))
    plt.plot(training_losses, label='Training')
    plt.plot(validation_losses, label='Validation')
    plt.legend()
    plt.savefig(os.path.join(args.outf, 'lossplt.png'))

    branch = get_all_histograms("./test.root")
    model.to('cpu')
    for image in range(10):

        data = get_bin_weights(branch, image).copy()
        np.savetxt(args.outf + '/truth#' + str(image) + '.txt', data)

        means = np.mean(data)
        stdevs = np.std(data)

        noisy = add_noise(data, args.sigma).copy()
        np.savetxt(args.outf + '/noisy#' + str(image) + '.txt', noisy)

        data_norm = (data - means) / stdevs
        np.savetxt(args.outf + '/truth_norm#' + str(image) + '.txt', data_norm)
        noisy_norm = (noisy - means) / stdevs
        np.savetxt(args.outf + '/noisy_norm#' + str(image) + '.txt',
                   noisy_norm)

        noisy_norm = torch.from_numpy(noisy_norm).unsqueeze(0).unsqueeze(1)
        output_norm = model(
            noisy_norm.float(),
            args.outKerSize).squeeze(0).squeeze(0).detach().numpy()
        np.savetxt(args.outf + '/output_norm#' + str(image) + '.txt',
                   output_norm)
        output = (output_norm * stdevs) + means
        np.savetxt(args.outf + '/output#' + str(image) + '.txt', output)
        truth = data
        diff = output - truth
        noisy_diff = noisy - truth
        np.savetxt(args.outf + '/diff#' + str(image) + '.txt', diff)
    model.to(args.device)
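
add_noise is also external to this excerpt. Given how it is used above (additive smearing controlled by args.sigma), a plausible sketch is zero-mean Gaussian noise; the project's actual helper may differ:

import numpy as np

def add_noise(data, sigma):
    # Hypothetical stand-in for the project's add_noise helper:
    # adds zero-mean Gaussian noise with standard deviation `sigma`.
    return data + np.random.normal(0.0, sigma, size=data.shape)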
Example #3
import time

import torch

# DenoiseModel, dataset_train, noise_level, and optimizer are assumed to be
# defined earlier in the script this excerpt comes from.

valid_loss = []
time_epoch = []
train_psnr = []
valid_psnr = []
gpu_size = []
v_gpu_size = []

epoch_number = 10
for i in range(epoch_number):

    # Let's train the model
    s = time.time()
    total_loss = 0.0
    total_iter = 0
    j = 0
    DenoiseModel.train()
    for image in dataset_train:
        j += 1
        # print('Batch iter: {} begin training, GPU memory allocated: {} MB'.format(j, torch.cuda.memory_allocated() / 1024**2))
        gpu_size.append(torch.cuda.memory_allocated() / 1024**2)
        # add a batch dimension: (C, H, W) -> (1, C, H, W)
        image = image.unsqueeze(0)
        noise = torch.randn(image.shape) * noise_level
        image_n = image + noise
        image = image.cuda()
        image_n = image_n.cuda()
        # print('Batch iter: {} before forward pass, GPU memory allocated: {} MB'.format(j, torch.cuda.memory_allocated() / 1024**2))
        gpu_size.append(torch.cuda.memory_allocated() / 1024**2)

        optimizer.zero_grad()
        output = DenoiseModel(image_n)
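
The excerpt ends mid-iteration. Assuming a sum-style MSE objective as in Example #1 (the criterion used here is not shown), the remainder of a typical training step would be:

        # Continuation sketch, not part of the original excerpt;
        # `criterion` is an assumed MSE-style loss defined elsewhere.
        loss = criterion(output, image)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        total_iter += 1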