Example no. 1
image_res = (1080, 1920)
roi_res = (880, 1600)  # regions of interest (to penalize for SGD)
dtype = torch.float32  # default datatype (Note: the result may be slightly different if you use float64, etc.)
device = torch.device('cpu')  # device to run on (use 'cuda' for a GPU)

# Options for the algorithm
loss = nn.MSELoss().to(device)  # loss function to use (try other loss functions!)
s0 = 1.0  # initial scale

root_path = os.path.join(opt.root_path, run_id, chan_str)  # path for saving out optimized phases
print(root_path)
# Tensorboard writer
summaries_dir = os.path.join(root_path, 'summaries')
utils.cond_mkdir(summaries_dir)
writer = SummaryWriter(summaries_dir)

# Hardware setup for CITL
if opt.citl:
    camera_prop = PhysicalProp(channel,
                               laser_arduino=True,
                               roi_res=(roi_res[1], roi_res[0]),
                               slm_settle_time=0.12,
                               range_row=(220, 1000),
                               range_col=(300, 1630),
                               patterns_path='F:/citl/calibration',
                               show_preview=True)
else:
    camera_prop = None
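
These options feed a phase-only SGD loop: the loss is applied on the ROI, and a learnable scale initialized from s0 matches reconstruction and target brightness. Below is a minimal sketch of such a loop (ignoring CITL); propagate_fn, init_phase, num_iters, and lr are placeholders, not the repository's SGD implementation.

def sgd_phase_opt(init_phase, target_amp, propagate_fn, num_iters=500, lr=0.01):
    """Gradient-descent phase optimization (sketch); target_amp is assumed pre-cropped to roi_res."""
    slm_phase = init_phase.clone().requires_grad_(True)
    s = torch.tensor(s0, device=device, requires_grad=True)  # learnable brightness scale
    optimizer = torch.optim.Adam([slm_phase, s], lr=lr)

    for _ in range(num_iters):
        optimizer.zero_grad()
        recon_amp = propagate_fn(slm_phase).abs()  # simulated amplitude at the image plane
        # penalize only the region of interest (plain center crop to roi_res)
        dy = (recon_amp.shape[-2] - roi_res[0]) // 2
        dx = (recon_amp.shape[-1] - roi_res[1]) // 2
        recon_roi = recon_amp[..., dy:dy + roi_res[0], dx:dx + roi_res[1]]
        loss(s * recon_roi, target_amp).backward()
        optimizer.step()

    return slm_phase.detach()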
Example no. 2
lr_s_phase = opt.lr_phase / 200
loss_model = nn.MSELoss().to(device)  # loss function for model training (or perceptualloss.PerceptualLoss())
loss_phase = nn.MSELoss().to(device)
loss_mse = nn.MSELoss().to(device)
s0_phase = 1.0  # initial scale for phase optimization
s0_model = 1.0  # initial scale for model training
sa = torch.tensor(s0_phase, device=device, requires_grad=True)
sb = torch.tensor(0.3, device=device, requires_grad=True)

num_iters_model_update = 1  # number of iterations for model-training subloops
num_iters_phase_update = 1  # number of iterations for phase optimization

# Path for data
result_path = './models'
model_path = opt.model_path  # path for new model checkpoints
utils.cond_mkdir(model_path)
phase_path = opt.phase_path  # path of precomputed phase pool
data_path = './data/train1080'  # path of targets


# Hardware setup
camera_prop = PhysicalProp(channel, laser_arduino=True, roi_res=(roi_res[1], roi_res[0]), slm_settle_time=0.15,
                           range_row=(220, 1000), range_col=(300, 1630),
                           patterns_path=opt.calibration_path,  # path of 21 x 12 calibration patterns, see Supplement.
                           show_preview=True)

# Model instance to train
# Check propagation_model.py for the default parameter settings!
blur = utils.make_kernel_gaussian(0.85, 3)  # Optional, just be consistent with inference.
model = ModelPropagate(distance=prop_dist,
                       feature_size=feature_size,
                       wavelength=wavelength,
                       blur=blur).to(device)
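
Roughly, the options above configure an alternation between refining phases from the pool with the current model and fitting the model to camera captures. The sketch below shows only that alternation; train_loader, load_phase, capture_amp, the model learning rate, and the exact role of the learnable scales sa and sb are assumptions, not the repository's code.

optimizer_model = torch.optim.Adam([*model.parameters(), sb], lr=3e-4)  # lr is a placeholder

for target_amp, target_idx in train_loader:  # targets loaded from data_path (assumed loader)
    # start from a phase in the precomputed pool (hypothetical helper)
    slm_phase = load_phase(phase_path, target_idx).requires_grad_(True)
    optimizer_phase = torch.optim.Adam([{'params': [slm_phase], 'lr': opt.lr_phase},
                                        {'params': [sa], 'lr': lr_s_phase}])

    # phase-update subloop: refine the phase through the current learned model
    for _ in range(num_iters_phase_update):
        optimizer_phase.zero_grad()
        recon_amp = model(slm_phase).abs()
        loss_phase(sa * recon_amp, target_amp).backward()
        optimizer_phase.step()

    # model-update subloop: fit the simulated output to the captured amplitude
    captured_amp = capture_amp(camera_prop, slm_phase)  # display the phase, grab a camera frame
    for _ in range(num_iters_model_update):
        optimizer_model.zero_grad()
        recon_amp = model(slm_phase.detach()).abs()
        loss_model(sb * recon_amp, captured_amp).backward()
        optimizer_model.step()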
Example no. 3
    recon_amp = recon_amp.squeeze().cpu().detach().numpy()
    target_amp = target_amp.squeeze().cpu().detach().numpy()

    if channel == 3:
        recon_amp = recon_amp.transpose(1, 2, 0)
        target_amp = target_amp.transpose(1, 2, 0)

    # calculate metrics
    psnr_val, ssim_val = utils.get_psnr_ssim(recon_amp, target_amp, multichannel=(channel == 3))

    idxs.append(target_idx)

    for domain in ['amp', 'lin', 'srgb']:
        psnrs[domain].append(psnr_val[domain])
        ssims[domain].append(ssim_val[domain])
        print(f'PSNR({domain}): {psnr_val[domain]:.4f}, SSIM({domain}): {ssim_val[domain]:.4f}')

    # save reconstructed image in srgb domain
    recon_srgb = utils.srgb_lin2gamma(np.clip(recon_amp**2, 0.0, 1.0))
    utils.cond_mkdir(recon_path)
    recon_srgb_8bit = (recon_srgb * np.iinfo(np.uint8).max).round().astype(np.uint8)
    imageio.imwrite(os.path.join(recon_path, f'{target_idx}_{run_id}_{chan_strs[channel]}.png'), recon_srgb_8bit)

# save it as a .mat file
data_dict = {}
data_dict['img_idx'] = idxs
for domain in ['amp', 'lin', 'srgb']:
    data_dict[f'ssims_{domain}'] = ssims[domain]
    data_dict[f'psnrs_{domain}'] = psnrs[domain]

sio.savemat(os.path.join(recon_path, f'metrics_{run_id}_{chan_strs[channel]}.mat'), data_dict)
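
To summarize the saved metrics later, the .mat file can be read back and averaged per domain; a small usage sketch (the file name simply mirrors the sio.savemat call above):

metrics = sio.loadmat(os.path.join(recon_path, f'metrics_{run_id}_{chan_strs[channel]}.mat'))
for domain in ['amp', 'lin', 'srgb']:
    mean_psnr = np.mean(metrics[f'psnrs_{domain}'])
    mean_ssim = np.mean(metrics[f'ssims_{domain}'])
    print(f'{domain}: mean PSNR = {mean_psnr:.2f} dB, mean SSIM = {mean_ssim:.4f}')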