def load_model_kpn_1f(cfg):
    """Build a single-frame KPN model and its loss for the configured frame size.

    Selects ``KPN_1f`` for 128px frames and falls back to the 64px variant
    (``KPN_1f_fs64``) for every other size.

    :param cfg: config object; reads ``cfg.dynamic.frame_size``,
        ``cfg.input_N`` (burst length) and ``cfg.blind``.
    :returns: ``(model, loss_func)`` tuple.
    """
    # NOTE(review): kernel_size is hard-coded to [9] here, unlike the
    # cfg.kpn_1f_frame_size used by the sibling loader — confirm intentional.
    if cfg.dynamic.frame_size == 128:
        model = KPN_1f(color=True, kernel_size=[9],
                       burst_length=cfg.input_N, blind_est=True)
    else:
        # Any non-128 size falls through to the 64px architecture.
        model = KPN_1f_fs64(color=True, kernel_size=[9],
                            burst_length=cfg.input_N, blind_est=True)
    # BUGFIX: was `~cfg.blind` — bitwise NOT on a Python bool yields -2/-1,
    # both truthy, so tensor_grad could never be disabled. Use logical `not`.
    loss = LossFunc(tensor_grad=not cfg.blind, alpha=1.0)
    return model, loss
def load_model_kpn(cfg):
    """Build a burst KPN model and its loss.

    :param cfg: config object; reads ``cfg.kpn_frame_size`` (kernel size),
        ``cfg.input_N`` (burst length), ``cfg.blind`` and ``cfg.recon_l1``
        (reconstruction-loss selector).
    :returns: ``(model, loss_func)`` tuple.
    """
    model = KPN_model(color=True, kernel_size=[cfg.kpn_frame_size],
                      burst_length=cfg.input_N, blind_est=True)
    # BUGFIX: was `~cfg.blind` — bitwise NOT on a Python bool yields -2/-1,
    # both truthy, so tensor_grad could never be disabled. Use logical `not`.
    loss = LossFunc(tensor_grad=not cfg.blind, recon_l1=cfg.recon_l1)
    return model, loss
def load_model_kpn(cfg, num_frames):
    """Build a burst KPN model for an explicit frame count, plus its loss.

    :param cfg: config object; reads ``cfg.kpn_frame_size``,
        ``cfg.kpn_cascade_output`` and ``cfg.blind``.
    :param num_frames: burst length passed to the model.
    :returns: ``(model, loss_func)`` tuple.
    """
    model = KPN_model(color=True, burst_length=num_frames, blind_est=True,
                      kernel_size=[cfg.kpn_frame_size],
                      cascade=cfg.kpn_cascade_output)
    # BUGFIX: was `~cfg.blind` — bitwise NOT on a Python bool yields -2/-1,
    # both truthy, so tensor_grad could never be disabled. Use logical `not`.
    loss = LossFunc(tensor_grad=not cfg.blind, alpha=1.0)
    return model, loss
def load_model_kpn_1f_cls(cfg):
    """Build a single-frame classifying KPN model and its loss by frame size.

    64/128px frames use ``KPN_1f_cls`` (which takes the frame size
    explicitly); 32px frames use the dedicated ``KPN_1f_cls_fs32`` variant.

    :param cfg: config object; reads ``cfg.dynamic.frame_size``,
        ``cfg.kpn_1f_frame_size``, ``cfg.input_N``, ``cfg.kpn_filter_onehot``,
        ``cfg.kpn_cascade_output`` and ``cfg.blind``.
    :returns: ``(model, loss_func)`` tuple.
    :raises KeyError: for any unsupported frame size.
    """
    # BUGFIX: loss flag was `~cfg.blind` — bitwise NOT on a Python bool
    # yields -2/-1, both truthy; logical `not` gives the intended negation.
    loss = LossFunc(tensor_grad=not cfg.blind, alpha=1.0)
    if cfg.dynamic.frame_size in [64, 128]:
        model = KPN_1f_cls(color=True, kernel_size=[cfg.kpn_1f_frame_size],
                           burst_length=cfg.input_N, blind_est=True,
                           filter_thresh=cfg.kpn_filter_onehot,
                           cascade=cfg.kpn_cascade_output,
                           frame_size=cfg.dynamic.frame_size)
    elif cfg.dynamic.frame_size == 32:
        model = KPN_1f_cls_fs32(color=True,
                                kernel_size=[cfg.kpn_1f_frame_size],
                                burst_length=cfg.input_N, blind_est=True,
                                filter_thresh=cfg.kpn_filter_onehot,
                                cascade=cfg.kpn_cascade_output)
    else:
        # BUGFIX: "Uknown" typo in the error message.
        raise KeyError(f"Unknown frame size [{cfg.dynamic.frame_size}]")
    return model, loss
def load_model_kpn_1f(cfg):
    """Build a single-frame KPN model and its loss for 32/64/128px frames.

    :param cfg: config object; reads ``cfg.dynamic.frame_size``,
        ``cfg.kpn_1f_frame_size``, ``cfg.input_N`` and ``cfg.blind``.
    :returns: ``(model, loss_func)`` tuple.
    :raises KeyError: for any unsupported frame size.
    """
    # One architecture class per supported frame size.
    arch_by_size = {128: KPN_1f, 64: KPN_1f_fs64, 32: KPN_1f_fs32}
    arch = arch_by_size.get(cfg.dynamic.frame_size)
    if arch is None:
        # BUGFIX: message was a plain string missing the `f` prefix, so the
        # placeholder was emitted literally; also fixes the "Uknown" typo.
        raise KeyError(f"Unknown frame size [{cfg.dynamic.frame_size}]")
    model = arch(color=True, kernel_size=[cfg.kpn_1f_frame_size],
                 burst_length=cfg.input_N, blind_est=True)
    # BUGFIX: was `~cfg.blind` — bitwise NOT on a Python bool yields -2/-1,
    # both truthy, so tensor_grad could never be disabled. Use logical `not`.
    loss = LossFunc(tensor_grad=not cfg.blind, alpha=1.0)
    return model, loss
def get_kpn_model(cfg):
    """Assemble the KPN training bundle: model, loss closure, optimizer, scheduler.

    The returned model's ``forward`` is replaced by a wrapper that recenters
    the burst by -0.5, rearranges it into the stacked/concatenated layouts
    the network expects, and shifts the averaged output back by +0.5.

    :param cfg: config object; reads ``cfg.nframes`` and ``cfg.gpuid``.
    :returns: ``(model, loss_fxn, optimizer, scheduler_fxn)``.
    """
    net = KPN_model(color=True, burst_length=cfg.nframes,
                    blind_est=True, kernel_size=[5], cascade=False)
    net = net.to(cfg.gpuid, non_blocking=True)

    base_loss = LossFunc(tensor_grad=True, alpha=1.0)
    base_loss = base_loss.to(cfg.gpuid, non_blocking=True)

    adam = optim.Adam(net.parameters(), lr=1e-3)

    # Loss closure: recenter ground truth the same way the inputs were,
    # then sum the basic and annealed loss terms.
    def loss_fxn(denoised, gt_img, denoised_frames, step):
        shifted_gt = gt_img - 0.5  # matches the -0.5 input normalization
        basic, anneal = base_loss(denoised_frames, denoised, shifted_gt, step)
        return basic + anneal

    # Placeholder scheduler: satisfies the interface, does nothing per epoch.
    def scheduler_fxn(epoch):
        pass

    # Wrap forward: normalize, reshape to the two expected layouts, denormalize.
    raw_forward = net.forward

    def wrapped_forward(dyn_noisy, noise_info):
        centered = dyn_noisy - 0.5
        stacked = rearrange(centered, 't b c h w -> b t c h w')
        flattened = rearrange(centered, 't b c h w -> b (t c) h w')
        frames, averaged, filters = raw_forward(flattened, stacked)
        averaged += 0.5  # undo the input shift on the averaged output
        return averaged, frames

    net.forward = wrapped_forward
    return net, loss_fxn, adam, scheduler_fxn
def load_model_kpn(cfg):
    """Build a burst KPN model with a default-configured loss.

    :param cfg: config object; reads ``cfg.input_N`` (burst length) and
        ``cfg.kpn_frame_size`` (kernel size).
    :returns: ``(model, loss_func)`` tuple.
    """
    kpn = KPN_model(
        color=True,
        burst_length=cfg.input_N,
        blind_est=True,
        kernel_size=[cfg.kpn_frame_size],
    )
    return kpn, LossFunc()
def load_model_stn(cfg):
    """Build an STN burst model for square RGB frames, plus its loss.

    :param cfg: config object; reads ``cfg.dynamic.frame_size`` and
        ``cfg.blind``.
    :returns: ``(model, loss_func)`` tuple.
    """
    edge = cfg.dynamic.frame_size
    img_shape = (edge, edge, 3)  # square HWC RGB frame
    # BUGFIX: was `~cfg.blind` — bitwise NOT on a Python bool yields -2/-1,
    # both truthy, so tensor_grad could never be disabled. Use logical `not`.
    return STNBurst(img_shape), LossFunc(tensor_grad=not cfg.blind)
def load_model_kpn(cfg):
    """Build a burst KPN model (default kernel size) with a default loss.

    :param cfg: config object; reads ``cfg.input_N`` (burst length).
    :returns: ``(model, loss_func)`` tuple.
    """
    kpn = KPN_model(color=True, burst_length=cfg.input_N, blind_est=True)
    loss = LossFunc()
    return kpn, loss