def adiabatic_opt(
    flim: Tuple,
    b1lim: Tuple,
    nf: int,
    nb: int,
    b1max: Number,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
    n_outer: int = 1,
):
    """Design an adiabatic RF pulse via arctan-LBFGS optimization.

    Args:
        flim, b1lim: frequency / B1 limit tuples forwarded to ``init``.
        nf, nb: number of frequency / B1 samples forwarded to ``init``.
        b1max: peak-B1 value forwarded to ``init``.
        device: torch device for the optimization tensors (default CPU).
        dtype: torch dtype for the optimization tensors.
        n_outer: number of optimize-then-symmetrize rounds.  Defaults to 1,
            matching the original hard-coded ``range(1)`` loop.

    Returns:
        The optimized pulse object.  (The original ended with a bare
        ``return`` and discarded the result; callers that ignore the
        return value are unaffected by this fix.)
    """
    target, cube, pIni, b1map = init(flim, b1lim, nf, nb, b1max, device, dtype)
    fn_err = metrics.err_l2z      # error on the z component
    fn_pen = penalties.pen_null   # no penalty term

    pulse = pIni
    for _ in range(n_outer):
        pulse, _ = optimizers.arctanLBFGS(
            target, cube, pulse,
            fn_err=fn_err, fn_pen=fn_pen,
            niter=10, niter_gr=0, niter_rf=2, b1Map=b1map,
        )
        # Symmetrize the RF waveform along dim 2 — nT, per the original
        # shape note ``(1, xy, nT)`` — by averaging it with its reverse.
        rf = pulse.rf
        pulse.rf = (rf + torch.flip(rf, dims=[2])) / 2

    # BUGFIX: previously a bare ``return`` threw away the optimized pulse.
    return pulse
# %% optimizer configuration
# Each setting falls back to a default via ``dflt_arg`` when it is absent
# from ``arg`` (a parameter dict loaded earlier in this file — not visible
# here; presumably holds 0-d arrays, given the ``.item()`` accessors).
arg['niter_rf'] = dflt_arg('niter_rf', 2, lambda k: arg[k].item())

eta = dflt_arg('eta', 4, lambda k: float(arg[k].item()))
print('eta: ', eta)

err_meth = dflt_arg('err_meth', 'l2xy', lambda k: arg[k].item())
pen_meth = dflt_arg('pen_meth', 'l2', lambda k: arg[k].item())

# Dispatch tables: config string -> error-metric / penalty callable.
err_hash = {'null': metrics.err_null,
            'l2xy': metrics.err_l2xy,
            'ml2xy': metrics.err_ml2xy,
            'l2z': metrics.err_l2z}
pen_hash = {'null': penalties.pen_null,
            'l2': penalties.pen_l2}
fn_err, fn_pen = err_hash[err_meth], pen_hash[pen_meth]

# %% pulse design
kw = {k: arg[k] for k in ('b1Map_', 'niter', 'niter_gr', 'niter_rf', 'doRelax')}

# Named toggle between the two optimization modes (was a bare ``if False:``
# dead branch; behavior is unchanged — the *_orig path still runs).
OPTIMIZE_SPIRAL_SHAPE = False
if OPTIMIZE_SPIRAL_SHAPE:
    # optimize spiral shape parameters
    pulse, optInfos = optimizers.arctanLBFGS(target, cube, pulse, fn_err, fn_pen, eta=eta, **kw)
else:
    # optimize directly on gradient samples (for fine tuning)
    pulse, optInfos = optimizers.arctanLBFGS_orig(target, cube, pulse, fn_err, fn_pen, eta=eta, **kw)

# %% saving
io.p2m(p2mName, pulse, {'optInfos': optInfos})