# NOTE(review): this chunk was recovered from a whitespace-mangled paste in which
# all newlines had been stripped onto two physical lines; everything after the
# first '#' on each line had turned into comment text and most statements were
# dead. The layout below restores the intended statement-per-line structure.

if (0):
    # General L2 penalty on the Zernike coefficients - don't over-estimate them by value!
    tf_zernloss = experiments.lambda_zernike * reg.Reg_L2(muscat.TF_zernikefactors)
    # Penalty on the illumination-source shift in X/Y.
    tf_icshiftloss = experiments.lambda_icshift * reg.Reg_L2(muscat.TF_shiftIcX + muscat.TF_shiftIcY)
    # Fuse all loss functions.
    tf_regloss += tf_zernloss
    tf_regloss += tf_icshiftloss

'''Define Optimizer'''

'''Negativity Constraint'''
# tf_negsqrloss = reg.Reg_NegSqr(tf_helper.extract(tf.cast(muscat.TF_obj, tf.float32), muscat.mysize))  # -tf.minimum(tf.reduce_min(muscat.TF_obj-1.), 0)
# Penalize refractive-index values below the embedding medium (nEmbb + dn offset)
# and any negative absorption.
tf_negsqrloss = reg.Reg_NegSqr(muscat.TF_obj - myparams.nEmbb - myparams.dn)  # -tf.minimum(tf.reduce_min(muscat.TF_obj-1.), 0)
tf_negsqrloss += reg.Reg_NegSqr(muscat.TF_obj_absorption)
tf_negsqrloss *= experiments.lambda_neg

# Correct the fwd model - not good here!
# Global complex offset (real/imag parts start at 0) added to the forward model.
tf_glob_real = tf.constant(0., name='tf_glob_real')
tf_glob_imag = tf.constant(0., name='tf_glob_imag')
tf_norm = tf.complex(tf_glob_real, tf_glob_imag)

'''Define Loss-function'''
if (0):
    print('-------> Losstype is L1')
    # Allow a global phase parameter to avoid unwrapping effects.
    tf_fidelity = tf.reduce_mean(tf.abs(muscat.tf_meas - tf_fwd))
elif (0):
    # Total Variation
    print('We are using TV - Regularization')
    # Alternatively tf_total_variation_regularization / total_variation.
    tf_tvloss = muscat.tf_lambda_tv * reg.Reg_TV(muscat.TF_obj,
                                                 BetaVals=[muscat.dx, muscat.dy, muscat.dz],
                                                 epsR=muscat.tf_eps, is_circ=True)
    tf_tvloss += muscat.tf_lambda_tv * reg.Reg_TV(muscat.TF_obj_absorption,
                                                  BetaVals=[muscat.dx, muscat.dy, muscat.dz],
                                                  epsR=muscat.tf_eps, is_circ=True)

    '''Negativity Constraint'''
    # NOTE(review): bare `lambda_neg` here vs `experiments.lambda_neg` above -
    # confirm which name is actually in scope (this branch is dead under elif(0)).
    tf_negsqrloss = lambda_neg * reg.Reg_NegSqr(muscat.TF_obj)
    tf_negsqrloss += lambda_neg * reg.Reg_NegSqr(muscat.TF_obj_absorption)

    # Correct the fwd model - not good here!
    # NOTE(review): `tf_global_phase`/`tf_global_abs` are not defined in this chunk -
    # presumably declared earlier in the full file; verify.
    tf_norm = tf.complex(tf_global_phase, tf_global_abs)
    tf_fwd_corrected = tf_fwd + tf_norm
    # tf_fwd_corrected = (tf_fwd + 1j*tf.cast(tf_global_phase, tf.complex64)) / tf.cast(tf_global_abs, tf.complex64)

    '''Define Loss-function'''
    if (0):
        print('-------> ATTENTION Losstype is L1')
        # Allow a global phase parameter to avoid unwrapping effects.
        tf_fidelity = tf.reduce_mean(tf.abs(muscat.tf_meas - tf_fwd_corrected))
    else:
        print('-------> ATTENTION: Losstype is L2')
        # NOTE(review): the source chunk was truncated mid-call right after
        # `tf.reduce_mean(`; the squared-magnitude argument below is a plausible
        # L2-loss completion - verify against the full, un-mangled file.
        tf_fidelity = tf.reduce_mean(
            tf.square(tf.abs(muscat.tf_meas - tf_fwd_corrected)))