def setUp(self, *args, **kwargs):
    M, N, K = 3, 4, 5
    self.ig = ImageGeometry(M, N, K)
    self.x = self.ig.allocate('random', seed=1)
    self.b = self.ig.allocate('random', seed=2)
    self.eta = self.ig.allocate(0.1)
    self.operator = IdentityOperator(self.ig)

    scalar = 0.25
    self.f1 = L2NormSquared()
    self.f2 = L1Norm()
    self.f3 = scalar * L2NormSquared()
    self.f4 = scalar * L1Norm()
    self.f5 = scalar * L2NormSquared(b=self.b)
    self.f6 = scalar * L1Norm(b=self.b)
    self.f7 = ZeroFunction()
    self.f8 = 5 * ConstantFunction(10)
    self.f9 = LeastSquares(self.operator, self.b, c=scalar)
    self.f10 = 0.5 * KullbackLeibler(b=self.b, eta=self.eta)
    self.f11 = KullbackLeibler(b=self.b, eta=self.eta)
    self.f12 = 10

    self.list1 = [self.f1, self.f2, self.f3, self.f4, self.f5, self.f6,
                  self.f7, self.f8, self.f9, self.f10, self.f11, self.f12]
def setup(data, dnoise):
    if dnoise == 's&p':
        n1 = applynoise.saltnpepper(data, salt_vs_pepper=0.9, amount=0.2, seed=10)
    elif dnoise == 'poisson':
        scale = 5
        n1 = applynoise.poisson(data.as_array() / scale, seed=10) * scale
    elif dnoise == 'gaussian':
        n1 = applynoise.gaussian(data.as_array(), seed=10)
    else:
        raise ValueError('Unsupported noise: {}'.format(dnoise))

    noisy_data = ig.allocate()
    noisy_data.fill(n1)

    # Regularisation parameter depending on the noise distribution
    if dnoise == 's&p':
        alpha = 0.8
    elif dnoise == 'poisson':
        alpha = 1.0
    elif dnoise == 'gaussian':
        alpha = 0.3

    # Data fidelity depending on the noise distribution
    if dnoise == 's&p':
        g = L1Norm(b=noisy_data)
    elif dnoise == 'poisson':
        g = KullbackLeibler(b=noisy_data)
    elif dnoise == 'gaussian':
        g = 0.5 * L2NormSquared(b=noisy_data)

    return noisy_data, alpha, g
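# Hedged usage sketch for setup() above (not from the original file). It
# assumes the standard CIL import path below and that setup()'s own
# dependencies (applynoise and the fidelity functions) are already imported
# at module level, as in the original test file. Note that setup() reads
# `ig` from module scope, so the geometry is created under that exact name;
# the 64x64 size and seed are illustrative choices only.
from cil.framework import ImageGeometry

ig = ImageGeometry(64, 64)
data = ig.allocate('random', seed=3)

noisy_data, alpha, g = setup(data, 'gaussian')
print(type(g).__name__, alpha)   # expected: ScaledFunction 0.3 (0.5 * L2NormSquared)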
def test_FISTA_catch_Lipschitz(self):
    if debug_print:
        print("Test FISTA catch Lipschitz")
    ig = ImageGeometry(127, 139, 149)
    # fill b with random numbers
    b = ig.allocate()
    b.fill(numpy.random.random(b.shape))
    initial = ig.allocate(ImageGeometry.RANDOM)
    identity = IdentityOperator(ig)

    norm2sq = LeastSquares(identity, b)
    if debug_print:
        print('Lipschitz', norm2sq.L)
        print("initial objective", norm2sq(initial))

    # L1Norm is not differentiable, so FISTA cannot derive a Lipschitz
    # constant for its step size and should raise a ValueError
    with self.assertRaises(ValueError):
        FISTA(initial=initial, f=L1Norm(), g=ZeroFunction())
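# For contrast, a hedged sketch (not in the original test) of the working
# configuration: LeastSquares is differentiable with a known Lipschitz
# constant, so FISTA can set its default step size. This reuses `initial`,
# `norm2sq` and ZeroFunction from the test above; the iteration count is
# arbitrary, and run(iterations, verbose=...) is the assumed CIL signature.
alg = FISTA(initial=initial, f=norm2sq, g=ZeroFunction())
alg.run(10, verbose=0)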
def setUp(self):
    ig = ImageGeometry(2, 3, 2)
    data = ig.allocate(1, dtype=np.float32)
    noisy_data = data + 1

    # TV regularisation parameter
    self.alpha = 1

    self.fidelities = [
        0.5 * L2NormSquared(b=noisy_data),
        L1Norm(b=noisy_data),
        KullbackLeibler(b=noisy_data, use_numba=False)
    ]

    F = self.alpha * MixedL21Norm()
    K = GradientOperator(ig)

    # Compute operator norm
    normK = K.norm()

    # Primal & dual step sizes
    self.sigma = 1. / normK
    self.tau = 1. / normK

    self.F = F
    self.K = K
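# Hedged sketch (not in the original file) of how a test method on this
# class might consume these fixtures: PDHG solves min_x F(Kx) + G(x) for
# each fidelity G. The import path is the standard CIL one; the choice
# sigma = tau = 1/||K|| satisfies the usual sigma * tau * ||K||^2 <= 1
# convergence condition, and the iteration count is arbitrary. `self`
# refers to the fixture object populated by setUp() above.
from cil.optimisation.algorithms import PDHG

for G in self.fidelities:
    pdhg = PDHG(f=self.F, g=G, operator=self.K,
                sigma=self.sigma, tau=self.tau)
    pdhg.run(5, verbose=0)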
def test_TranslateFunction(self):
    # Test TranslateFunction
    ig = ImageGeometry(4, 4)
    tmp = ig.allocate('random', seed=10)
    b = ig.allocate('random', seed=10)
    scalar = 0.4
    tau = 0.05

    list1 = [
        L2NormSquared(),
        scalar * L2NormSquared(),
        scalar * L2NormSquared(b=b),
        L1Norm(),
        scalar * L1Norm(),
        scalar * L1Norm(b=b)
    ]

    # the same functions, translated via centered_at
    list1_shift = [
        L2NormSquared().centered_at(ig.allocate()),
        scalar * L2NormSquared().centered_at(ig.allocate()),
        scalar * L2NormSquared().centered_at(b),
        L1Norm().centered_at(ig.allocate()),
        scalar * L1Norm().centered_at(ig.allocate()),
        scalar * L1Norm().centered_at(b)
    ]

    out_gradient1 = ig.allocate()
    out_gradient2 = ig.allocate()
    out_proximal1 = ig.allocate()
    out_proximal2 = ig.allocate()
    out_proximal_conj1 = ig.allocate()
    out_proximal_conj2 = ig.allocate()

    for func, func_shift in zip(list1, list1_shift):
        # check call
        res1 = func(tmp)
        res2 = func_shift(tmp)
        self.assertNumpyArrayAlmostEqual(res1, res2)

        try:
            # check gradient
            res1_gradient = func.gradient(tmp)
            res2_gradient = func_shift.gradient(tmp)
            self.assertNumpyArrayAlmostEqual(res1_gradient.as_array(),
                                             res2_gradient.as_array())

            # check gradient with out argument
            func.gradient(tmp, out=out_gradient1)
            func_shift.gradient(tmp, out=out_gradient2)
            self.assertNumpyArrayAlmostEqual(out_gradient1.as_array(),
                                             out_gradient2.as_array())
        except NotImplementedError:
            print('Function is not differentiable')

        # check proximal
        func.proximal(tmp, tau, out=out_proximal1)
        func_shift.proximal(tmp, tau, out=out_proximal2)
        self.assertNumpyArrayAlmostEqual(out_proximal1.as_array(),
                                         out_proximal2.as_array())

        # check proximal conjugate
        func.proximal_conjugate(tmp, tau, out=out_proximal_conj1)
        func_shift.proximal_conjugate(tmp, tau, out=out_proximal_conj2)
        self.assertNumpyArrayAlmostEqual(out_proximal_conj1.as_array(),
                                         out_proximal_conj2.as_array())
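# Standalone illustration (hypothetical, not part of the test suite) of the
# identity exercised above: F.centered_at(b)(x) == F(x - b) for a Function F
# and shift b. Assumes the standard CIL import paths; geometry and seeds are
# illustrative only.
import numpy
from cil.framework import ImageGeometry
from cil.optimisation.functions import L2NormSquared

ig = ImageGeometry(4, 4)
F = L2NormSquared()
shift = ig.allocate('random', seed=42)
x = ig.allocate('random', seed=7)
numpy.testing.assert_allclose(F.centered_at(shift)(x), F(x - shift), rtol=1e-5)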
def test_SumFunction(self):
    M, N, K = 3, 4, 5
    ig = ImageGeometry(M, N, K)
    tmp = ig.allocate('random', seed=1)
    b = ig.allocate('random', seed=2)
    eta = ig.allocate(0.1)
    operator = IdentityOperator(ig)

    scalar = 0.25
    f1 = L2NormSquared()
    f2 = L1Norm()
    f3 = scalar * L2NormSquared()
    f4 = scalar * L1Norm()
    f5 = scalar * L2NormSquared(b=b)
    f6 = scalar * L1Norm(b=b)
    f7 = ZeroFunction()
    f8 = 5 * ConstantFunction(10)
    f9 = LeastSquares(operator, b, c=scalar)
    f10 = 0.5 * KullbackLeibler(b=b, eta=eta)
    f11 = KullbackLeibler(b=b, eta=eta)
    f12 = 10  # a plain number, to exercise Function + scalar sums

    list1 = [f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12]

    print('################### Check sum of two functions ################## \n')
    for func in list1:
        # check sum of two functions
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        if isinstance(func, Number):
            tmp_fun_eval = func
        else:
            tmp_fun_eval = func(tmp)

        sumf = f1 + func
        self.assertNumpyArrayAlmostEqual(sumf(tmp), f1(tmp) + tmp_fun_eval)
        print('{} = ( {} + {} ) is OK'.format(type(sumf).__name__,
                                              type(f1).__name__, type_fun))

        # addition should commute
        sumf1 = func + f1
        self.assertNumpyArrayAlmostEqual(sumf1(tmp), tmp_fun_eval + f1(tmp))
        print('Checking commutative')
        print('{} + ( {} + {} ) is OK\n'.format(type(sumf1).__name__,
                                                type_fun, type(f1).__name__))

    print('################### Check Lipschitz constant ################## \n')
    for i, func in enumerate(list1):
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        try:
            # check the Lipschitz constant of the sum of two functions
            print("i", i, func.__class__.__name__)
            if isinstance(func, Number):
                tmp_fun_L = 0
            else:
                tmp_fun_L = func.L

            sumf = f1 + func
            try:
                self.assertAlmostEqual(sumf.L, f1.L + tmp_fun_L)
            except TypeError:
                print('Function {} has L = None'.format(type_fun))
        except ValueError as nie:
            print(func.__class__.__name__, nie)

    print('\n################### Check Gradient ################## \n')
    for func in list1:
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        sumf = f1 + func
        # check gradient
        try:
            if isinstance(func, Number):
                tmp_fun_gradient = 0
            else:
                tmp_fun_gradient = func.gradient(tmp)

            self.assertNumpyArrayAlmostEqual(
                sumf.gradient(tmp).as_array(),
                (f1.gradient(tmp) + tmp_fun_gradient).as_array())
        except NotImplementedError:
            print("{} is not differentiable".format(type_fun))

    print('\n################### Check Gradient Out ################## \n')
    out_left = ig.allocate()
    out_right1 = ig.allocate()
    out_right2 = ig.allocate()

    for i, func in enumerate(list1):
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        sumf = f1 + func
        # check gradient with out argument
        try:
            if isinstance(func, Number):
                tmp_fun_gradient_out = 0
            else:
                func.gradient(tmp, out=out_right2)
                tmp_fun_gradient_out = out_right2.as_array()

            sumf.gradient(tmp, out=out_left)
            f1.gradient(tmp, out=out_right1)
            self.assertNumpyArrayAlmostEqual(
                out_left.as_array(),
                out_right1.as_array() + tmp_fun_gradient_out)
        except NotImplementedError:
            print("{} is not differentiable".format(type_fun))
def mae(dc1, dc2):
    '''Returns the mean absolute error (MAE) between two DataContainers'''
    diff = dc1 - dc2
    return L1Norm()(diff) / dc1.size
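# Usage sketch for mae() (illustrative values; assumes the standard CIL
# import paths). Two constant images differing by 0.5 everywhere should
# give an MAE of exactly 0.5, since L1Norm sums |a - c| over all pixels
# and the result is divided by the number of pixels.
from cil.framework import ImageGeometry
from cil.optimisation.functions import L1Norm

ig = ImageGeometry(8, 8)
a = ig.allocate(1.0)
c = ig.allocate(0.5)
print(mae(a, c))   # expected: 0.5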
noisy_data = ig.allocate()
noisy_data.fill(n1)
noisy_data = MO.direct(noisy_data)

# Regularisation parameter depending on the noise distribution
if noise == 's&p':
    alpha = 0.8
elif noise == 'poisson':
    alpha = 1.0
elif noise == 'gaussian':
    alpha = 0.3

# Choose the data fidelity depending on the noise type
if noise == 's&p':
    f2 = L1Norm(b=noisy_data)
elif noise == 'poisson':
    f2 = KullbackLeibler(b=noisy_data)
elif noise == 'gaussian':
    f2 = 0.5 * L2NormSquared(b=noisy_data)

# Create operators
op1 = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE)
op2 = MO

# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Create functions
f = BlockFunction(alpha * MixedL21Norm(), f2)
g = ZeroFunction()
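# Hedged sketch (not from the original script) of solving the resulting
# problem min_x f(operator x) + g(x) with PDHG. The import path is the
# standard CIL one; sigma = tau = 1/||K|| satisfies the usual
# sigma * tau * ||K||^2 <= 1 convergence condition, and the iteration
# count is arbitrary.
from cil.optimisation.algorithms import PDHG

normK = operator.norm()
sigma = 1.0 / normK
tau = 1.0 / normK

pdhg = PDHG(f=f, g=g, operator=operator, sigma=sigma, tau=tau)
pdhg.run(100, verbose=0)
solution = pdhg.solution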