def ichol_words(K, keys, eta):
    # K is a kernel matrix.
    # keys defines the sample ordering for the kernel (so the first row of K
    # corresponds to keys[0]).
    # eta is the threshold for the ichol procedure.
    # DEBUG: IO.writeNumpyArray('K0.txt', K)
    N = len(keys)
    print >> sys.stderr, "ichol of", N, "words", keys[:10], '...'
    model = ICD.fast_ichol(K, eta)
    # model = ICD.ichol(K, eta)
    model.keys = keys
    # This assertion is an invariant of the Cholesky decomposition for PSD
    # matrices, but does not hold for non-PSD K.
    R = np.mat(model.R)
    assert common.norm(R.T * R - K) < 1e-5
    return model
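# For reference: the invariant asserted above (R^T R ~= K for PSD K) can be
# reproduced with a plain NumPy Cholesky factor. A minimal sketch, assuming a
# small strictly positive-definite Gram matrix; np.linalg.cholesky returns the
# lower-triangular L with L L^T = K, so R = L^T.
import numpy as np

X = np.array([[1.0, 3.0, 1.0],
              [1.0, 4.0, 1.0],
              [2.0, 2.5, 2.0]])
K_demo = X @ X.T + 1e-8 * np.eye(3)   # jitter keeps the factorization stable

L = np.linalg.cholesky(K_demo)        # lower-triangular, L @ L.T == K_demo
R_demo = L.T                          # upper-triangular, R.T @ R == K_demo
assert np.linalg.norm(R_demo.T @ R_demo - K_demo) < 1e-5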
if mgd_lowest:
    from mgd_helpers import lower_form_order
    Jp = lower_form_order(J)
else:
    Jp = J

problem = NonlinearVariationalProblem(Rlhs - Rrhs, h, bcs=bcs, J=J, Jp=Jp)
problem._constant_jacobian = True
solver = NonlinearVariationalSolver(problem, options_prefix='linsys_',
                                    solver_parameters={'snes_type': 'ksponly'})

# solve system
solver.solve()

# compute norms
l2err = norm(h - soln, norm_type='L2')
h1err = norm(h - soln, norm_type='H1')
l2directerr = norm(h - solnexpr, norm_type='L2')
h1directerr = norm(h - solnexpr, norm_type='H1')
PETSc.Sys.Print(l2err, h1err, l2directerr, h1directerr)

# output
checkpoint = DumbCheckpoint(simname, mode=FILE_CREATE)
checkpoint.store(h)
checkpoint.store(soln)
checkpoint.store(mesh.coordinates)

# diff
diff = Function(h1, name='hdiff')
diffproj = Projector(h - soln, diff)  # options_prefix='masssys_'
diffproj.project()
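# A hypothetical read-back of the stored fields, using the same (older)
# Firedrake DumbCheckpoint API; the field names passed to load() are
# assumptions, not taken from the script above.
# chk = DumbCheckpoint(simname, mode=FILE_READ)
# chk.load(h, name='h')
# chk.load(soln, name='soln')
# chk.close()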
    return np.mat(r)


if __name__ == '__main__':
    nargs = len(sys.argv)
    if nargs == 1:
        # Test
        # setup data
        data = np.matrix('[1,3,1;1,4,1;1,-3,-5;2,2.5,2]')
        K = (data * data.T)
        K = K.tolist()
        eta = 0.01

        # ichol
        model = ICD.ichol(K, eta)
        model2 = ICD.fast_ichol(np.array(K), eta)
        print 'K', common.norm(model2.K - model.K)
        print 'R', common.norm(model2.R - model.R)
        print 'perm', np.linalg.norm(model2.perm - model.perm)
        print 'D', np.linalg.norm(model2.D - model.D)
        print 'nu', np.linalg.norm(model2.nu - model.nu)

        # setup out-of-sample data
        z0 = [1, 3, 1]  # same as the first row in data
        z1 = [3, -2, 1]
        z2 = [2, 1, 1]
        K_x0 = z0 * data.T
        K_x1 = z1 * data.T
        K_x2 = z2 * data.T
        K_X = np.mat([z0, z1, z2]) * data.T

        # project
        r0 = ICD.getRepresentations(model, K_x0.tolist())
        r1 = ICD.getRepresentations(model, K_x1.tolist())
def test_robust_sampling(conf, training_result):
    """ Sampling rate vs reconstruction quality at fixed noise std.

    Parameters
    ----------
    conf : conf_loader.Conf
        Experiment parameters
    training_result : tuple
        Return values of function `training`

    Returns
    -------
    srange : np.array
        sampling rate range used
    sigma : float
        noise std used
    (bk_ser, fa_ser, rc_ser) : (np.array, np.array, np.array)
        SER for `k-best`, `f_avg` and `LSC` methods
    (bk_mse, fa_mse, rc_mse) : (np.array, np.array, np.array)
        MSE for `k-best`, `f_avg` and `LSC` methods
    """
    (sort, tros), (energy, comulated, bands), codebooks = training_result
    n_bands = conf.nbands
    # matrix to vector (m2v) and vector to matrix (v2m) functions
    m2v, v2m = conf.vect_functions()
    subcfg = conf.testing['robust_sampling']

    # Load testing set
    testing, Testing = common.load_dataset(conf.testingset_path(),
                                           conf.fformat, conf.size())
    FlatTst = m2v(Testing)[:, sort]

    # f_avg sampling pattern
    Omega = energy.argsort()[::-1]
    # lsc sampling pattern
    Omegas = [c.sampling_pattern() for c in codebooks]

    shape = testing[0].shape
    n = np.prod(shape)
    N = len(testing)

    srange = np.logspace(*subcfg['sampling_range'])
    sigma = subcfg['noise_rate']

    bk_ser = np.zeros(len(srange))
    fa_ser = np.zeros(len(srange))
    rc_ser = np.zeros(len(srange))
    bk_mse = np.zeros(len(srange))
    fa_mse = np.zeros(len(srange))
    rc_mse = np.zeros(len(srange))

    print('Sampling rate at fixed noise test:')
    for i, rate in enumerate(srange):
        print(f'\r {i+1:3d}/{len(srange)}', flush=True, end='')
        M = int(round(n * rate))
        m = int(round(M / n_bands))
        ms = lsc.num_samples(bands, m)
        M = np.sum(ms)
        smalls = [omega[:y] for omega, y in zip(Omegas, ms)]
        for idx in range(N):
            reference = common.norm(testing[idx])
            X = FlatTst[idx] + common.noise(sigma, n)

            # LSC reconstruction
            Xsbs = lsc.split(X, bands)
            Ysbs = lsc.sub_sample(Xsbs, Omegas, m)
            recovered = [
                codebooks[b].reconstruct(Ysbs[b], smalls[b])
                for b in range(len(bands))
            ]
            Y = v2m((lsc.union(recovered))[tros], shape)
            y = common.norm(common.pos(common.ifft2(Y).real))

            # k-best reconstruction
            BK = X.copy()[tros]
            O = np.abs(BK).argsort()[::-1]
            BK[O[M:]] = 0
            BK = v2m(BK, shape)
            bK = common.norm(common.pos(common.ifft2(BK).real))

            # f_avg reconstruction
            FA = X.copy()[tros]
            FA[Omega[M:]] = 0
            FA = v2m(FA, shape)
            fA = common.norm(common.pos(common.ifft2(FA).real))

            fa_ser[i] += common.SER(reference, fA) / N
            bk_ser[i] += common.SER(reference, bK) / N
            rc_ser[i] += common.SER(reference, y) / N
            fa_mse[i] += mse(reference, fA) / N
            bk_mse[i] += mse(reference, bK) / N
            rc_mse[i] += mse(reference, y) / N
    print(' [done]')
    return srange, sigma, (bk_ser, fa_ser, rc_ser), (bk_mse, fa_mse, rc_mse)
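# Standalone illustration of the `k-best` baseline used above: keep the M
# largest-magnitude coefficients and zero the rest. A minimal sketch; the
# spectrum and M below are made up for demonstration.
rng = np.random.default_rng(0)
X_demo = rng.normal(size=32) + 1j * rng.normal(size=32)
M_demo = 8

BK_demo = X_demo.copy()
order = np.abs(BK_demo).argsort()[::-1]  # indices by magnitude, descending
BK_demo[order[M_demo:]] = 0              # zero all but the M largest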
Rlhs = action(a, x)
Rrhs = u * rhsexpr * dx

# create solvers
J = derivative(Rlhs - Rrhs, x)
problem = NonlinearVariationalProblem(Rlhs - Rrhs, x, J=J, Jp=J, bcs=[])
problem._constant_jacobian = True
solver = NonlinearVariationalSolver(problem, options_prefix='linsys_',
                                    solver_parameters={'snes_type': 'ksponly'})

# solve system
solver.solve()

# compute norms
l2err = norm(x - soln, norm_type='L2')
l2directerr = norm(x - solnexpr, norm_type='L2')
PETSc.Sys.Print(l2err, l2directerr)

# output
checkpoint = DumbCheckpoint(simname)
checkpoint.store(x)
checkpoint.store(soln)
checkpoint.store(mesh.coordinates)

diff = Function(l2, name='diff')
diffproj = Projector(x - soln, diff)  # options_prefix='masssys_'
diffproj.project()
checkpoint.store(diff)

directdiff = Function(l2, name='directdiff')
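# The 'ksponly' SNES type above skips the Newton loop and performs a single
# linear solve. A hypothetical alternative parameter set for the same pattern,
# using standard PETSc options (a sketch, not this script's configuration):
direct_params = {
    'snes_type': 'ksponly',  # no Newton iteration; one linear solve
    'ksp_type': 'preonly',   # apply the preconditioner exactly once
    'pc_type': 'lu',         # direct LU factorization
}
# solver = NonlinearVariationalSolver(problem, options_prefix='linsys_',
#                                     solver_parameters=direct_params)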
def test_robust_visual(conf, training_result, idx):
    """ Noise std vs reconstruction quality at fixed sampling rate, visual test.

    Parameters
    ----------
    conf : conf_loader.Conf
        Experiment parameters
    training_result : tuple
        Return values of function `training`
    idx : int
        Id of image to test

    Returns
    -------
    srange : np.array
        noise std range used
    res : np.array
        composite image: `k-best`, `f_avg` and `LSC` reconstructions
        (one row of tiles each), one column of tiles per noise std in `srange`
    """
    (sort, tros), (energy, comulated, bands), codebooks = training_result
    n_bands = conf.nbands
    # matrix to vector (m2v) and vector to matrix (v2m) functions
    m2v, v2m = conf.vect_functions()
    subcfg = conf.testing['robust_reconstruction_visual']

    testing, Testing = common.load_dataset(conf.testingset_path(),
                                           conf.fformat, conf.size())
    FlatTst = m2v(Testing)[:, sort]

    # f_avg sampling pattern
    Omega = energy.argsort()[::-1]
    # lsc sampling pattern
    Omegas = [c.sampling_pattern() for c in codebooks]

    shape = testing[0].shape
    n = np.prod(shape)

    srange = np.logspace(*subcfg['noise_range'])
    sampling_rate = subcfg['sampling_rate']

    W, H = shape
    Wt = W * 3
    Ht = H * len(srange)
    res = np.zeros((Wt, Ht))

    print(f'Robust Reconstruction Quality Visual Test (img {idx}):')
    for i, sigma in enumerate(srange):
        print(f'\r {i+1:3d}/{len(srange)}', flush=True, end='')
        X = FlatTst[idx] + common.noise(sigma, n)
        M = int(round(sampling_rate * n))
        m = int(round(M / n_bands))
        ms = lsc.num_samples(bands, m)
        M = np.sum(ms)
        smalls = [omega[:y] for omega, y in zip(Omegas, ms)]

        # LSC reconstruction
        Xsbs = lsc.split(X, bands)
        Ysbs = lsc.sub_sample(Xsbs, Omegas, m)
        recovered = [
            codebooks[b].reconstruct(Ysbs[b], smalls[b])
            for b in range(len(bands))
        ]
        Y = v2m((lsc.union(recovered))[tros], shape)
        y = common.norm(common.pos(common.ifft2(Y).real))

        # k-best reconstruction
        BK = X.copy()[tros]
        O = np.abs(BK).argsort()[::-1]
        BK[O[M:]] = 0
        BK = v2m(BK, shape)
        bK = common.norm(common.pos(common.ifft2(BK).real))

        # f_avg reconstruction
        FA = X.copy()[tros]
        FA[Omega[M:]] = 0
        FA = v2m(FA, shape)
        fA = common.norm(common.pos(common.ifft2(FA).real))

        res[:W, H * i:H * (i + 1)] = bK
        res[W:2 * W, H * i:H * (i + 1)] = fA
        res[2 * W:3 * W, H * i:H * (i + 1)] = y
    print('\t[done]')
    return srange, res
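# Hypothetical usage: render the returned mosaic to a grayscale image for
# inspection (matplotlib assumed available; the filename is made up).
# import matplotlib.pyplot as plt
# srange, res = test_robust_visual(conf, training_result, idx=0)
# plt.imsave('robust_visual.png', res, cmap='gray')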
def training(conf):
    """Training step.

    Parameters
    ----------
    conf : conf_loader.Conf
        Experiment parameters

    Returns
    -------
    (sort, tros) : ([int], [int])
        Sorting order, direct and inverse
    (energy, comulated, bands) : (np.array, np.array, [int])
        Average energy, cumulated average energy and band splitting
    codebooks : [Codebook]
        codebooks per band
    """
    print('Loading training dataset...', end='', flush=True)
    n_bands = conf.nbands
    # matrix to vector (m2v) and vector to matrix (v2m) functions
    m2v, v2m = conf.vect_functions()
    training, Training = common.load_dataset(conf.trainingset_path(),
                                             fformat=conf.fformat,
                                             size=conf.size())
    print(' [done]')

    """ Extract statistics. """
    print('Extracting statistics and computing bands...', end='', flush=True)
    shape = training[0].shape
    n = np.prod(shape)
    mag, mag_std, phs, phs_std = common.retrive_basic_stats(Training)

    # direct sorting
    sort = np.arange(n)
    if conf.training['sort'] == 'random':
        sort = np.random.choice(n, size=n, replace=False)
    elif conf.training['sort'] == 'energy':
        sort = m2v(mag).argsort()[::-1]
    # inverse sorting
    tros = common.inv(sort)

    """ Generate bands. """
    energy = common.norm(m2v(mag_std)[sort])
    comulated = common.norm(common.comulate(energy))
    bands = lsc.divide(comulated, n_bands)
    print(' [done]')

    """ Check if codebook is cached. """
    if not os.path.isdir('codebooks'):
        os.mkdir('codebooks')
    codebook_path = f'codebooks/{conf.codebook_name()}'
    if os.path.isfile(codebook_path):
        print('Loading existing codebooks...', end='', flush=True)
        with open(codebook_path, 'rb') as f:
            codebooks = pickle.load(f)
    else:
        print('Computing codebooks...', end='', flush=True)

        """ Prepare dataset for codes generation. """
        n_levels = conf.training['n_levels']
        n_codes = conf.training['n_codes']
        batch_size = conf.training['batch']
        normalize = m2v(mag)[sort]
        discretize = normalize / n_levels
        FlatTrn = m2v(Training)[:, sort]
        DiscTrn = np.round(FlatTrn / discretize) * discretize
        SBsTrn = lsc.split(DiscTrn, bands)

        """ Compute codebook for each sub-band. """
        codebooks = lsc.gen_codebooks(SBsTrn, n_codes, mode='ReIm',
                                      batch_size=batch_size)
        with open(codebook_path, 'wb') as f:
            pickle.dump(codebooks, f)
    print(' [done]')
    return (sort, tros), (energy, comulated, bands), codebooks
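# Hypothetical end-to-end driver tying the steps above together; the config
# filename and the conf_loader.Conf entry point are assumptions.
# if __name__ == '__main__':
#     conf = conf_loader.Conf('experiment.yml')
#     training_result = training(conf)
#     srange, sigma, sers, mses = test_robust_sampling(conf, training_result)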
    from mgd_helpers import lower_form_order
    Jp = lower_form_order(J)
else:
    Jp = J

problem = NonlinearVariationalProblem(Rlhs - Rrhs, x, bcs=fullbcs, J=J, Jp=Jp)
problem._constant_jacobian = True
solver = NonlinearVariationalSolver(problem, options_prefix='linsys_',
                                    solver_parameters={'snes_type': 'ksponly'})

# solve system
solver.solve()

# compute norms
hl2err = norm(h - hsoln, norm_type='L2')
ul2err = norm(u - usoln, norm_type='L2')
uhdiverr = norm(u - usoln, norm_type='Hdiv')
hl2directerr = norm(h - hsolnexpr, norm_type='L2')
ul2directerr = norm(u - vecsolnexpr, norm_type='L2')
uhdivdirecterr = norm(u - vecsolnexpr, norm_type='Hdiv')
PETSc.Sys.Print(hl2err, ul2err, uhdiverr,
                hl2directerr, ul2directerr, uhdivdirecterr)

# output
checkpoint = DumbCheckpoint(simname, mode=FILE_CREATE)
checkpoint.store(x)
checkpoint.store(hsoln)
checkpoint.store(usoln)
checkpoint.store(mesh.coordinates, name='coords')
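# For reference, the 'Hdiv' norm above satisfies
#   ||v||_Hdiv^2 = ||v||_L2^2 + ||div v||_L2^2.
# A sketch of the equivalent direct assembly via UFL operators as exported by
# Firedrake (a cross-check, not part of the original script):
# e = u - usoln
# uhdiverr_sq = assemble((inner(e, e) + div(e) * div(e)) * dx)  # == uhdiverr**2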
def _estimate_error_norm(self, K, h, scale):
    return norm(self._estimate_error(K, h) / scale)
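# In adaptive RK controllers of this style, `norm` is typically an RMS norm of
# the elementwise-scaled local error, with scale = atol + rtol * |y|, and a
# step is accepted when the scaled norm is below 1. A minimal standalone
# sketch with made-up tolerances and error values:
import numpy as np

def rms_norm(v):
    # Root-mean-square norm used for scaled local-error estimates.
    return np.linalg.norm(v) / np.sqrt(v.size)

y = np.array([1.0, -2.0, 0.5])
err = np.array([1e-7, 3e-7, -2e-7])        # hypothetical local error estimate
scale = 1e-8 + 1e-6 * np.abs(y)            # atol + rtol * |y|
accept_step = rms_norm(err / scale) < 1.0  # True here: step accepted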
            dW2 = -np.outer(h1, e)
        elif lrule == Learning.OJA or lrule == Learning.OJA_FEED:
            dW2 = -np.outer(h1, e)
            dW0 = get_oja_deriv(x, yf0, W0, act.deriv(yf0))
            dW1 = get_oja_deriv(h0, yf1, W1, act.deriv(yf1))
            # dW0 = np.zeros(dW0.shape)
            # dW1 = np.zeros(dW1.shape)
            if lrule == Learning.OJA_FEED:
                dB0 = get_oja_deriv(e, yf0, B0, act.deriv(yf0))
                dB1 = get_oja_deriv(e, yf1, B1, act.deriv(yf1))
                B0 += lrate * norm(dB0)
                B1 += lrate * norm(dB1)

        W0 += lrate * norm(dW0)
        W1 += lrate * norm(dW1)
        W2 += lrate * norm(dW2)

        errors.append(error)
        y_acc.append(y)

    error = sum(errors) / len(errors)
    if epoch % 25 == 0 or epoch == epochs - 1:
        print "{}, Epoch {}, error {}".format(lrule, epoch, error)
    error_acc.append(error)
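# For reference, the classic single-unit Oja update that get_oja_deriv
# presumably generalizes to layers; this standalone version is an assumption,
# not the helper's actual implementation.
import numpy as np

def oja_step(w, x, lrate=0.01):
    # Oja's rule: dw = y * (x - y * w), with y = w . x.
    # The -y^2 * w decay term keeps ||w|| bounded while w converges toward
    # the first principal component of the input distribution.
    y = w @ x
    return w + lrate * y * (x - y * w)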