def group_sparse_covariance(emp_covs, n_samples, alpha, max_iter, tol):
    """Estimate group-sparse precision matrices and bundle run parameters.

    Thin wrapper around ``_group_sparse_covariance`` that returns the
    estimated precisions together with the parameters that produced them,
    so a whole run can be cached or compared later as a single dict.
    """
    estimated = _group_sparse_covariance(
        emp_covs, n_samples, alpha, max_iter=max_iter, tol=tol,
        verbose=1, debug=False)
    return {"n_samples": n_samples,
            "alpha": alpha,
            "max_iter": max_iter,
            "tol": tol,
            "precisions": estimated}
def save_group_sparse_covariance(emp_covs, n_samples, alpha, max_iter, tol,
                                 cache_dir, num=0, random_init=True,
                                 debug=False):
    """Estimate group-sparse precision matrices and pickle the results.

    Parameters
    ----------
    emp_covs : numpy.ndarray
        Empirical covariances; last axis indexes subjects (shape
        presumably (n_features, n_features, n_subjects) — per the
        ``random_spd(emp_covs.shape[0], ...)`` usage below).
    n_samples : array-like
        Number of samples per subject, forwarded to the solver.
    alpha : float
        Regularization parameter.
    max_iter : int
        Maximum number of solver iterations.
    tol : float
        Convergence tolerance.
    cache_dir : str
        Directory the pickle file is written into.
    num : int, optional
        Index used to build the output file name.
    random_init : bool, optional
        If True, initialize the solver with random SPD matrices;
        otherwise let the solver pick its own starting point.
    debug : bool, optional
        Forwarded to the solver.
    """
    if random_init:
        # Time-derived seed so repeated calls start from different
        # random initializations.
        rand_gen = np.random.RandomState(
            int(int(1000000 * time.time()) % 100000000))
        precisions_init = np.empty(emp_covs.shape)
        # range (not Python-2-only xrange): one random SPD init per subject.
        for k in range(emp_covs.shape[-1]):
            precisions_init[..., k] = random_spd(emp_covs.shape[0],
                                                 rand_gen=rand_gen)
    else:
        precisions_init = None

    probe = ScoreProbe()
    precisions = _group_sparse_covariance(
        emp_covs, n_samples, alpha, max_iter=max_iter, tol=tol,
        verbose=1, debug=debug, probe_function=probe,
        precisions_init=precisions_init)

    output_fname = os.path.join(
        cache_dir, "precisions_{num:d}.pickle".format(num=num))
    # Context manager guarantees the file handle is closed even if
    # pickling fails (the original leaked the handle).
    with open(output_fname, "wb") as out_file:
        pickle.dump(dict(n_samples=n_samples, alpha=alpha,
                         max_iter=max_iter, tol=tol,
                         objective=probe.objective,
                         log_lik=probe.log_lik,
                         wall_clock=probe.wall_clock,
                         precisions=precisions,
                         precisions_init=precisions_init),
                    out_file)
def benchmark(parameters, output_d="_convergence"):
    """Run one convergence benchmark on synthetic signals.

    Generates ground-truth signals, runs the group-sparse covariance
    solver while a ``ScoreProbe`` records convergence metrics, and
    returns those metrics (as numpy arrays) together with the ground
    truth produced by ``create_signals``.
    """
    _, _, gt = create_signals(parameters, output_dir=output_d)
    emp_covs, n_samples = empirical_covariances(gt["signals"])
    print("alpha_max: %.3e, %.3e" % compute_alpha_max(emp_covs, n_samples))

    probe = ScoreProbe(duality_gap=True)
    _group_sparse_covariance(
        emp_covs, n_samples, alpha=parameters["alpha"],
        tol=parameters["tol"], max_iter=parameters["max_iter"],
        probe_function=probe, verbose=1)

    # Convert every recorded metric series to an ndarray in one pass.
    metrics = {key: np.asarray(series) for key, series in (
        ("log_lik", probe.log_lik),
        ("objective", probe.objective),
        ("precisions", probe.precisions),
        ("duality_gap", probe.duality_gap),
        ("time", probe.wall_clock),
    )}
    return metrics, gt