def compute_optimal_params(subject_dir, method='hgl', sess_ix=None,
                           random_state=None, **kwargs):
    randgen = check_random_state(random_state)
    get_data_ = mem.cache(get_data)
    subj_data = get_data_(subject_dir)
    if len(subj_data) < 4:
        raise ValueError('Incomplete data')
    # pick a random session for training if none was specified
    if sess_ix is None:
        sess_ix = randgen.randint(2) + 1
    X = np.concatenate([d["data"] for d in subj_data
                        if d["session"] == sess_ix], axis=0)
    # the complementary session provides the reference (model) precision,
    # i.e. the inverse of its empirical covariance
    Y = np.concatenate([d["data"] for d in subj_data
                        if d["session"] == 3 - sess_ix], axis=0)
    Theta = scipy.linalg.inv(Y.T.dot(Y) / Y.shape[0])
    return cvl.cross_val(X, method=method, alpha_tol=1e-2, n_iter=1,
                         optim_h=True, train_size=.99, test_size=0.01,
                         model_prec=Theta, n_jobs=min(N_JOBS, 10),
                         random_state=random_state, tol=1e-3, **kwargs)
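# Hypothetical usage sketch (not part of the original pipeline): runs the
# per-subject cross-validation once. The subject path below is a placeholder,
# and the function relies on the module-level `mem`, `get_data`, `cvl` and
# `N_JOBS` objects defined elsewhere in this script; the result is returned
# unchanged, i.e. whatever cvl.cross_val yields.
def _demo_compute_optimal_params(subject_dir='/path/to/subject_01'):
    # fix the seed so the random training-session choice is reproducible
    res = compute_optimal_params(subject_dir, method='hgl', random_state=42)
    print "cross_val output: {}".format(res)
    return res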
def eval_point(a, b, Z, alpha_tol=1e-2, n_jobs=1, verbose=0, n_iter=10,
               score_norm="KL", CV_norm="ell0", ips_flag=True,
               mx_type='small', random_state=None):
    train_size = .03
    try:
        print "evaluating point ({}, {})".format(a, b)
        # ground-truth precision matrix for this (a, b) point
        Theta = _get_mx(a, b, mx_type=mx_type)
        eigvals, eigvecs = scipy.linalg.eigh(Theta)
        # M = Theta^{-1/2}, used to whiten the samples Z
        M = eigvecs.dot(np.diag(1. / np.sqrt(eigvals))).dot(eigvecs.T)
        if score_norm != "ell0":
            sqrt_p = np.sqrt(Theta.shape[0])
            Theta = sqrt_p * M
        X = Z.dot(M)
        if mx_type == "small":
            # try all three possible pairings of the four variables
            tree1 = [[0, 1], [2, 3]]
            alpha_star_hgl1, LL_hgl1 = covariance_learn.cross_val(
                X, model_prec=Theta, method='hgl', htree=tree1,
                alpha_tol=alpha_tol, n_iter=n_iter, train_size=train_size,
                test_size=.5, n_jobs=n_jobs, verbose=verbose,
                score_norm=score_norm, CV_norm=CV_norm,
                random_state=random_state, ips_flag=ips_flag)
            tree2 = [[0, 2], [1, 3]]
            alpha_star_hgl2, LL_hgl2 = covariance_learn.cross_val(
                X, model_prec=Theta, method='hgl', htree=tree2,
                alpha_tol=alpha_tol, n_iter=n_iter, train_size=train_size,
                test_size=.5, n_jobs=n_jobs, verbose=verbose,
                score_norm=score_norm, CV_norm=CV_norm,
                random_state=random_state, ips_flag=ips_flag)
            tree3 = [[0, 3], [1, 2]]
            alpha_star_hgl3, LL_hgl3 = covariance_learn.cross_val(
                X, model_prec=Theta, method='hgl', htree=tree3,
                alpha_tol=alpha_tol, n_iter=n_iter, train_size=train_size,
                test_size=.5, n_jobs=n_jobs, verbose=verbose,
                score_norm=score_norm, CV_norm=CV_norm,
                random_state=random_state, ips_flag=ips_flag)
        else:
            if mx_type in {'gael', 'ronald'}:
                tree = [[0, 1], [2, 3], [4, 5], [6, 7]]
            else:
                tree = [range(k * 4, (k + 1) * 4) for k in range(4)]
            alpha_star_hgl, LL_hgl = covariance_learn.cross_val(
                X, model_prec=Theta, method='hgl', htree=tree,
                alpha_tol=alpha_tol, n_iter=n_iter, train_size=train_size,
                test_size=.5, n_jobs=n_jobs, verbose=verbose,
                score_norm=score_norm, CV_norm=CV_norm,
                random_state=random_state, ips_flag=ips_flag)
        # plain graphical lasso as the baseline
        alpha_star_gl, LL_gl = covariance_learn.cross_val(
            X, model_prec=Theta, method='gl',
            alpha_tol=alpha_tol, n_iter=n_iter, train_size=train_size,
            test_size=.5, n_jobs=n_jobs, verbose=verbose,
            score_norm=score_norm, CV_norm=CV_norm,
            random_state=random_state, ips_flag=ips_flag)
        if mx_type == "small":
            print "\thgl1: {}, hgl2: {}, hgl3: {}, gl: {}".format(
                LL_hgl1[-1], LL_hgl2[-1], LL_hgl3[-1], LL_gl[-1])
            return LL_hgl1[-1], LL_hgl2[-1], LL_hgl3[-1], LL_gl[-1]
        else:
            print "\thgl (alpha={}): {}\n\t gl (alpha={}): {}".format(
                alpha_star_hgl, LL_hgl[-1], alpha_star_gl, LL_gl[-1])
            return LL_hgl[-1], LL_gl[-1]
    except ValueError:
        print "\tinvalid point"
        if mx_type == "small":
            return np.nan, np.nan, np.nan, np.nan
        else:
            return np.nan, np.nan
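# Hypothetical usage sketch, not part of the original script: evaluates the
# hgl/gl comparison on a small grid of (a, b) values with synthetic
# standard-normal samples (eval_point whitens them with Theta^{-1/2}). The
# grid values, sample size and seed are illustrative assumptions; with
# mx_type='small' the four columns of Z match the 2x2 block trees above.
def _demo_eval_grid(n_samples=200, random_state=0):
    rng = check_random_state(random_state)
    Z = rng.randn(n_samples, 4)
    results = {}
    for a in (.1, .3, .5):
        for b in (.1, .3, .5):
            # each entry holds the (hgl1, hgl2, hgl3, gl) scores
            results[(a, b)] = eval_point(a, b, Z, mx_type='small', n_iter=2,
                                         random_state=random_state)
    return results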