Example 1
def test_structure_error():
    """Test structure_error function."""
    a = np.eye(3) + np.eye(3, k=1)
    b = np.eye(3, k=-1) + np.eye(3)
    result = {
        'accuracy': 0.5555555555555556,
        'average_precision': 0.66666666666666663,
        'balanced_accuracy': 0.55,
        'dor': 1.4999999999999998,
        'f1': 0.6,
        'fall_out': 0.5,
        'false_omission_rate': 0.5,
        'fdr': 0.4,
        'fn': 2,
        'fp': 2,
        'mcc': 0.0,
        'miss_rate': 0.4,
        'nlr': 0.8,
        'npv': 0.5,
        'plr': 1.2,
        'precision': 0.6,
        'prevalence': 0.5555555555555556,
        'recall': 0.6,
        'specificity': 0.5,
        'tn': 2,
        'tp': 3
    }
    assert_equal(utils.structure_error(a, b), result)

    b = np.eye(3) + np.eye(3, k=-1) * 1e-3
    result = {
        'accuracy': 0.7777777777777778,
        'average_precision': 0.66666666666666663,
        'balanced_accuracy': 0.8,
        'dor': 0.0,
        'f1': 0.7499999999999999,
        'fall_out': 0.0,
        'false_omission_rate': 0.3333333333333333,
        'fdr': 0.0,
        'fn': 2,
        'fp': 0,
        'miss_rate': 0.4,
        'mcc': 0.0,
        'nlr': 0.4,
        'npv': 0.6666666666666666,
        'plr': 0,
        'precision': 1.0,
        'prevalence': 0.5555555555555556,
        'recall': 0.6,
        'specificity': 1.0,
        'tn': 4,
        'tp': 3
    }

    assert_equal(utils.structure_error(a, b, thresholding=True, eps=1e-2),
                 result)
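
The expected values above follow from an entry-wise comparison of the supports of the two matrices: positions that are nonzero in both count as true positives, and the remaining confusion-matrix cells yield the derived scores. A minimal NumPy-only sketch of that bookkeeping, assuming structure_error simply compares nonzero patterns (and, with thresholding=True, treats entries whose magnitude is below eps as zero before comparing), reproduces the counts in the first dictionary:

import numpy as np

a = np.eye(3) + np.eye(3, k=1)   # "true" support: diagonal + superdiagonal (5 nonzeros)
b = np.eye(3, k=-1) + np.eye(3)  # "estimated" support: diagonal + subdiagonal (5 nonzeros)

true_edges = a != 0
pred_edges = b != 0

tp = int(np.sum(true_edges & pred_edges))    # 3: the shared diagonal
fp = int(np.sum(~true_edges & pred_edges))   # 2: the subdiagonal
fn = int(np.sum(true_edges & ~pred_edges))   # 2: the superdiagonal
tn = int(np.sum(~true_edges & ~pred_edges))  # 2

precision = tp / (tp + fp)        # 0.6
recall = tp / (tp + fn)           # 0.6
accuracy = (tp + tn) / a.size     # 5/9 = 0.5555...
specificity = tn / (tn + fp)      # 0.5

# With thresholding=True and eps=1e-2, the 1e-3 subdiagonal in the second `b`
# is treated as zero, which is why that dictionary reports fp=0 and tn=4.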
Example 2
def tgl_results(X, y, K, K_obs, ells, **params):
    mdl = TimeGraphicalLasso(assume_centered=0,
                             verbose=0,
                             rtol=1e-5,
                             tol=1e-5,
                             max_iter=500,
                             rho=1. / np.sqrt(X.shape[0]))

    tic = time.time()
    ll = mdl.set_params(**params).fit(X, y)
    tac = time.time()
    iterations = ll.n_iter_
    F1score = utils.structure_error(K, ll.precision_)['f1']
    MSE_observed = None  # utils.error_norm(K_obs, ll.precision_ - ll.latent_)
    MSE_precision = utils.error_norm(K, ll.precision_)
    MSE_latent = None  # utils.error_norm(ells, ll.latent_)
    mean_rank_error = None  # utils.error_rank(ells, ll.latent_)

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=iterations,
               F1score=F1score,
               MSE_precision=MSE_precision,
               MSE_observed=MSE_observed,
               MSE_latent=MSE_latent,
               mean_rank_error=mean_rank_error,
               likelihood=mdl.score(X, y),
               note=None,
               estimator=ll)
    return res
Example 3
def chandresekeran_results(data_grid, K, K_obs, ells, tau, alpha, **kwargs):
    emp_cov = np.array([
        empirical_covariance(x, assume_centered=True)
        for x in data_grid.transpose(2, 0, 1)
    ]).transpose(1, 2, 0)

    rho = 1. / np.sqrt(data_grid.shape[0])

    result = lvglasso(emp_cov, alpha, tau, rho)
    ma_output = Bunch(**result)

    R = np.array(ma_output.R).T
    S = np.array(ma_output.S).T
    L = np.array(ma_output.L).T

    ss = utils.structure_error(K, S)
    MSE_observed = utils.error_norm(K_obs, R)
    MSE_precision = utils.error_norm(K, S, upper_triangular=True)
    MSE_latent = utils.error_norm(ells, L)
    mean_rank_error = utils.error_rank(ells, L)

    res = dict(n_dim_obs=K.shape[1],
               time=ma_output.elapsed_time,
               iterations=np.max(ma_output.iter),
               MSE_precision=MSE_precision,
               MSE_observed=MSE_observed,
               MSE_latent=MSE_latent,
               mean_rank_error=mean_rank_error,
               note=None,
               estimator=ma_output,
               likelihood=likelihood_score(data_grid.transpose(2, 0, 1), R),
               latent=L)

    res = dict(res, **ss)
    return res
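
The transpose(2, 0, 1) calls here and in the examples below suggest data_grid is laid out as (n_samples, n_dim, n_times), so that transposing yields one (n_samples, n_dim) block per time point, and the closing dict(res, **ss) folds every metric returned by structure_error into the result record. A short sketch with hypothetical sizes (names and numbers are illustrative, not taken from the original code):

import numpy as np

n_samples, n_dim, n_times = 100, 3, 4                  # hypothetical dimensions
data_grid = np.random.randn(n_samples, n_dim, n_times)

blocks = data_grid.transpose(2, 0, 1)                  # one (n_samples, n_dim) block per time point
assert blocks.shape == (n_times, n_samples, n_dim)

# dict(res, **ss) merges the structure_error metrics into the result dictionary;
# any key present in both would take its value from ss.
res = dict(n_dim_obs=n_dim, time=1.23)
ss = dict(f1=0.6, precision=0.6, recall=0.6)
res = dict(res, **ss)
assert res['f1'] == 0.6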
Example 4
def friedman_results(data_grid, K, K_obs, ells, alpha):
    from rpy2.robjects.packages import importr
    glasso = importr('glasso').glasso

    tic = time.time()
    iters = []
    precisions = []
    for d in data_grid.transpose(2, 0, 1):
        emp_cov = empirical_covariance(d)
        out = glasso(emp_cov, alpha)
        iters.append(int(out[-1][0]))
        precisions.append(np.array(out[1]))
    tac = time.time()
    iterations = np.max(iters)
    precisions = np.array(precisions)
    F1score = utils.structure_error(K, precisions)['f1']
    MSE_observed = None
    MSE_precision = utils.error_norm(K, precisions, upper_triangular=True)
    MSE_latent = None
    mean_rank_error = None

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=iterations,
               F1score=F1score,
               MSE_precision=MSE_precision,
               MSE_observed=MSE_observed,
               MSE_latent=MSE_latent,
               mean_rank_error=mean_rank_error,
               likelihood=likelihood_score(data_grid.transpose(2, 0, 1),
                                           precisions),
               note=None,
               estimator=None)

    return res
Example 5
def glasso_results(data_grid, K, K_obs, ells, alpha):
    gl = GLsk(alpha=alpha, mode='cd', assume_centered=False, max_iter=500)

    tic = time.time()
    iters = []
    precisions = []
    for d in data_grid.transpose(2, 0, 1):
        gl.fit(d)
        iters.append(gl.n_iter_)
        precisions.append(gl.precision_)
    tac = time.time()
    iterations = np.max(iters)
    precisions = np.array(precisions)

    ss = utils.structure_error(K, precisions)  #, thresholding=1, eps=1e-5)

    MSE_observed = None
    MSE_precision = utils.error_norm(K, precisions, upper_triangular=True)
    MSE_latent = None
    mean_rank_error = None

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=iterations,
               MSE_precision=MSE_precision,
               MSE_observed=MSE_observed,
               MSE_latent=MSE_latent,
               mean_rank_error=mean_rank_error,
               likelihood=likelihood_score(data_grid.transpose(2, 0, 1),
                                           precisions),
               note=None,
               estimator=gl)

    res = dict(res, **ss)
    return res
Example 6
def ltgl_results(X, y, K, K_obs, ells, **params):
    mdl = LatentTimeGraphicalLasso(assume_centered=0,
                                   verbose=0,
                                   rtol=1e-5,
                                   tol=1e-5,
                                   max_iter=1000,
                                   rho=1. / np.sqrt(X.shape[0]),
                                   update_rho_options=dict(mu=5))

    tic = time.time()
    ll = mdl.set_params(**params).fit(X, y)
    tac = time.time()
    iterations = ll.n_iter_
    ss = utils.structure_error(K, ll.precision_)  #, thresholding=1, eps=1e-5)
    MSE_observed = utils.error_norm(K_obs, ll.precision_ - ll.latent_)
    MSE_precision = utils.error_norm(K, ll.precision_, upper_triangular=True)
    MSE_latent = utils.error_norm(ells, ll.latent_)
    mean_rank_error = utils.error_rank(ells, ll.latent_)

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=iterations,
               MSE_precision=MSE_precision,
               MSE_observed=MSE_observed,
               MSE_latent=MSE_latent,
               mean_rank_error=mean_rank_error,
               note=None,
               estimator=ll,
               likelihood=mdl.score(X, y),
               latent=ll.latent_)

    res = dict(res, **ss)
    return res
Example 7
def base_results(mdl, X, y, K, K_obs, ells, search_spaces=None, **params):
    ll = mdl.set_params(**params)

    tic = time.time()
    if search_spaces is None:
        ll.fit(X, y)
    else:
        ll = use_bscv(ll, search_spaces, X, y)
    tac = time.time()

    ss = utils.structure_error(K, ll.precision_)
    MSE_precision = utils.error_norm(K, ll.precision_, upper_triangular=True)

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=ll.n_iter_,
               MSE_precision=MSE_precision,
               estimator=ll,
               likelihood=ll.score(X, y))

    if hasattr(ll, 'latent_'):
        res['MSE_observed'] = utils.error_norm(K_obs,
                                               ll.precision_ - ll.latent_)
        res['MSE_latent'] = utils.error_norm(ells, ll.latent_)
        res['mean_rank_error'] = utils.error_rank(ells, ll.latent_)

    res = dict(res, **ss)
    return res
Example 8
def wp_results(data_list, K, **params):
    n_iter = 1000
    mdl = wishart_process_.WishartProcess(verbose=True,
                                          n_iter=n_iter,
                                          **params)

    X = np.vstack(data_list)
    y = np.array([np.ones(x.shape[0]) * i
                  for i, x in enumerate(data_list)]).flatten().astype(int)

    tic = time.time()
    ll = mdl.fit(X, y)
    tac = time.time()

    #     mdl.likelihood(wp.D_map)
    #     mdl.loglikes_after_burnin.max()
    mdl.store_precision = True
    ss = utils.structure_error(K, ll.precision_, thresholding=False, eps=1e-3)
    MSE_precision = utils.error_norm(K, ll.precision_, upper_triangular=True)

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=n_iter,
               MSE_precision=MSE_precision,
               estimator=ll,
               likelihood=ll.score(X, y))
    res = dict(res, **ss)
    return res
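
Note that the label vector above is built with np.array(...).flatten(), which assumes every block in data_list has the same number of samples; with unequal blocks NumPy would produce an object array (or raise in recent versions) and the flatten would not yield one label per row. A sketch of an equivalent construction, using hypothetical blocks, that also works for unequal sizes:

import numpy as np

data_list = [np.random.randn(5, 3), np.random.randn(4, 3)]   # hypothetical, unequal blocks

# One integer label per sample, block by block.
y = np.concatenate(
    [np.full(x.shape[0], i) for i, x in enumerate(data_list)]).astype(int)
print(y)   # [0 0 0 0 0 1 1 1 1]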
Example 9
def hallac_results(data_grid,
                   K,
                   K_obs,
                   ells,
                   beta,
                   alpha,
                   penalty=2,
                   tvgl_path=''):
    if tvgl_path:
        import sys
        sys.path.append(tvgl_path)
        import TVGL
    #     with suppress_stdout():
    tic = time.time()
    thetaSet, empCovSet, status, gvx = TVGL.TVGL(
        np.vstack(data_grid.transpose(2, 0, 1)),
        data_grid.shape[0],
        lamb=alpha,
        beta=beta,
        indexOfPenalty=penalty)
    tac = time.time()

    if status != "Optimal":
        print("not converged")
    precisions = np.array(thetaSet)
    ss = utils.structure_error(K, precisions)
    MSE_observed = None
    MSE_precision = utils.error_norm(K, precisions, upper_triangular=True)
    MSE_latent = None
    mean_rank_error = None

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=gvx.n_iter_,
               MSE_precision=MSE_precision,
               MSE_observed=MSE_observed,
               MSE_latent=MSE_latent,
               mean_rank_error=mean_rank_error,
               likelihood=likelihood_score(data_grid.transpose(2, 0, 1),
                                           precisions),
               note=status,
               estimator=gvx)
    res = dict(res, **ss)

    return res
Example 10
def set_results(vs, model, name, i, labels_true, labels_pred,
                thetas_true_sparse, thetas_true_rep, obs_precs_sparse,
                obs_precs, tac):
    th = name in ['wp', 'ticc']
    entry = vs.setdefault((name, i), {})
    entry.setdefault('model', []).append(model)
    entry.setdefault('v_measure', []).append(
        v_measure_score(labels_true, labels_pred))
    entry.setdefault('structure_error', []).append(
        structure_error(thetas_true_sparse, obs_precs_sparse,
                        no_diagonal=True, thresholding=th, eps=1e-5))
    entry.setdefault('error_norm', []).append(
        error_norm_time(thetas_true_rep, obs_precs))
    entry.setdefault('error_norm_sparse', []).append(
        error_norm_time(thetas_true_sparse, obs_precs_sparse))
    entry.setdefault('time', []).append(tac)
Example 11
def lgl_results(data_grid, K, K_obs, ells, **params):
    mdl = LatentGraphicalLasso(assume_centered=0,
                               verbose=0,
                               rtol=1e-5,
                               tol=1e-5,
                               max_iter=500,
                               rho=1. / np.sqrt(data_grid.shape[0]))

    tic = time.time()
    iters = []
    precisions, latents = [], []
    for d in data_grid.transpose(2, 0, 1):
        mdl.set_params(**params).fit(d)
        iters.append(mdl.n_iter_)
        precisions.append(mdl.precision_)
        latents.append(mdl.latent_)
    tac = time.time()
    iterations = np.max(iters)
    precisions = np.array(precisions)
    latents = np.array(latents)

    F1score = utils.structure_error(K, precisions)['f1']
    MSE_observed = utils.error_norm(K_obs, precisions - latents)
    MSE_precision = utils.error_norm(K, precisions)
    MSE_latent = utils.error_norm(ells, latents)
    mean_rank_error = utils.error_rank(ells, latents)

    res = dict(n_dim_obs=K.shape[1],
               time=tac - tic,
               iterations=iterations,
               F1score=F1score,
               MSE_precision=MSE_precision,
               MSE_observed=MSE_observed,
               MSE_latent=MSE_latent,
               mean_rank_error=mean_rank_error,
               note=None,
               estimator=mdl)
    return res