import numpy as np
import nibabel
import parsimony.functions.nesterov.tv as tv_helper
from sklearn.feature_selection import SelectKBest
from parsimony.estimators import LogisticRegressionL1L2TV


def load_globals(config):
    import mapreduce as GLOBAL  # access to global variables
    GLOBAL.DATA = GLOBAL.load_data(config["data"])
    STRUCTURE = nibabel.load(config["structure"])
    A = tv_helper.linear_operator_from_mask(STRUCTURE.get_data())
    N_COMP = config["N_COMP"]
    GLOBAL.A, GLOBAL.STRUCTURE, GLOBAL.N_COMP = A, STRUCTURE, N_COMP
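
# A minimal usage sketch (hypothetical file names, following the init()
# convention further down):
#
#     config = dict(data=dict(X="X.npy", y="y.npy"),
#                   structure="mask.nii", N_COMP=3)
#     load_globals(config)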

###############################################################################
# Example no. 2

def mapper(key, output_collector):
    import mapreduce as GLOBAL  # access to global variables
    # GLOBAL.DATA, GLOBAL.STRUCTURE, GLOBAL.A are set by load_globals()
    # GLOBAL.DATA ::= {"X": [Xtrain, Xtest], "y": [ytrain, ytest]}
    # key: list of parameters (alpha, l1 ratio, l2 ratio, tv ratio, k)
    Xtr = GLOBAL.DATA_RESAMPLED["X"][0]
    Xte = GLOBAL.DATA_RESAMPLED["X"][1]
    ytr = GLOBAL.DATA_RESAMPLED["y"][0]
    yte = GLOBAL.DATA_RESAMPLED["y"][1]
    print(key, "Data shape:", Xtr.shape, Xte.shape, ytr.shape, yte.shape)
    STRUCTURE = GLOBAL.STRUCTURE
    penalty_start = GLOBAL.CONFIG["penalty_start"]
    class_weight = "auto"  # unbiased
    alpha = float(key[0])
    l1 = alpha * float(key[1])
    l2 = alpha * float(key[2])
    tv = alpha * float(key[3])
    k = key[4]
    print("l1:%f, l2:%f, tv:%f, k:%i" % (l1, l2, tv, k))
    if k != -1:
        k = int(k)
        aov = SelectKBest(k=k)
        aov.fit(Xtr[:, penalty_start:], ytr.ravel())
        mask = STRUCTURE.get_data() != 0
        mask[mask] = aov.get_support()
        A = tv_helper.linear_operator_from_mask(mask)
        Xtr_r = np.hstack([Xtr[:, :penalty_start],
                           Xtr[:, penalty_start:][:, aov.get_support()]])
        Xte_r = np.hstack([Xte[:, :penalty_start],
                           Xte[:, penalty_start:][:, aov.get_support()]])
    else:
        mask = STRUCTURE.get_data() != 0  # no feature selection: keep all voxels
        Xtr_r = Xtr
        Xte_r = Xte
        A = GLOBAL.A
    mod = LogisticRegressionL1L2TV(l1,
                                   l2,
                                   tv,
                                   A,
                                   penalty_start=penalty_start,
                                   class_weight=class_weight)
    mod.fit(Xtr_r, ytr)
    y_pred = mod.predict(Xte_r)
    proba_pred = mod.predict_probability(Xte_r)
    ret = dict(y_pred=y_pred,
               proba_pred=proba_pred,
               y_true=yte,
               beta=mod.beta,
               mask=mask)
    if output_collector:
        output_collector.collect(key, ret)
    else:
        return ret
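
# A minimal local-run sketch (hypothetical arrays; mapreduce normally fills
# DATA_RESAMPLED from the config's "resample" entry, and GLOBAL.A /
# GLOBAL.STRUCTURE come from load_globals):
#
#     import mapreduce as GLOBAL
#     GLOBAL.DATA_RESAMPLED = {"X": [Xtr, Xte], "y": [ytr, yte]}
#     GLOBAL.CONFIG = {"penalty_start": 3}
#     ret = mapper((0.01, 0.1, 0.8, 0.1, -1), output_collector=None)
#     print(ret["y_pred"][:5], ret["proba_pred"][:5])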
def load_globals(config):
    import mapreduce as GLOBAL  # access to global variables
    GLOBAL.DATA = GLOBAL.load_data(config["data"])
    STRUCTURE = nibabel.load(config["mask_filename"])
    try:
        A = tv_helper.linear_operator_from_mask(STRUCTURE.get_data())
    except AttributeError:  # older parsimony API
        A, _ = tv_helper.A_from_mask(STRUCTURE.get_data())
    GLOBAL.A, GLOBAL.STRUCTURE, GLOBAL.CONFIG = A, STRUCTURE, config
def init():
    INPUT_DATA_X = os.path.join(WD_ORIGINAL, 'X.npy')
    INPUT_DATA_y = os.path.join(WD_ORIGINAL, 'y.npy')
    INPUT_MASK_PATH = os.path.join(WD_ORIGINAL, 'mask.nii')
    #INPUT_LINEAR_OPE_PATH = '/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/Freesurfer/data/30yo/Atv.npz'
    # INPUT_CSV = '/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/Freesurfer/population_30yo.csv'

    os.makedirs(WD, exist_ok=True)
    shutil.copy(INPUT_DATA_X, WD)
    shutil.copy(INPUT_DATA_y, WD)
    shutil.copy(INPUT_MASK_PATH, WD)

    #shutil.copy(INPUT_LINEAR_OPE_PATH, WD)

    ## Create config file
    os.chdir(WD)
    X = np.load("X.npy")
    y = np.load("y.npy")

    if not os.path.exists(os.path.join(WD, "Atv.npz")):
        import nibabel
        import parsimony.functions.nesterov.tv as nesterov_tv
        from parsimony.utils.linalgs import LinearOperatorNesterov
        img = nibabel.load(os.path.join(WD, "mask.nii"))
        Atv = nesterov_tv.linear_operator_from_mask(img.get_data(),
                                                    calc_lambda_max=True)
        Atv.save(os.path.join(WD, "Atv.npz"))
        Atv_ = LinearOperatorNesterov(filename=os.path.join(WD, "Atv.npz"))
        assert Atv.get_singular_values(0) == Atv_.get_singular_values(0)
        assert np.allclose(Atv_.get_singular_values(0),
                           11.942045760666732,
                           rtol=1e-03,
                           atol=1e-03)
        assert np.all([
            a.shape == (X.shape[1] - penalty_start, X.shape[1] - penalty_start)
            for a in Atv
        ])
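        # calc_lambda_max=True precomputes and stores the operator's
        # lambda_max (checked above, ~11.94), so downstream CONESTA runs can
        # reuse it instead of re-estimating it.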

    # Disabled on purpose (if False): precompute ridge warm-start betas, kept
    # for reference.
    if False and not os.path.exists(os.path.join(WD, "beta_start.npz")):
        betas = dict()
        import time
        alphas = [.01, 0.1, 1.0, 10]
        for alpha in alphas:
            mod = estimators.RidgeLogisticRegression(
                l=alpha, class_weight="auto", penalty_start=penalty_start)
            t_ = time.time()
            mod.fit(X, y.ravel())
            print(time.time() - t_)  # 11564
            betas["lambda_%.2f" % alpha] = mod.beta

        np.savez(os.path.join(WD, "beta_start.npz"), **betas)
        beta_start = np.load(os.path.join(WD, "beta_start.npz"))
        assert np.all(
            [np.all(beta_start[a] == betas[a]) for a in beta_start.keys()])

    ## Create config file

    #  ########################################################################
    #  Setting 1: 5cv + large range of parameters: cv_largerange
    #  with sub-sample training set with size 50, 100
    # 5cv/cv0*[_sub50]/refit/*

    # sub_sizes = [50, 100]
    sub_sizes = []

    cv_outer = [
        [tr, te] for tr, te in StratifiedKFold(
            n_splits=NFOLDS_OUTER,
            random_state=42).split(np.zeros(y.shape[0]), y.ravel())
    ]

    # Check that we get the same CV folds as previously
    cv_old = json.load(
        open(os.path.join(WD_ORIGINAL, "config_modselectcv.json")))["resample"]
    cv_outer_old = [
        cv_old[k] for k in ['cv%02d/refit' % i for i in range(NFOLDS_OUTER)]
    ]
    assert np.all([
        np.all(np.array(cv_outer_old[i][0]) == cv_outer[i][0])
        for i in range(NFOLDS_OUTER)
    ])
    assert np.all([
        np.all(np.array(cv_outer_old[i][1]) == cv_outer[i][1])
        for i in range(NFOLDS_OUTER)
    ])
    # check END

    import collections
    cv = collections.OrderedDict()

    cv["refit/refit"] = [np.arange(len(y)), np.arange(len(y))]

    for cv_outer_i, (tr_val, te) in enumerate(cv_outer):
        # Simple CV
        cv["cv%02d/refit" % (cv_outer_i)] = [tr_val, te]

        # Nested CV
        # cv_inner = StratifiedKFold(y[tr_val].ravel(), n_folds=NFOLDS_INNER, random_state=42)
        # for cv_inner_i, (tr, val) in enumerate(cv_inner):
        #     cv["cv%02d/cvnested%02d" % ((cv_outer_i), cv_inner_i)] = [tr_val[tr], tr_val[val]]

        # Sub-sample training set with size 50, 100
        # => cv*_sub[50|100]/refit
        grps = np.unique(y[tr_val]).astype(int)
        ytr = y.copy()
        ytr[te] = np.nan
        g_idx = [np.where(ytr == g)[0] for g in grps]
        assert np.all([np.all(ytr[g_idx[g]] == g) for g in grps])

        g_size = np.array([len(g) for g in g_idx])
        g_prop = g_size / g_size.sum()

        for sub_size in sub_sizes:
            # sub_size = sub_sizes[0]
            sub_g_size = np.round(g_prop * sub_size).astype(int)
            g_sub_idx = [
                np.random.choice(g_idx[g], sub_g_size[g], replace=False)
                for g in grps
            ]
            assert np.all([np.all(y[g_sub_idx[g]] == g) for g in grps])
            tr_val_sub = np.concatenate(g_sub_idx)
            assert len(tr_val_sub) == sub_size
            assert np.all([idx in tr_val for idx in tr_val_sub])
            assert np.all(np.logical_not([idx in te for idx in tr_val_sub]))
            cv["cv%02d_sub%i/refit" %
               (cv_outer_i, sub_size)] = [tr_val_sub, te]

    cv = {k: [cv[k][0].tolist(), cv[k][1].tolist()] for k in cv}

    # Nested CV
    # assert len(cv_largerange) == NFOLDS_OUTER * NFOLDS_INNER + NFOLDS_OUTER + 1

    # Simple CV
    # assert len(cv) == NFOLDS_OUTER + 1

    # Simple CV + sub-sample training set with size 50, 100:
    assert len(cv) == NFOLDS_OUTER * (1 + len(sub_sizes)) + 1  # 5 x 1 + 1 = 6 with sub_sizes empty

    print(list(cv.keys()))

    # Large grid of parameters
    alphas = [0.001, 0.01, 0.1, 1.0]
    # alphas = [.01, 0.1, 1.0] # first ran with this grid
    tv_ratio = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    l1l2_ratio = [0.1, 0.5, 0.9]
    #l1l2_ratio = [0, 0.1, 0.5, 0.9, 1.0] # first ran with this grid
    algos = ["enettv", "enetgn"]
    params_enet_tvgn = [
        list(param)
        for param in itertools.product(algos, alphas, l1l2_ratio, tv_ratio)
    ]
    assert len(params_enet_tvgn) == 240  # 2 algos x 4 alphas x 3 l1l2 x 10 tv (was 300 with the first grid)

    params_enet = [
        list(param)
        for param in itertools.product(["enet"], alphas, l1l2_ratio, [0])
    ]
    assert len(params_enet) == 12  # 4 alphas x 3 l1l2 (was 15 with the first grid)

    params = params_enet_tvgn + params_enet
    assert len(params) == 252  # 240 + 12 (was 315)
    # Simple CV
    # assert len(params) * len(cv) == 1890

    # Simple CV + sub-sample training set with size 50, 100:
    assert len(params) * len(cv) == 1512  # 252 params x 6 CV entries (was 1890)

    config = dict(data=dict(X="X.npy", y="y.npy"),
                  params=params,
                  resample=cv,
                  structure_linear_operator_tv="Atv.npz",
                  beta_start="beta_start.npz",
                  map_output="5cv",
                  user_func=user_func_filename)
    json.dump(config, open(os.path.join(WD, "config_cv_largerange.json"), "w"))

    # Build utils files: sync (push/pull) and PBS
    import brainomics.cluster_gabriel as clust_utils
    cmd = "mapreduce.py --map  %s/config_cv_largerange.json" % WD_CLUSTER
    clust_utils.gabriel_make_qsub_job_files(WD,
                                            cmd,
                                            walltime="250:00:00",
                                            suffix="_cv_largerange",
                                            freecores=2)

    #  ########################################################################
    #  Setting 2: dcv + reduced range of parameters: dcv_reducedrange
    #  5cv/cv0*/cvnested0*/*

    cv_outer = [
        [tr, te] for tr, te in StratifiedKFold(
            n_splits=NFOLDS_OUTER,
            random_state=42).split(np.zeros(y.shape[0]), y.ravel())
    ]

    # Check that we get the same CV folds as previously
    cv_old = json.load(
        open(os.path.join(WD_ORIGINAL, "config_modselectcv.json")))["resample"]
    cv_outer_old = [
        cv_old[k] for k in ['cv%02d/refit' % i for i in range(NFOLDS_OUTER)]
    ]
    assert np.all([
        np.all(np.array(cv_outer_old[i][0]) == cv_outer[i][0])
        for i in range(NFOLDS_OUTER)
    ])
    assert np.all([
        np.all(np.array(cv_outer_old[i][1]) == cv_outer[i][1])
        for i in range(NFOLDS_OUTER)
    ])
    # check END

    import collections
    cv = collections.OrderedDict()
    cv["refit/refit"] = [np.arange(len(y)), np.arange(len(y))]

    for cv_outer_i, (tr_val, te) in enumerate(cv_outer):
        cv["cv%02d/refit" % (cv_outer_i)] = [tr_val, te]
        cv_inner = StratifiedKFold(n_splits=NFOLDS_INNER, random_state=42).split(
            np.zeros(y[tr_val].shape[0]), y[tr_val].ravel())
        for cv_inner_i, (tr, val) in enumerate(cv_inner):
            cv["cv%02d/cvnested%02d" % (cv_outer_i, cv_inner_i)] = [
                tr_val[tr], tr_val[val]]

    cv = {k: [cv[k][0].tolist(), cv[k][1].tolist()] for k in cv}
    #assert len(cv) == NFOLDS_OUTER + 1
    assert len(cv) == NFOLDS_OUTER * NFOLDS_INNER + NFOLDS_OUTER + 1  # 25 + 5 + 1 = 31
    print(list(cv.keys()))

    # Reduced grid of parameters
    alphas = [0.001, 0.01, 0.1, 1.0]
    tv_ratio = [0.2, 0.8]
    l1l2_ratio = [0.1, 0.9]
    algos = ["enettv", "enetgn"]
    params_enet_tvgn = [
        list(param)
        for param in itertools.product(algos, alphas, l1l2_ratio, tv_ratio)
    ]
    assert len(params_enet_tvgn) == 32  # 2 algos x 4 alphas x 2 l1l2 x 2 tv (was 16)

    params_enet = [
        list(param)
        for param in itertools.product(["enet"], alphas, l1l2_ratio, [0])
    ]
    assert len(params_enet) == 8  # 4 alphas x 2 l1l2 (was 4)

    params = params_enet_tvgn + params_enet
    assert len(params) == 40  # 32 + 8 (was 20)
    assert len(params) * len(cv) == 1240  # 40 params x 31 CV entries (was 620)

    config = dict(data=dict(X="X.npy", y="y.npy"),
                  params=params,
                  resample=cv,
                  structure_linear_operator_tv="Atv.npz",
                  beta_start="beta_start.npz",
                  map_output="5cv",
                  user_func=user_func_filename)
    json.dump(config,
              open(os.path.join(WD, "config_dcv_reducedrange.json"), "w"))

    # Build utils files: sync (push/pull) and PBS
    import brainomics.cluster_gabriel as clust_utils
    cmd = "mapreduce.py --map  %s/config_dcv_reducedrange.json" % WD_CLUSTER
    clust_utils.gabriel_make_qsub_job_files(WD,
                                            cmd,
                                            walltime="250:00:00",
                                            suffix="_dcv_reducedrange",
                                            freecores=2)

###############################################################################
# Example no. 5

# Center each site's data (sites 4, 5, 6) by subtracting its per-feature mean
for s in (4, 5, 6):
    site_mask = site.ravel() == s
    X[site_mask, :] = X[site_mask, :] - X[site_mask, :].mean(axis=0)

# Stack covariates
X = np.hstack([Z, X])
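
# With Z stacked first, passing penalty_start=Z.shape[1] to the parsimony
# estimators leaves these covariate columns unpenalized (cf. the mapper above).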

n, p = X.shape
np.save(os.path.join(OUTPUT, "X.npy"), X)
np.save(os.path.join(OUTPUT, "y.npy"), y)

###############################################################################
###############################################################################
# Precompute the TV linear operator
#X = np.load("/neurospin/brainomics/2018_euaims_leap_predict_vbm/results/VBM/1.5mm/data/X.npy")
#y = np.load("/neurospin/brainomics/2018_euaims_leap_predict_vbm/results/VBM/1.5mm/data/y.npy")

mask = nibabel.load(os.path.join(OUTPUT, "mask.nii"))

import parsimony.functions.nesterov.tv as nesterov_tv
from parsimony.utils.linalgs import LinearOperatorNesterov

Atv = nesterov_tv.linear_operator_from_mask(mask.get_data(),
                                            calc_lambda_max=True)
Atv.save(os.path.join(OUTPUT, "Atv.npz"))
Atv_ = LinearOperatorNesterov(filename=os.path.join(OUTPUT, "Atv.npz"))
assert Atv.get_singular_values(0) == Atv_.get_singular_values(0)
Exemplo n.º 6
0
arxiv = np.load("ADNI_ADAS11-MCIc-CTL_N199.npz")
X = arxiv["X"]
y = arxiv["y"]
beta_start = arxiv["beta_start"]
assert X.shape == (199, 286214)
TAU = 0.2
EPS = 1e-6  # PRECISION FOR THE PAPER

###############################################################################
# Fit model

ALPHA = 0.01
l, k, g = ALPHA * np.array([0.3335, 0.3335, 0.333])  # l1, l2, tv weights (ratios sum to 1)

mask_ima = nibabel.load(os.path.join(WD, mask_filename))
Atv = tv.linear_operator_from_mask(mask_ima.get_data())

out = os.path.join(WD, "run", "conesta_ite_snapshots/")
snapshot = AlgorithmSnapshot(out, saving_period=1).save_conesta

info = [Info.converged, Info.num_iter, Info.time, Info.func_val,
        Info.mu, Info.gap, Info.fvalue]
conesta = algorithms.proximal.CONESTA(callback_conesta=snapshot)
algorithm_params = dict(max_iter=1000000, info=info)
os.makedirs(out, exist_ok=True)

algorithm_params["callback"] = snapshot

# Completion sketch: the call is truncated in the source; the arguments after
# `l` below follow the parsimony LinearRegressionL1L2TV signature
# (l1, l2, tv, A, algorithm=..., algorithm_params=...).
mod = estimators.LinearRegressionL1L2TV(l, k, g, A=Atv,
                                        algorithm=conesta,
                                        algorithm_params=algorithm_params)

###############################################################################
# Example no. 7

    def test_tvhelper_linear_operator_from_mask(self):

        import parsimony.functions.nesterov.tv as tv

        ## Simple mask with offset
        shape = (5, 4)
        mask = np.zeros(shape)
        mask[1:(shape[0] - 1), 0:(shape[1] - 1)] = 1
        Ax_ = np.matrix(
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, -1, 0, 0, 1, 0, 0, 0, 0, 0],
         [0, 0, -1, 0, 0, 1, 0, 0, 0, 0],
         [0, 0, 0, -1, 0, 0, 1, 0, 0, 0],
         [0, 0, 0, 0, -1, 0, 0, 1, 0, 0],
         [0, 0, 0, 0, 0, -1, 0, 0, 1, 0],
         [0, 0, 0, 0, 0, 0, -1, 0, 0, 1],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        Ay_ = np.matrix(
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, -1, 1, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, -1, 1, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, -1, 1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, -1, 1, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, -1, 1, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, -1, 1],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        A = tv.linear_operator_from_mask(mask, offset=1)
        Ax, Ay, Az = A

        assert np.all(Ax.todense() == Ax_)
        assert np.all(Ay.todense() == Ay_)
        assert np.all(Az.todense() == 0)  # 2D mask: Az is all zeros
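        # offset=1 shifts the operator one column to the right so that the
        # first column (an unpenalized covariate slot, cf. penalty_start)
        # carries no TV penalty; hence the all-zero first columns of Ax_, Ay_.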

        #######################################################################
        ## GROUP TV
        shape = (6, 4)
        mask = np.zeros(shape, dtype=int)
        mask[:3, :3] = 1
        mask[3:6, 1:4] = 2
        Ax_ = np.matrix(
        [[-1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        Ay_ = np.matrix(
        [[-1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        A = tv.linear_operator_from_mask(mask)
        Ax, Ay, Az = A

        assert np.all(Ax.todense() == Ax_)
        assert np.all(Ay.todense() == Ay_)
        assert np.all(Az.todense() == 0)  # 2D mask: Az is all zeros

        #######################################################################
        ## test function tv on checkerboard
        #######################################################################
        dx = 5  # p should be odd
        shape = (dx, dx, dx)
        # linear_operator_from_mask
        mask = np.zeros(shape)
        mask[1:(dx - 1), 1:(dx - 1), 1:(dx - 1)] = 1
        p = np.prod((dx - 2, dx - 2, dx - 2))
        beta = np.zeros(p)
        beta[0:p:2] = 1  # checkerboard of 0 and 1
        A = tv.linear_operator_from_mask(mask)
        tvfunc = tv.TotalVariation(l=1., A=A)

        assert tvfunc.f(beta) == self._f_checkerboard_cube((dx - 2,
                                                            dx - 2,
                                                            dx - 2))
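        # On a 0/1 checkerboard every adjacent voxel pair differs by 1, so the
        # TV value depends only on the cube shape; _f_checkerboard_cube
        # (defined elsewhere in this test class) returns that closed form.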

        # linear_operator_from_mask with groups
        mask = np.zeros(shape)
        # 4 groups
        mask[0:(dx // 2), 0:(dx // 2), :] = 1
        mask[0:(dx // 2), (dx // 2):dx, :] = 2
        mask[(dx // 2):dx, 0:(dx // 2), :] = 3
        mask[(dx // 2):dx, (dx // 2):dx, :] = 4
        p = np.prod((dx, dx, dx))
        beta = np.zeros(p)
        beta[0:p:2] = 1  # checkerboard of 0 and 1
        A = tv.linear_operator_from_mask(mask)
        tvfunc = tv.TotalVariation(l=1., A=A)

        assert np.allclose(tvfunc.f(beta),
                           self._f_checkerboard_cube((dx // 2, dx // 2, dx)) +
                           self._f_checkerboard_cube((dx // 2, dx // 2 + 1, dx)) +
                           self._f_checkerboard_cube((dx // 2 + 1, dx // 2, dx)) +
                           self._f_checkerboard_cube((dx // 2 + 1, dx // 2 + 1, dx)))

        shape = (2, 3)
        mask = np.ones(shape)
        weights1D = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
        #weights2D = np.reshape(weights1D, shape)
        A_shape = tv.linear_operator_from_shape(shape, weights1D)
        #A_mask = tv.linear_operator_from_subset_mask(mask, weights2D)
        A_true = (np.array([[-1., 1., 0., 0., 0., 0.],
                           [0., -2., 2., 0., 0., 0.],
                           [0., 0., 0., 0., 0., 0.],
                           [0., 0., 0., -4., 4., 0.],
                           [0., 0., 0., 0., -5., 5.],
                           [0., 0., 0., 0., 0., 0.]]),
                  np.array([[-1., 0., 0., 1., 0., 0.],
                            [0., -2., 0., 0., 2., 0.],
                            [0., 0., -3., 0., 0., 3.],
                            [0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0.]]),
                  np.array([[0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0.]]))

        assert np.array_equal(A_true[0], A_shape[0].todense())
        assert np.array_equal(A_true[1], A_shape[1].todense())
        assert np.array_equal(A_true[2], A_shape[2].todense())
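
###############################################################################
# A minimal standalone sketch of the quantity these tests exercise: with
# A = (Ax, Ay, Az) built by linear_operator_from_shape, the anisotropic
# grouped TV of beta is sum_i sqrt((Ax b)_i^2 + (Ay b)_i^2 + (Az b)_i^2).
# Illustration only; parsimony's TotalVariation.f is the reference.
import numpy as np
import parsimony.functions.nesterov.tv as tv

A = tv.linear_operator_from_shape((3, 3))
beta = np.arange(9, dtype=float)
grad2 = sum(np.asarray(Ai.dot(beta)) ** 2 for Ai in A)
print(np.sqrt(grad2).sum())  # anisotropic TV of beta on a 3x3 grid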

###############################################################################
# Example no. 8

#############################################################################

# File to store classification scores
f = open(
    os.path.join(BASE_PATH, 'results', 'Logistic_L1_L2_TV_withHC',
                 'parameters_scores.csv'), 'w', newline='')
c = csv.writer(f, delimiter=',')
c.writerow([
    "alpha", "l1", "l2", "tv", "accuracy", "recall_0", "recall_1",
    "precision_0", "precision_1", "auc"
])

# Empirically set the global penalty, based on the maximum l1 penalty
alpha = l1_max_logistic_loss(T, y)
conesta = algorithms.proximal.CONESTA(max_iter=500)
A = nesterov_tv.linear_operator_from_mask(mask_bool)

# Messages for communication between processes
FLAG_STOP_PROCESS = "STOP_WORK"
FLAG_PROCESS_FINISHED = "PROCESS_HAS_FINISHED"
nb_processes = 30
# Data structures for parallel processing
manager = Manager()  # multiprocessing.Manager()
work_queue, result_queue = manager.Queue(), manager.Queue()

# Add jobs in work_queue
for p in params:
    work_queue.put(p)

# Add poison pills to stop the remote workers
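# A typical completion sketch (the original snippet stops here), assuming each
# of the nb_processes workers exits when it reads FLAG_STOP_PROCESS:
for _ in range(nb_processes):
    work_queue.put(FLAG_STOP_PROCESS)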

###############################################################################
# Example no. 9

import numpy as np
import nibabel as nib
from scipy import sparse

import pca_struct
import parsimony.functions.nesterov.tv as tv_helper

global_pen = 0.1
gn_ratio = 0.5
l1_ratio = 0.5

lgn = global_pen * gn_ratio
ll1 = l1_ratio * global_pen * (1 - gn_ratio)
ll2 = (1 - l1_ratio) * global_pen * (1 - gn_ratio)
# e.g. lgn = 0.05, ll1 = 0.025, ll2 = 0.025, summing to global_pen = 0.1
assert np.allclose(ll1 + ll2 + lgn, global_pen)

nib_mask = nib.load(INPUT_MASK)
Agn = sparse.vstack(tv_helper.linear_operator_from_mask(nib_mask.get_data()))

################################################################################
snapshot = AlgorithmSnapshot('/neurospin/brainomics/2014_pca_struct/fmri/fmri_time/gn_1e-8/').save_nipals

mod = pca_struct.PCAGraphNet(n_components=3,
                             l1=ll1, l2=ll2, lgn=lgn,
                             Agn=Agn,
                             criterion="frobenius",
                             eps=1e-8,
                             max_iter=500,
                             output=False, callback=snapshot)
mod.fit(X)