Example #1
def mapper(key, output_collector):
    import mapreduce as GLOBAL
    Xtr = GLOBAL.DATA_RESAMPLED["X"][0]
    Xte = GLOBAL.DATA_RESAMPLED["X"][1]
    ytr = GLOBAL.DATA_RESAMPLED["y"][0]
    yte = GLOBAL.DATA_RESAMPLED["y"][1]

    alpha = float(key[0])
    l1, l2, tv = alpha * float(key[1]), alpha * float(key[2]), alpha * float(
        key[3])
    print("l1:%f, l2:%f, tv:%f" % (l1, l2, tv))
    print(key)
    class_weight = "auto"  # unbiased
    print(output_collector.output_dir)
    beta_start = GLOBAL.BETA_START.all()[os.path.basename(
        output_collector.output_dir)]
    mask = np.ones(Xtr.shape[0], dtype=bool)

    scaler = preprocessing.StandardScaler().fit(Xtr)
    Xtr = scaler.transform(Xtr)
    Xte = scaler.transform(Xte)
    A = GLOBAL.A

    info = [
        Info.converged, Info.num_iter, Info.time, Info.func_val, Info.mu,
        Info.gap
    ]
    conesta = algorithms.proximal.CONESTA()
    algorithm_params = dict(max_iter=50000, info=info)
    out_fista = os.path.join(WD, output_collector.output_dir,
                             "fista_ite_snapshots/")
    out_conesta = os.path.join(WD, output_collector.output_dir,
                               "conesta_ite_snapshots/")

    os.makedirs(out_fista, exist_ok=True)
    os.makedirs(out_conesta, exist_ok=True)

    snapshot_fista = AlgorithmSnapshot(out_fista, saving_period=1).save_fista
    snapshot_conesta = AlgorithmSnapshot(out_conesta,
                                         saving_period=1).save_conesta
    algorithm_params["callback_fista"] = snapshot_fista
    algorithm_params["callback_conesta"] = snapshot_conesta


    mod = estimators.LogisticRegressionL1L2TV(l1, l2, tv, A,
                                              algorithm=conesta,
                                              algorithm_params=algorithm_params,
                                              class_weight=class_weight)
    mod.fit(Xtr, ytr.ravel(), beta=beta_start)
    y_pred = mod.predict(Xte)
    proba_pred = mod.predict_probability(Xte)
    ret = dict(y_pred=y_pred,
               y_true=yte,
               proba_pred=proba_pred,
               beta=mod.beta,
               mask=mask,
               beta_start=beta_start)
    if output_collector:
        output_collector.collect(key, ret)
    else:
        return ret
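
These mapper snippets rely on imports and constants defined at the module level of the map-reduce script, which the excerpt does not show. A minimal sketch of the setup they appear to assume (the exact import lines are inferred from how the names are used; WD is a hypothetical placeholder, and WD_CLUSTER and penalty_start in Example #2 are likewise module-level constants whose values are not shown here):

# Module-level setup assumed by the mappers above (sketch, not the original script).
import os
import numpy as np
from sklearn import preprocessing

import parsimony.estimators as estimators
import parsimony.algorithms as algorithms
from parsimony.algorithms.utils import Info, AlgorithmSnapshot

WD = "/path/to/working_dir"  # hypothetical placeholder for the study directory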
Example #2
def mapper(key, output_collector):
    import mapreduce as GLOBAL
    X = GLOBAL.DATA["X"]
    y = GLOBAL.DATA["y"]
    start_vector = GLOBAL.DATA["start_vector"]

    alpha = float(key[0])
    l1, l2, tv = alpha * float(key[1]), alpha * float(key[2]), alpha * float(
        key[3])
    print("l1:%f, l2:%f, tv:%f" % (l1, l2, tv))

    class_weight = "auto"  # unbiased
    mask = np.ones(X.shape[0], dtype=bool)

    scaler = preprocessing.StandardScaler().fit(X)
    X = scaler.transform(X)
    A = GLOBAL.A

    info = [
        Info.converged, Info.num_iter, Info.time, Info.func_val, Info.mu,
        Info.gap
    ]
    conesta = algorithms.proximal.CONESTA()
    algorithm_params = dict(max_iter=1000000, info=info)
    out = os.path.join(WD_CLUSTER, GLOBAL.DIR, "0",
                       str(key[0]) + "_" + str(key[1]) + "_" +
                       str(key[2]) + "_" + str(key[3]),
                       "conesta_ite_snapshots/")
    os.makedirs(out, exist_ok=True)

    snapshot = AlgorithmSnapshot(out, saving_period=1).save_conesta
    algorithm_params["callback"] = snapshot



    mod = estimators.LogisticRegressionL1L2TV(l1, l2, tv, A,
                                              algorithm=conesta,
                                              algorithm_params=algorithm_params,
                                              class_weight=class_weight,
                                              penalty_start=penalty_start,
                                              start_vector=start_vector)
    mod.fit(X, y.ravel())
    y_pred = mod.predict(X)
    proba_pred = mod.predict_probability(X)
    ret = dict(y_pred=y_pred,
               y_true=y,
               proba_pred=proba_pred,
               beta=mod.beta,
               mask=mask)
    if output_collector:
        output_collector.collect(key, ret)
    else:
        return ret
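
Both mappers turn a grid key (alpha, l1_ratio, l2_ratio, tv_ratio) into absolute penalty weights by scaling the ratios with alpha. A quick worked example using the ratios that appear in Example #3 (values chosen for illustration only):

# Worked example of the key -> (l1, l2, tv) mapping used by the mappers above.
alpha = 0.01
key = (alpha, 0.3335, 0.3335, 0.333)  # (alpha, l1_ratio, l2_ratio, tv_ratio)
l1, l2, tv = alpha * key[1], alpha * key[2], alpha * key[3]
# l1 = 0.003335, l2 = 0.003335, tv = 0.00333; the three weights sum back to alpha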
Example #3
beta_start = arxiv["beta_start"]
assert X.shape == (199, 286214)
TAU = 0.2
EPS = 1e-6  # PRECISION FOR THE PAPER

###############################################################################
# Fit model

ALPHA = 0.01  #
l, k, g = ALPHA * np.array([0.3335, 0.3335, 0.333])

mask_ima = nibabel.load(os.path.join(WD, mask_filename))
Atv = tv.linear_operator_from_mask(mask_ima.get_data())

out = os.path.join(WD, "run", "conesta_ite_snapshots/")
snapshot = AlgorithmSnapshot(out, saving_period=1).save_conesta

info = [
    Info.converged, Info.num_iter, Info.time, Info.func_val, Info.mu,
    Info.gap, Info.fvalue
]
conesta = algorithms.proximal.CONESTA(callback_conesta=snapshot)
algorithm_params = dict(max_iter=1000000, info=info)
os.makedirs(out, exist_ok=True)

algorithm_params["callback"] = snapshot

mod = estimators.LinearRegressionL1L2TV(l,
                                        k,
                                        g,
                                        A=Atv,
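
The constructor call above is cut off in the source. A minimal sketch of how such a call is typically completed, following the pattern of Examples #1 and #2 (the target y and the warm start via beta are assumptions, not shown in the excerpt):

# Sketch only -- not the original script's code.
mod = estimators.LinearRegressionL1L2TV(l, k, g, A=Atv,
                                        algorithm=conesta,
                                        algorithm_params=algorithm_params)
mod.fit(X, y, beta=beta_start)  # warm start assumed, mirroring Example #1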
Example #4
ltv = global_pen * tv_ratio
ll1 = l1_ratio * global_pen * (1 - tv_ratio)
ll2 = (1 - l1_ratio) * global_pen * (1 - tv_ratio)
assert np.allclose(ll1 + ll2 + ltv, global_pen)

# Compute A and mask
masks = []
INPUT_OBJECT_MASK_FILE_FORMAT = "mask_{o}.npy"
for i in range(3):
    filename = INPUT_OBJECT_MASK_FILE_FORMAT.format(o=i)
    masks.append(np.load(filename))
im_shape = config["im_shape"]
Atv = nesterov_tv.A_from_shape(im_shape)

########################################
snapshot = AlgorithmSnapshot(
    '/neurospin/brainomics/2014_pca_struct/lambda_max/',
    saving_period=1).save_conesta
mod = pca_tv.PCA_L1_L2_TV(n_components=3,
                          l1=ll1,
                          l2=ll2,
                          ltv=ltv,
                          Atv=Atv,
                          criterion="frobenius",
                          eps=1e-4,
                          max_iter=100,
                          inner_max_iter=int(1e4),
                          output=True,
                          callback=snapshot)
mod.fit(X[:250, :])
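
A quick numeric check of the global_pen split above into TV, l1, and l2 parts (the values are chosen only for illustration):

import numpy as np

global_pen, tv_ratio, l1_ratio = 1.0, 0.5, 0.5
ltv = global_pen * tv_ratio                          # 0.5
ll1 = l1_ratio * global_pen * (1 - tv_ratio)         # 0.25
ll2 = (1 - l1_ratio) * global_pen * (1 - tv_ratio)   # 0.25
assert np.allclose(ll1 + ll2 + ltv, global_pen)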
Example #5
tv_ratio = 0.5  # 1e-05
l1_ratio = 0.5

ltv = global_pen * tv_ratio
ll1 = l1_ratio * global_pen * (1 - tv_ratio)
ll2 = (1 - l1_ratio) * global_pen * (1 - tv_ratio)
assert np.allclose(ll1 + ll2 + ltv, global_pen)


Atv = nesterov_tv.A_from_shape(SHAPE)
start_vector = start_vectors.RandomStartVector(seed=42)



##############################################################################
snapshot = AlgorithmSnapshot(
    '/neurospin/brainomics/2014_pca_struct/synthetic_data/data_100_100_bis/').save_nipals
t0 = utils.time_cpu()
mod = pca_tv.PCA_L1_L2_TV(n_components=3,
                          l1=ll1, l2=ll2, ltv=ltv,
                          Atv=Atv,
                          criterion="frobenius",
                          eps=1e-4,
                          max_iter=100,
                          inner_max_iter=int(1e4),
                          output=True,
                          start_vector=start_vector,
                          callback=snapshot)

mod.fit(X)
time = utils.time_cpu() - t0
print(time)
#############################################################################
Example #6
ltv = global_pen * tv_ratio
ll1 = l1_ratio * global_pen * (1 - tv_ratio)
ll2 = (1 - l1_ratio) * global_pen * (1 - tv_ratio)
assert np.allclose(ll1 + ll2 + ltv, global_pen)



nib_mask = nib.load(INPUT_MASK)
Atv = parsimony.functions.nesterov.tv.A_from_mask(nib_mask.get_data())


# PARSIMONY
################################################################################
from parsimony.algorithms.utils import AlgorithmSnapshot
snapshot = AlgorithmSnapshot(
    '/neurospin/brainomics/2014_pca_struct/fmri/fmri_time/enet_1e-8/').save_nipals

mod = pca_tv.PCA_L1_L2_TV(n_components=3,
                          l1=ll1, l2=ll2, ltv=ltv,
                          Atv=Atv,
                          criterion="frobenius",
                          eps=1e-8,
                          max_iter=100,
                          inner_max_iter=int(1e4),
                          output=True,
                          callback=snapshot)
mod.fit(X)
###############################################################################



# Plot time and precision
Example #7
ll1 = l1_ratio * global_pen * (1 - tv_ratio)
ll2 = (1 - l1_ratio) * global_pen * (1 - tv_ratio)
assert np.allclose(ll1 + ll2 + ltv, global_pen)



mesh_coord, mesh_triangles = mesh_utils.mesh_arrays(os.path.join(TEMPLATE_PATH, "lrh.pial.gii"))
mask = np.load(os.path.join(INPUT_BASE_DIR, "mask.npy"))
import parsimony.functions.nesterov.tv as tv_helper
Atv = tv_helper.linear_operator_from_mesh(mesh_coord, mesh_triangles, mask=mask)
     

# PARSIMONY
########################################
from parsimony.algorithms.utils import AlgorithmSnapshot
snapshot = AlgorithmSnapshot(
    '/neurospin/brainomics/2014_pca_struct/adni/adni_time/enet_1e-6/',
    saving_period=1).save_conesta

mod = pca_tv.PCA_L1_L2_TV(n_components=3,
                          l1=ll1, l2=ll2, ltv=ltv,
                          Atv=Atv,
                          criterion="frobenius",
                          eps=1e-6,
                          inner_eps=1e-1,
                          max_iter=100,
                          inner_max_iter=int(1e4),
                          output=True,
                          callback=snapshot)
mod.fit(X)
    
    
    
    
Example #8
# Compute A and mask
masks = []
INPUT_OBJECT_MASK_FILE_FORMAT = "mask_{o}.npy"
for i in range(3):
    filename = INPUT_OBJECT_MASK_FILE_FORMAT.format(o=i)
    masks.append(np.load(filename))
im_shape = config["im_shape"]
Atv = nesterov_tv.A_from_shape(im_shape)

# PARSIMONY
########################################
from parsimony.algorithms.utils import AlgorithmSnapshot

snapshot = AlgorithmSnapshot(
    '/neurospin/brainomics/2014_pca_struct/dice5_ad_validation/time/tv_1e-20_1e-1/',
    saving_period=1).save_conesta
mod = pca_tv.PCA_L1_L2_TV(n_components=3,
                          l1=ll1,
                          l2=ll2,
                          ltv=ltv,
                          Atv=Atv,
                          criterion="frobenius",
                          eps=1e-20,
                          inner_eps=1e-1,
                          max_iter=100,
                          inner_max_iter=int(1e6),
                          output=True,
                          callback=snapshot)
mod.fit(X[0:250])
##############################################################################
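
The examples build the TV linear operator Atv in three different ways, depending on how the spatial structure is described: from an image shape (Examples #4, #5, #8), from a binary image mask (Examples #3, #6), or from a cortical mesh (Example #7). A minimal consolidated sketch with placeholder inputs; recent parsimony names these helpers linear_operator_from_*, with A_from_* as the older aliases seen in some examples:

# Three ways to construct the TV linear operator (sketch; inputs are placeholders).
import numpy as np
import parsimony.functions.nesterov.tv as nesterov_tv

# 1. From a regular image shape, no mask (cf. Examples #4, #5, #8).
Atv_shape = nesterov_tv.linear_operator_from_shape((10, 10, 10))

# 2. From a binary image mask; voxels outside the mask are dropped (cf. Examples #3, #6).
mask = np.ones((10, 10, 10), dtype=bool)
Atv_mask = nesterov_tv.linear_operator_from_mask(mask)

# 3. From a cortical mesh, with adjacency taken from the triangles (cf. Example #7).
# Atv_mesh = nesterov_tv.linear_operator_from_mesh(mesh_coord, mesh_triangles, mask=mask)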