Example #1
import os
import dist_pd.penalties
import dist_pd.primal_dual
import dist_pd.distmat
from dist_pd.optimal_paramset import BddParamSet, UnbddParamSet
import importlib
import numpy as np
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time
from argparser import parser_distributed

args = parser_distributed("Graph-guided fused lasso, optimal-rate iteration.",
                          1,
                          "../data/Zhu_1000_10_5000_20_0.7_100",
                          255.6824**2,
                          default_output_prefix="ggfl_opt_")
ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)])  # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))

iters = args.iters
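The sum(..., []) call above flattens a list of per-GPU lists, so each GPU appears in the device list once per slice. A standalone illustration, not part of the original script; the values of ngpu and nslices below are chosen only for demonstration:

# Standalone illustration, not part of the original script.
ngpu, nslices = 2, 3
devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
print(devices)
# ['/gpu:0', '/gpu:0', '/gpu:0', '/gpu:1', '/gpu:1', '/gpu:1']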
Example #2
import os
import dist_pd.penalties
import dist_pd.split_22
import dist_pd.distmat
import dist_pd.utils
import dist_pd.partitioners
import numpy as np
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time
from argparser import parser_distributed

args = parser_distributed("Overlapping group lasso, distributed mode.",
                          1,
                          "../data/ogrp_100_100_10_5000",
                          164.9960**2,
                          default_output_prefix="lgl_fb_")

ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)])  # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))
Example #3
import os
import dist_pd.losses
import dist_pd.penalties
import dist_pd.split_22
import dist_pd.distmat
import dist_pd.utils
import dist_pd.partitioners
import numpy as np 
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time
from dist_pd.optimal_paramset import BddParamSet, UnbddParamSet
from argparser import parser_distributed

args = parser_distributed("Latent group lasso, optimal-rate iteration.",
                          1,
                          "../data/ogrp_100_100_10_5000",
                          164.9960**2,
                          default_output_prefix="lgl_opt_")

ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)]) # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))

iters = args.iters
interval = args.interval
Example #4
import os
import dist_pd.primal_dual
import dist_pd.distmat
import dist_pd.partitioners
import dist_pd.utils
import numpy as np
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time

from argparser import parser_distributed

args = parser_distributed(
    "Overlapping group lasso, FB splitting-based iterations",
    1,
    "../data/ogrp_100_100_10_5000",
    164.9960**2,
    default_output_prefix="ogl_fb_")

ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)])  # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))
Example #5
import os
import dist_pd.utils
import dist_pd.partitioners
import numpy as np
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time
from dist_pd.optimal_paramset import BddStocParamSet, UnbddStocParamSet

from argparser import parser_distributed

args = parser_distributed("Overlapping group lasso, stochastic iteration.",
                          1,
                          "../data/ogrp_100_100_10_5000",
                          164.9960**2,
                          default_output_prefix="ogl_stoc_",
                          stoc=True,
                          default_s=300000)

ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)])  # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))
Example #6
import os
import dist_pd.distmat
from dist_pd.optimal_paramset import BddStocParamSet, UnbddStocParamSet

import numpy as np
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time

from argparser import parser_distributed

args = parser_distributed("Graph-guided fused lasso, stochastic iteration.",
                          1,
                          "../data/Zhu_1000_10_5000_20_0.7_100",
                          255.6824**2,
                          default_output_prefix="ggfl_stoc_",
                          stoc=True,
                          default_s=10000000)
ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)])  # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))
Example #7
import os
import dist_pd.losses
import dist_pd.penalties
import dist_pd.primal_dual 
import dist_pd.distmat
import importlib
import numpy as np 
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time
from argparser import parser_distributed

args = parser_distributed("Graph-guided fused lasso, FBF splitting.",
                          1,
                          "../data/Zhu_1000_10_5000_20_0.7_100",
                          255.6824**2,
                          default_output_prefix="ggfl_fbf_")
ngpu = args.ngpus
nslices = args.nslices  # slices per GPU; more slices means lower memory usage per device

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)]) # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))

iters = args.iters
interval = args.interval

import tensorflow as tf
dat = scipy.io.loadmat('{}.mat'.format(args.data_prefix))
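If the .mat file were saved in MATLAB v7.3 format, the loadmat call above would raise NotImplementedError, since v7.3 files are HDF5-based. The following is only a hedged sketch of a fallback using the already-imported h5py; the exception handling and dictionary construction are illustrative and not taken from the original script:

# Illustrative fallback, not part of the original script.
try:
    dat = scipy.io.loadmat('{}.mat'.format(args.data_prefix))
except NotImplementedError:
    # MATLAB v7.3 files are HDF5; arrays read this way come back transposed
    # relative to loadmat because of MATLAB's column-major layout.
    with h5py.File('{}.mat'.format(args.data_prefix), 'r') as f:
        dat = {k: np.array(f[k]) for k in f.keys()}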
Example #8
import os
import dist_pd.split_22
import dist_pd.distmat
import dist_pd.utils
import dist_pd.partitioners
import numpy as np
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time

from argparser import parser_distributed

args = parser_distributed("Latent group lasso, FBF splitting.",
                          1,
                          "../data/ogrp_100_100_10_5000",
                          164.9960**2,
                          default_output_prefix="lgl_fbf_")

ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)])  # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))
Example #9
import os
import dist_pd.losses
import dist_pd.penalties
import dist_pd.primal_dual
import dist_pd.distmat
import numpy as np
from scipy.sparse import dia_matrix
import scipy.io
import h5py
import sys
import time
from argparser import parser_distributed

args = parser_distributed("Graph-guided fused lasso, distributed mode.", 5,
                          "../data/Zhu_10000_12_5000_20_0.7_10000",
                          499.6337**2, 1100, 100, False)

ngpu = args.ngpus
nslices = args.nslices

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in range(ngpu)])  # select devices

if not args.use_cpu:
    devices = sum([nslices * ['/gpu:%d' % (i,)] for i in range(ngpu)], [])
else:
    devices = ['/cpu:0']
print("device list: {}".format(devices))

iters = args.iters
interval = args.interval
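All of the examples above go through the repository's local argparser.parser_distributed helper, whose source is not shown here. The sketch below is only a guess at a compatible signature, reconstructed from the positional and keyword arguments used in these call sites; every flag name and default beyond the attributes actually read above (ngpus, nslices, iters, interval, use_cpu, data_prefix) is an assumption.

# Hypothetical sketch only; the real argparser.parser_distributed may differ.
import argparse

def parser_distributed(description, default_nslices, default_data_prefix,
                       default_const, default_iters=1000, default_interval=10,
                       default_use_cpu=False, default_output_prefix="out_",
                       stoc=False, default_s=None):
    # default_const (e.g. 255.6824**2) looks like a precomputed squared norm
    # passed through to the solver; its exact role is assumed, not shown here.
    p = argparse.ArgumentParser(description=description)
    p.add_argument('--ngpus', type=int, default=1)
    p.add_argument('--nslices', type=int, default=default_nslices)
    p.add_argument('--iters', type=int, default=default_iters)
    p.add_argument('--interval', type=int, default=default_interval)
    p.add_argument('--use_cpu', action='store_true', default=default_use_cpu)
    p.add_argument('--data_prefix', default=default_data_prefix)
    p.add_argument('--output_prefix', default=default_output_prefix)
    if stoc:
        # sample-size parameter for the stochastic variants (assumed meaning)
        p.add_argument('--s', type=int, default=default_s)
    return p.parse_args()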