def get_problem(config):
    """Build a problem generator and its dictionary from a config mapping.

    Parameters
    ----------
    config : dict
        Must contain 'data' (one of 'artificial', 'adverse', 'mnist',
        'images'), 'batch_size' and 'lmbd'.  Extra keys are read depending
        on the dataset: 'K', 'p', 'rho', 'seed_D', 'corr' for the synthetic
        problems, 'K' and 'save_dir' for MNIST, 'p' for images.  'seed'
        seeds the generator's batch sampling.

    Returns
    -------
    pb : problem generator matching config['data']
    D : the dictionary used by the generator

    Raises
    ------
    NameError
        If config['data'] is not one of the supported dataset names.
    """
    # Parameters shared by every dataset.
    dataset = config['data']
    batch_size, lmbd = config['batch_size'], config['lmbd']
    seed = config.get('seed')

    # Setup the training constant and a test set
    if dataset == 'artificial':
        from Lcod.simple_problem_generator import SimpleProblemGenerator
        from Lcod.simple_problem_generator import create_dictionary

        # retrieve specific parameters for the problem
        K, p, rho = config['K'], config['p'], config['rho']
        seed_D, corr = config.get('seed_D'), config.get('corr', 0)
        D = create_dictionary(K, p, seed=seed_D)
        pb = SimpleProblemGenerator(D,
                                    lmbd,
                                    rho=rho,
                                    batch_size=batch_size,
                                    corr=corr,
                                    seed=seed)
    elif dataset == 'adverse':
        from Lcod.simple_problem_generator import SimpleProblemGenerator
        from data_handlers.dictionaries import create_adversarial_dictionary

        # retrieve specific parameters for the problem
        K, p, rho = config['K'], config['p'], config['rho']
        seed_D, corr = config.get('seed_D'), config.get('corr', 0)
        D = create_adversarial_dictionary(K, p, seed=seed_D)
        pb = SimpleProblemGenerator(D,
                                    lmbd,
                                    rho=rho,
                                    batch_size=batch_size,
                                    corr=corr,
                                    seed=seed)
    elif dataset == 'mnist':
        from Lcod.mnist_problem_generator import MnistProblemGenerator
        from Lcod.mnist_problem_generator import create_dictionary_dl
        K, save_dir = config['K'], config['save_dir']
        D = create_dictionary_dl(lmbd, K, N=10000, dir_mnist=save_dir)
        pb = MnistProblemGenerator(D,
                                   lmbd,
                                   batch_size=batch_size,
                                   dir_mnist=save_dir,
                                   seed=seed)
    elif dataset == 'images':
        from Lcod.image_problem_generator import ImageProblemGenerator
        from Lcod.image_problem_generator import create_dictionary_haar
        p = config['p']
        D = create_dictionary_haar(p)
        pb = ImageProblemGenerator(D, lmbd, batch_size=batch_size, seed=seed)
    else:
        # Fixed typo in the error message ("reconized" -> "recognized").
        raise NameError("dataset {} not recognized by the script"
                        "".format(dataset))
    return pb, D
Пример #2
0
def get_problem(dataset, K, p, lmbd, rho, batch_size, save_dir, corr=0):
    """Build a problem generator and its dictionary for the given dataset.

    Parameters
    ----------
    dataset : str
        One of 'artificial', 'mnist' or 'images'.
    K : int
        Number of atoms in the dictionary.
    p : int
        Dimension of the data.  For 'images', p is the patch area and its
        integer square root is used as the patch side.
    lmbd : float
        Regularization parameter of the sparse-coding problem.
    rho : float
        Sparsity level of the coefficients ('artificial' only).
    batch_size : int
        Size of the batches produced by the generator.
    save_dir : str
        Directory used to cache the learned MNIST dictionary ('mnist' only).
    corr : float, optional (default 0)
        Correlation level of the coefficients ('artificial' only).
        Bug fix: `corr` was previously referenced without ever being
        defined, raising NameError on the 'artificial' branch; it is now
        a keyword parameter with a neutral default.

    Returns
    -------
    pb : problem generator instance
    D : the dictionary used by the generator

    Raises
    ------
    NameError
        If `dataset` is not one of the supported names.
    """
    # Setup the training constant and a test set
    if dataset == 'artificial':
        from Lcod.simple_problem_generator import SimpleProblemGenerator
        from Lcod.simple_problem_generator import create_dictionary
        D = create_dictionary(K, p, seed=290890)
        pb = SimpleProblemGenerator(D, lmbd, rho=rho, batch_size=batch_size,
                                    corr=corr, seed=422742)
    elif dataset == 'mnist':
        from Lcod.mnist_problem_generator import MnistProblemGenerator
        from Lcod.mnist_problem_generator import create_dictionary_dl
        D = create_dictionary_dl(lmbd, K, N=10000, dir_mnist=save_dir)
        pb = MnistProblemGenerator(D, lmbd, batch_size=batch_size,
                                   dir_mnist=save_dir, seed=42242)
    elif dataset == 'images':
        from Lcod.image_problem_generator import ImageProblemGenerator
        from Lcod.image_problem_generator import create_dictionary_haar
        # p is the patch area; the Haar dictionary needs the patch side.
        p = int(np.sqrt(p))
        D = create_dictionary_haar(p, wavelet='haar')
        pb = ImageProblemGenerator(D, lmbd, batch_size=batch_size,
                                   seed=1234)
    else:
        # Fixed typo in the error message ("reconized" -> "recognized").
        raise NameError("dataset {} not recognized by the script"
                        "".format(dataset))
    return pb, D
    def test_init(self, n_layers):
        """A freshly initialized network's cost equals the classic solver's
        cost after exactly `n_layers` iterations on the same batch."""
        n_atoms, dim, n_samples = 10, 5, 100
        lmbd = .1

        # Random dictionary with unit-norm rows.
        dictionary = np.random.normal(size=(n_atoms, dim))
        dictionary /= np.sqrt((dictionary * dictionary).sum(axis=1))[:, None]

        generator = SimpleProblemGenerator(dictionary, lmbd)
        X, _, Z, lmbd = generator.get_batch(n_samples)
        feed_test = {"Z": Z, "X": X, "lmbd": lmbd}

        # Reference: run the classical solver for exactly n_layers steps
        # (tol=-1 disables early stopping).
        reference = self.classic_class(dictionary)
        reference.optimize(X, lmbd, Z, max_iter=n_layers, tol=-1)

        # The untrained network should reproduce that cost at init.
        network = self.network_class(dictionary, n_layers=n_layers)
        assert np.isclose(network.cost(**feed_test),
                          reference.train_cost[n_layers])

        network.terminate()
        reference.terminate()
Пример #4
0
                ]
    layer_lvl = [v['n_layers'] for v in run_exps]

    # Setup saving variables
    _assert_exist('save_exp')
    save_dir = _assert_exist('save_exp', NAME_EXP)
    _assert_exist(save_dir, 'ckpt')
    save_curve = os.path.join(save_dir, "curve_cost.npy")

    # Setup the training constant and a test set
    if dataset == 'artificial':
        from Lcod.simple_problem_generator import SimpleProblemGenerator
        from Lcod.simple_problem_generator import create_dictionary
        p = 64                 # Dimension of the data
        D = create_dictionary(K, p, seed=290890)
        pb = SimpleProblemGenerator(D, lmbd, rho=rho, batch_size=batch_size,
                                    corr=corr, seed=422742)
    elif dataset == 'mnist':
        from Lcod.mnist_problem_generator import MnistProblemGenerator
        from Lcod.mnist_problem_generator import create_dictionary_dl
        D = create_dictionary_dl(lmbd, K, N=10000, dir_mnist=save_dir)
        pb = MnistProblemGenerator(D, lmbd, batch_size=batch_size,
                                   dir_mnist=save_dir, seed=42242)
    elif dataset == 'images':
        from Lcod.image_problem_generator import ImageProblemGenerator
        from Lcod.image_problem_generator import create_dictionary_haar
        p = 8
        reg_scale = 1e-4
        D = create_dictionary_haar(p)
        pb = ImageProblemGenerator(D, lmbd, batch_size=batch_size,
                                   data_dir='data/VOC', seed=1234)
    elif dataset == 'cifar':
Пример #5
0
    corr = 0                # Correlation level for the coefficients
    eps = 1e-6              # Resolution for the optimization problem
    reg_scale = 1           # scaling of the unitary penalization

    # Extra network params
    lr_init = 5e-2          # Initial learning rate for the gradient descent
    steps = 100             # Number of steps for GD between validation
    batch_size = 300        # Size of the batch for the training

    # Setup the training constant and a test set

    from Lcod.simple_problem_generator import SimpleProblemGenerator
    from Lcod.simple_problem_generator import create_dictionary
    p = 64                 # Dimension of the data
    D = create_dictionary(K, p, seed=290890)
    pb = SimpleProblemGenerator(D, lmbd, rho=rho, batch_size=batch_size,
                                corr=corr, seed=422742)

    sig_test, z0_test, zs_test, _ = pb.get_test(N_test)
    sig_val, z0_val, zs_val, _ = pb.get_batch(N_val)
    C0 = pb.lasso_cost(zs_test, sig_test)

    # Compute optimal values for validation/test sets using ISTA/FISTA
    ista = IstaTF(D, gpu_usage=gpu_usage)
    ista.optimize(X=sig_val, lmbd=lmbd, Z=zs_val,
                  max_iter=10000, tol=1e-8 * C0)
    c_val = ista.train_cost[-1]
    ista.optimize(X=sig_test, lmbd=lmbd, Z=zs_test,
                  max_iter=10000, tol=1e-8 * C0)

    feed_test = {"Z": zs_test, "X": sig_test, "lmbd": lmbd}
    feed_val = {"Z": zs_val, "X": sig_val, "lmbd": lmbd,