# Example 1
# Greedy training of a LAMP network with a learned activation, warm-started
# from a previously trained 16-layer soft-threshold LAMP model.
sensing_matrix = np.load('A_250_500.npy')

tau = 10
# eps does not influence the training result; it only determines which layers
# different signals pass through (a larger eps lowers the average layer count).
eps = 0.01
for depth in range(16, 17):
    problem = problems.bernoulli_gaussian_trial(A=sensing_matrix,
                                                M=250,
                                                N=500,
                                                L=1000,
                                                is_train=True)
    net = networks.build_LAMP_act(problem,
                                  tau,
                                  T=depth,
                                  eps=eps,
                                  shrink='soft',
                                  untied=False)
    stages = train.setup_training(net,
                                  depth,
                                  problem,
                                  tao=tau,
                                  trinit=1e-4,
                                  type='LAMP')
    # Restore the pretrained weights, then train; results are logged to a
    # per-configuration text file.
    session = train.do_training(stages,
                                problem,
                                'T_{}_tao_{}.txt'.format(depth, tau),
                                restorefile='LAMP-soft T=16 trainrate=0.001.npz',
                                maxit=400000,
                                better_wait=10000)
    session.close()
    # Clear the TF1 default graph before the next configuration is built.
    tf.reset_default_graph()
# Example 2
import numpy as np
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow C++ info/warning log spam
import tensorflow as tf

np.random.seed(1)  # fix NumPy's RNG so runs are repeatable
tf.set_random_seed(
    1)  # graph-level TF seed; of limited effect in practice (see TF issue 9171)

# import our problems, networks and training modules
from tools import problems, networks, train

# Create the basic problem structure.
prob = problems.bernoulli_gaussian_trial(
    kappa=None, M=250, N=500, L=1000, pnz=.1,
    SNR=40)  #a Bernoulli-Gaussian x, noisily observed through a random matrix
#prob = problems.random_access_problem(2) # 1 or 2 for compressive random access or massive MIMO

# build a LISTA network to solve the problem and get the intermediate results so we can greedily extend and then refine(fine-tune)
layers = networks.build_LISTA(prob, T=6, initial_lambda=.1, untied=False)

# plan the learning
training_stages = train.setup_training(layers,
                                       prob,
                                       trinit=1e-3,
                                       refinements=(.5, .1, .01))

# do the learning (takes a while)
sess = train.do_training(training_stages, prob, 'LISTA_bg_giid.npz')
# Example 3 (fragment — truncated at both ends)
                                             is_train=True)
    if i >= 1:
        layers = networks.build_LISTA(prob,
                                      T=i,
                                      initial_lambda=.1,
                                      untied=False)
    else:
        layers = networks.build_LISTA(prob,
                                      T=1,
                                      initial_lambda=.1,
                                      untied=False)

# plan the learning
    training_stages = train.setup_training(layers,
                                           prob,
                                           i,
                                           trinit=1e-4,
                                           refinements=(.1, .01, .001),
                                           start=i)

    # do the learning (takes a while)
    if i >= 2:
        sess = train.do_training(
            training_stages,
            prob,
            'LISTA T=' + str(i - 1) + ' trainrate=0.001.npz',
            printfile='layer_' + str(i) +
            '.txt')  #'LISTA T='+str(i-1)+' trainrate=0.001.npz'
    else:
        sess = train.do_training(training_stages,
                                 prob,
                                 'Linear trainrate=0.001.npz',