Code Example #1
File: test_display.py Project: baylabs/hdl
def test_display_multilayer():

    from hdl.display import display_multilayer

    # load a multilayer model
    from hdl.models import ConvSparseSlowModel, SparseSlowModel
    from hdl.hierarchical_learners import HDL
    from hdl.config import state_dir
    import os

    from machines_vs_neurons.machines import MODEL_YouTubeFacesCrop60

    load_models = [
        os.path.split(MODEL_YouTubeFacesCrop60)[0],
        'M_vs_N_HDL_2layer_faces_2013-03-07_22-12-09/layer_1_2013-03-07_22-12-09',
    ]
    output_functions = ['proj_rect_sat', 'proj']

    total_layers = len(load_models)
    model_sequence = []
    for layer in range(total_layers):

        m = SparseSlowModel()

        m.load(os.path.join(state_dir, load_models[layer], 'model.model'))

        model_sequence.append(m)

    hdl_learner = HDL(model_sequence=model_sequence,
                      datasource=None,
                      output_functions=output_functions,)

    display_multilayer(hdl_learner)
Code Example #2
File: learn_kairos_hdl.py Project: baylabs/hdl
import os
import hdl
reload(hdl)

from hdl.models import SparseSlowModel
from hdl.hdl import HDL

from hdl.config import tstring, state_dir

timestring = tstring()
model_base_name = 'HDL_loga_' + timestring + '/layer_%s'

m1 = SparseSlowModel()
layer1_name = 'SparseSlowModel_patchsz020_N512_NN512_l2_subspacel1_dist_2012-05-24_17-33-06/model.model'
fname = os.path.join(state_dir,layer1_name)
m1.load(fname)
m1.model_name = model_base_name % '1'

model_sequence = [
    m1,
    SparseSlowModel(patch_sz=None, N=512,  T=48, sparse_cost='subspacel1', slow_cost='dist', perc_var=99.9, tstring=timestring, model_name=model_base_name % '2'),
    SparseSlowModel(patch_sz=None, N=256,  T=48, sparse_cost='l1', slow_cost=None, perc_var=99.9, tstring=timestring, model_name=model_base_name % '3')]

hdl_learner = HDL(model_sequence=model_sequence, datasource='PLoS09_Cars_Planes', output_function='proj_loga')

hdl_learner.learn(layer_start=1)
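# learn(layer_start=1) presumably resumes training at the second layer, reusing
# the already-trained first-layer model loaded above (assumption about the
# HDL.learn API; the loaded layer is only renamed, not retrained).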
Code Example #3
File: parallel_learners.py Project: mczhu/hdl
        def setup_model_seq(patch_sz,M,N,NN,D,T,
                        sparse_cost,slow_cost,
                        lam_sparse,lam_slow,lam_l2,
                        inputmean,whitenmatrix,dewhitenmatrix,zerophasewhitenmatrix,A
                        ):

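            # Imports are kept inside the function because, judging from the module
            # name parallel_learners.py, this closure is presumably shipped to remote
            # worker engines, each of which builds its own global_model_sequence.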
            import numpy as np
            import theano
            from hdl.models import SparseSlowModel

            global global_model_sequence

            model=SparseSlowModel()

            model.patch_sz = patch_sz
            model.M = M
            model.D = D
            model.N = N
            model.NN = NN
            model.T = T
            model.sparse_cost = sparse_cost
            model.slow_cost = slow_cost
            model.inputmean = inputmean
            model.whitenmatrix = whitenmatrix
            model.dewhitenmatrix = dewhitenmatrix
            model.zerophasewhitenmatrix = zerophasewhitenmatrix

            old_type = type(A)
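            # Wrap the basis and penalty weights as Theano shared variables in the
            # worker's floatX precision; setup(init=False) presumably recompiles the
            # model's functions without re-initializing A (assumption about the API).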
            model.A = theano.shared(A.astype(theano.config.floatX))
            model.lam_sparse = theano.shared(getattr(np,theano.config.floatX)(lam_sparse))
            model.lam_slow = theano.shared(getattr(np,theano.config.floatX)(lam_slow))
            model.lam_l2 = theano.shared(getattr(np,theano.config.floatX)(lam_l2))
            #model._reset_on_load()
            new_type = type(model.A)
            model.setup(init=False)

            global_model_sequence.append(model)

            return old_type, new_type, type(model.lam_sparse), model.lam_sparse.get_value()
Code Example #4
File: explore_model.py Project: baylabs/hdl
def explore_pairwise_ampphase():

    small_value = .001
    phase_small_value = np.exp(-1.)

    print 'Loading model'
    # faces YouTube
    model_name = 'SparseSlowModel_patchsz048_N1024_NN1024_l2_subspacel1_None_2012-02-21_12-37-25/SparseSlowModel_patchsz048_N1024_NN1024_l2_subspacel1_None.model'
    datasource = 'YouTubeFaces_aligned'

    # faces TFD (note: the TFD model path is commented out, but this datasource
    # assignment still overrides 'YouTubeFaces_aligned' above)
    #model_name = 'SparseSlowModel_patchsz048_N512_NN512_l2_subspacel1_None_2012-03-05_11-42-48/SparseSlowModel_patchsz048_N512_NN512_l2_subspacel1_None.model'
    datasource = 'TorontoFaces48'

    fname = os.path.join(state_dir,model_name)
    m = SparseSlowModel()
    m.load(fname)
    #m.inference_params['u_init_method'] = 'proj'
    #m.inference_params['FISTAargs']['maxiter'] = 40
    #m.inference_params['FISTAargs']['maxline'] = 40
    #m.inference_params['FISTAargs']['errthres'] = 1e-8
    #m.inference_params['FISTAargs']['verbose'] = True
    #m.lam_sparse.set_value(getattr(np,hdl.models.theano.config.floatX)(m.lam_sparse.get_value()*.1))
    #m.lam_sparse.set_value(getattr(np,hdl.models.theano.config.floatX)(.2))
    #m.reset_functions()
    l = BaseLearner(datasource=datasource,model=m)
    l.get_databatch()

    from hdl.config import fig_dir
    savepath = os.path.join(fig_dir,m.model_name + '_' + m.tstring,'explore_distribution')
    if not os.path.isdir(savepath): os.makedirs(savepath)

    print 'Get data'
    display_batches = False
    batch_size = 1000
    num_images = l.images.shape[0]
    batches = int(np.ceil(num_images / float(batch_size)))  # float division so the last partial batch is not dropped
    u_list = []
    snr_list = []
    A = m.A.get_value()
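    # For each batch: infer the latent coefficients, reconstruct the patches in
    # pixel space (dewhitenmatrix . A . u + inputmean), and record the per-patch
    # reconstruction SNR.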
    for bind in range(batches):
        batch = l.images[bind*batch_size:(bind+1)*batch_size,:,:]
        batch = np.double(batch.reshape((batch.shape[0],batch.shape[1]**2)).T)
        u_batch = m.inferlatent(m.preprocess(batch.copy()))
        batchhat = np.dot(m.dewhitenmatrix,np.dot(A,u_batch)) + m.inputmean
        error = batch - batchhat
        snr = -10.*np.log10(np.var(error,0)/np.var(batch,0))
        u_list.append(u_batch)
        snr_list.append(snr)
        print '%d->%d'%(bind*batch_size,(bind+1)*batch_size)

        if display_batches:
            arr = display_patches(batch-127.5,m.patch_sz,1,normalize=False)
            fname = os.path.join(savepath,'batch_%d_%d.png'%(bind,batch_size))
            toimage(np.floor(.5*(arr+1)*255)).save(fname)
            arr = display_patches(batchhat-127.5,m.patch_sz,1,normalize=False)
            fname = os.path.join(savepath,'batch_rec_%d_%d.png'%(bind,batch_size))
            toimage(np.floor(.5*(arr+1)*255)).save(fname)

    u = np.hstack(u_list)
    snr = np.hstack(snr_list)
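    # With the subspacel1 sparse cost, consecutive (even/odd) rows of u form pairs;
    # each pair is treated as a 2-vector whose amplitude and phase are taken below.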
    amp = np.sqrt(u[::2,:]**2 + u[1::2,:]**2)
    phase = np.arctan2(u[::2,:], u[1::2,:])
    print 'num indices above %f, %d'%(small_value,np.sum(amp>small_value))
    print 'num indices above %f, %d'%(phase_small_value,np.sum(amp>phase_small_value))

    print 'Save distributions...'
    pinds = range(10) + [10,12,13,48,49] + range(100,130)

    plt.figure(1)
    plt.clf()
    plt.hist(snr.ravel(),101)
    plt.title('SNR of reconstructions')
    fname = os.path.join(savepath,'SNR_rec_lam_%2.2e_uinit_%s.png'%(m.lam_sparse.get_value(),m.inference_params['u_init_method']))
    plt.savefig(fname)

    plt.figure(1)
    plt.clf()
    plt.subplot(1,2,1)
    plt.hist(amp.ravel(),101)
    plt.title('amp values')
    plt.subplot(1,2,2)
    plt.hist(np.log(amp.ravel()[amp.ravel() > 0.]),101)
    plt.title('log(amp) values')
    fname = os.path.join(savepath,'amp_lam_%2.2e_uinit_%s.png'%(m.lam_sparse.get_value(),m.inference_params['u_init_method']))
    plt.savefig(fname)

    for iind, pind in enumerate(pinds):
        savedir = os.path.join(savepath,str(pind))
        if not os.path.isdir(savedir): os.makedirs(savedir)

        for mind in range(amp.shape[0]):
            if mind == pind: continue
            #if mind > 10: continue
            plt.figure(1)
            plt.clf()

            valind = (amp[pind,:] > small_value) & (amp[mind,:] > small_value)

            plt.subplot(1,2,1)
            H, xedges, yedges = np.histogram2d(np.log(amp[pind,valind]),np.log(amp[mind,valind]),bins=16)
            plt.imshow(H,interpolation='nearest')
            plt.title('amp %d amp %d'%(pind,mind))

            valind = (amp[pind,:] > phase_small_value) & (amp[mind,:] > phase_small_value)

            plt.subplot(1,2,2)
            H, xedges, yedges = np.histogram2d(phase[pind,valind],phase[mind,valind],bins=16)
            plt.imshow(H,interpolation='nearest')
            plt.title('phase %d phase %d'%(pind,mind))

            fname = os.path.join(savedir, 'dist_%d_%d.png'%(pind,mind))
            plt.savefig(fname)
        print 'Done with %d, %d/%d'%(pind,iind,len(pinds))
Code Example #5
File: generate_stimuli.py Project: baylabs/hdl
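# NOTE: this listing begins inside a function body. Judging from the call in the
# __main__ block below, a plausible (assumed) signature would be:
#   def generate_sparsespace(m, numstimuli, sparsity=10, fig_num=1):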
    binomial_p = float(sparsity)/m.NN

    rvals = np.random.randn(m.NN,numstimuli)
    rvals *= np.random.binomial(1,binomial_p,size=rvals.shape)
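    # Each stimulus keeps on average `sparsity` active units: Gaussian coefficients
    # masked by a Bernoulli(sparsity/NN) pattern, then mapped back to pixel space.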

    patches = np.dot(m.dewhitenmatrix,np.dot(m.A,rvals))

    array = display_patches(patches,m.patch_sz,fig_num=fig_num)

    savepath = os.path.join(fig_dir,m.model_name + '_' + m.tstring)
    if not os.path.isdir(savepath): os.makedirs(savepath)
    fname = os.path.join(savepath, 'Sparse_patches_%d.png'%int(sparsity))
    toimage(np.floor(.5*(array+1)*255)).save(fname)

if __name__ == '__main__':

    #model_name = 'SparseSlowModel_patchsz064_N2048_NN2048_l2_l1_None_2012-02-05_15-29-08/SparseSlowModel_patchsz064_N2048_NN2048_l2_l1_None.model'

    # faces
    model_name = 'SparseSlowModel_patchsz048_N1024_NN1024_l2_l1_None_2012-02-13_16-12-17/SparseSlowModel_patchsz048_N1024_NN1024_l2_l1_None.model'

    fname = os.path.join(state_dir,model_name)
    m = SparseSlowModel()
    m.load(fname, reset_theano=False)

    numstimuli = 20**2

    stimuli = generate_whitenedspace(m,numstimuli)

    for sparsity in [1, 2, 3, 4, 5, 10, 20, 40]:
        stimuli = generate_sparsespace(m,numstimuli,sparsity=sparsity)
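# --- Not part of generate_stimuli.py: a minimal, self-contained sketch (with
# assumed NN / numstimuli / sparsity values) verifying that the Bernoulli mask
# used above keeps roughly `sparsity` active coefficients per stimulus. ---
import numpy as np

NN, numstimuli, sparsity = 512, 1000, 5
rvals = np.random.randn(NN, numstimuli)
rvals *= np.random.binomial(1, float(sparsity)/NN, size=rvals.shape)
print 'mean active units per stimulus: %.2f' % ((rvals != 0).sum(0).mean())  # ~ sparsity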