Example No. 1
def run(patch_size,n_batch,pca_frac,overcomplete,learning_rate,final_learning_rate,n_grad_step,loss_type,n_gauss_dim,n_lat_samp,seed,param_save_freq,log_freq,sigma,s1,s2,S,device,PCA_truncation,dataset):

    os.environ["CUDA_VISIBLE_DEVICES"]=str(device)                  # Pin the job to the requested GPU
    np.random.seed(seed)                                            # Seed NumPy's RNG for reproducibility
            
    dirname = util.get_directory(direc="./model_output/",tag = loss_type + "_{}".format(n_gauss_dim))
    params  = {
        "dataset":dataset,
        "patch_size":patch_size,
        "n_batch":n_batch,
        "pca_frac":pca_frac,
        "overcomplete":overcomplete,
        "learning_rate":np.float32(learning_rate),
        "final_learning_rate":np.float32(final_learning_rate),
        "pca_truncation":PCA_truncation,
        "n_grad_step":n_grad_step,
        "loss_type":loss_type,
        "n_gauss_dim":n_gauss_dim,
        "n_lat_samp":n_lat_samp,
        "sigma":np.float32(sigma),
        "param_save_freq":param_save_freq,
        "log_freq":log_freq,
        "s1":np.float32(s1),
        "s2":np.float32(s2),
        "S":np.float32(S)
    }
    
    util.dump_file(dirname +"/model_params",params)
    
    LOG = log.log(dirname + "/logfile.csv")

    netpar    = prepare_network(params)                             # Build the network graph and return its tensors
    var       = netpar["variance"]                                  # Variance tensor (not used below)
    loss_exp  = netpar["loss_exp"]                                  # Loss to be minimized
    recon_err = netpar["recon_err"]                                 # Reconstruction-error tensor
    images    = netpar["images"]                                    # Input placeholder
    data      = netpar["data"]                                      # Training data array
    varif     = netpar["vardat"]                                    # Validation data array
    
    # Per-step decay factor so the LR falls geometrically from learning_rate to final_learning_rate over n_grad_step steps:
    if final_learning_rate < learning_rate:
        LR_factor = np.float32(np.exp(-np.log(learning_rate/final_learning_rate)/n_grad_step))
    else:
        print("Final LR must be lower than initial LR! Overriding with LR_factor = 1")
        LR_factor = np.float32(1)
        
    LR    = tf.Variable(np.float32(learning_rate),trainable = False)# Learning rate as a non-trainable TF variable
    adam  = tf.train.AdamOptimizer(learning_rate = LR)              # Set up the Adam optimizer
    train = adam.minimize(loss_exp)                                 # Op that applies one gradient step
    update_LR = tf.assign(LR,LR*LR_factor)                          # Op that decays the learning rate by LR_factor
    run_training_loop(data,varif,images,netpar["mean"],n_batch,train,loss_exp,recon_err,LOG,dirname,log_freq,n_grad_step,param_save_freq,update_LR)
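The form of LR_factor above follows from requiring that multiplying the learning rate by the same factor once per gradient step takes it from learning_rate down to final_learning_rate after n_grad_step steps. A minimal arithmetic check of that identity (plain NumPy, with made-up example values):

import numpy as np

lr0, lr_final, n_steps = 1e-3, 1e-5, 10000              # hypothetical values, not from the script
factor = np.exp(-np.log(lr0 / lr_final) / n_steps)      # same formula as LR_factor above
print(np.isclose(lr0 * factor ** n_steps, lr_final))    # True: the LR lands on lr_final after n_steps decays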
Example No. 2
def run(patch_size, n_batch, pca_frac, overcomplete, learning_rate,
        n_grad_step, loss_type, n_gauss_dim, n_lat_samp, seed, param_save_freq,
        log_freq, sigma, s1, s2, S, device, PCA_truncation, dataset):

    os.environ["CUDA_VISIBLE_DEVICES"] = str(device)
    np.random.seed(seed)

    dirname = util.get_directory(direc="./model_output/",
                                 tag=loss_type + "_{}".format(n_gauss_dim))

    params = {
        "dataset": dataset,
        "patch_size": patch_size,
        "n_batch": n_batch,
        "pca_frac": pca_frac,
        "overcomplete": overcomplete,
        "learning_rate": np.float32(learning_rate),
        "pca_truncation": PCA_truncation,
        "n_grad_step": n_grad_step,
        "loss_type": loss_type,
        "n_gauss_dim": n_gauss_dim,
        "n_lat_samp": n_lat_samp,
        "sigma": np.float32(sigma),
        "param_save_freq": param_save_freq,
        "log_freq": log_freq,
        "s1": np.float32(s1),
        "s2": np.float32(s2),
        "S": np.float32(S)
    }

    util.dump_file(dirname + "/model_params", params)

    LOG = log.log(dirname + "/logfile.csv")

    netpar = prepare_network(params)
    var = netpar["variance"]
    loss_exp = netpar["loss_exp"]
    recon_err = netpar["recon_err"]
    images = netpar["images"]
    data = netpar["data"]
    varif = netpar["vardat"]

    LR = tf.Variable(np.float32(learning_rate), trainable=False)
    adam = tf.train.AdamOptimizer(learning_rate=LR)
    train = adam.minimize(loss_exp)

    run_training_loop(data, varif, images, n_batch, train, loss_exp, recon_err,
                      LOG, dirname, log_freq, n_grad_step, param_save_freq)
Example No. 3
def get_GSM_data(param):
    nang = param["nori"]
    scale = param["scale"]
    fdist = param["fdist"]

    fname = "GSM_{}_{}_{}".format(scale, nang, fdist)

    imlist = glob.glob("/home/gbarello/data/BSDS300/images/*.jpg")

    Clist = []

    try:
        # Use cached filter samples if they exist
        Clist = utils.load_file("/home/gbarello/data/datasets/GSM_filters/" +
                                fname)
        LOG.log("Using pre-saved filters")

    except Exception:
        LOG.log("Measuring Filters")
        for i in imlist:
            Clist.append(proc.get_phased_filter_samples(i, nang, scale, fdist))
            LOG.log(i + "\t{}".format(len(Clist[-1])))

        utils.dump_file(Clist,
                        "/home/gbarello/data/datasets/GSM_filters/" + fname)

    #we want to sample from each image equally, so we find the list with the fewest entries
    mlen = min([len(c) for c in Clist])
    #randomise the lists and concatenate them all into one list
    Clist = np.array([c[np.random.choice(range(len(c)), mlen)] for c in Clist])
    Clist = np.array([k for c in Clist for k in c])

    fac = np.array([IQR(Clist[:, :, :, 0]), IQR(Clist[:, :, :, 1])])

    Clist = Clist / np.array([[fac]])

    np.random.shuffle(Clist)

    return Clist, fac
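IQR is not defined in this snippet; it is presumably an interquartile-range helper used to give the two filter channels comparable spread before shuffling. A minimal sketch of such a helper, assuming the usual 75th-minus-25th-percentile definition:

import numpy as np

def IQR(x):
    # Interquartile range of the flattened input (75th minus 25th percentile)
    q75, q25 = np.percentile(x, [75, 25])
    return q75 - q25

print(IQR(np.random.randn(1000)))   # roughly 1.35 for standard-normal samples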
Example No. 4
print("Min : {}".format(np.min(np.reshape(DATA, [-1]))))
print("IQR : {}".format(IQR(np.reshape(DATA, [-1]))))

np.random.shuffle(DATA)

#run the fit
C, Q, F, P, LOUT = TRAIN.fit_general_MGSM(DATA,
                                          segmentation,
                                          EMreps=args["em_steps"],
                                          batchsize=args["minibatch_size"],
                                          lr=args["learning_rate"],
                                          ngradstep=args["n_grad_steps"],
                                          buff=args["stochastic_buffer"],
                                          fq_shared=args["fq_shared"],
                                          f_ID=args["f_ID"])

#once it is complete, make the directory and save the data
direc = utils.get_directory(direc="./model_files/", tag="model_file")

np.savetxt(direc + "/fac.csv", fac)
np.savetxt(direc + "/train_log.csv", LOUT)
utils.save_dict(direc + "/parameters", args)
utils.dump_file(direc + "/paths.pkl", paths)
utils.dump_file(direc + "/segs.pkl", segmentation)
utils.dump_file(direc + "/kernels.pkl", kernels)

utils.dump_file(direc + "/C.pkl", C)
utils.dump_file(direc + "/Q.pkl", Q)
utils.dump_file(direc + "/F.pkl", F)
utils.dump_file(direc + "/P.pkl", P)
Example No. 5
def run_training_loop(data,vdata,input_tensor,pos_mean,batch_size,train_op,loss_op,recerr_op,log,dirname,log_freq,n_grad_step,save_freq,update_LR):
    # Train the model: take gradient steps, log train/validation losses, and periodically save checkpoints
    def var_loss(session,vdat,nbatch = 10):
        # Estimate the validation loss of the VAE on up to `nbatch` batches
        D     = split_by_batches(vdat,batch_size,shuffle = False)   # Partition the validation data into batches (no shuffling)
        loss  = 0                                                   # Initialize the loss to zero
        rerr  = 0                                                   # Initialize the reconstruction loss to zero
        nb    = 0                                                   # Initialize a counter over the number of batches
        means = []                                                  # Initialize mean storage array to an empty array
        for d in D:                                                 # Loop through the different batches
            nb   += 1                                               # Update counter
            l,r,m = session.run([loss_op,recerr_op,pos_mean],{input_tensor:d}) # Evaluate loss, reconstruction error and posterior mean on this batch
            loss += l                                               # Update the loss function
            rerr += r                                               # Update the reconstruction error
            means.append(m)                                         # Append the mean to the mean storage array
            if nb == nbatch:                                        # Check if we have reached the requested number of batches
                break                                               #   ... if so, stop early
        loss /= nb                                                  # Average the loss over the batches actually processed
        rerr /= nb                                                  # Average the reconstruction error over the batches actually processed
        return loss,rerr,np.concatenate(means,axis = 0)             # Return the loss, reconstruction error, and concatenated means
    
    init   = tf.global_variables_initializer()                      # Op to initialize all TF variables
    config = tf.ConfigProto()                                       # Initialize the tensorflow session configuration
    config.gpu_options.allow_growth = True                          # Allow GPU memory to grow as needed
    sess   = tf.Session(config=config)                              # Start a tensorflow session
    K.set_session(sess)                                             # Register the session with the Keras backend
    sess.run(init)                                                  # Initialize the variables
    

    nloss   = 0                                                     # Loss counter (unused below)
    t1      = time.time()                                           # Record start time
    av_time = -1                                                    # Moving average of time per log interval (-1 = not yet measured)
    efrac   = .9                                                    # Decay factor for the moving time average

    log.log(["grad_step","loss","recloss","var_loss","var_rec","learning_rate","time_rem"],PRINT = True)

    t_loss_temp = []                                                # Losses accumulated since the last log entry
    t_rec_temp  = []                                                # Reconstruction errors accumulated since the last log entry
    lrflag      = True                                              # Flag (unused below)
    saver       = tf.train.Saver(max_to_keep = 1000)                # Saver for periodic parameter checkpoints
    
    for grad_step in range(n_grad_step + 1):                        # Main training loop
        batch          = data[np.random.choice(np.arange(len(data)),batch_size)]     # Get a batch of data
        _,loss,recloss,newLR = sess.run([train_op,loss_op,recerr_op,update_LR],{input_tensor:batch}) # Take a gradient step, evaluate the losses and decay the LR
        t_loss_temp.append(loss)                                    # Append loss to the running list for this log interval
        t_rec_temp.append(recloss)                                  # Append reconstruction error to the running list for this log interval
        
        if grad_step % log_freq  == 0:
            if grad_step == 0:
                av_time = -1
            elif grad_step != 0 and  av_time < 0:
                av_time = (time.time() - t1)
            else:
                av_time = efrac*av_time + (1. - efrac)*(time.time() - t1)
                
            t1               = time.time()                          # Reset the interval timer
            trem             = av_time*((n_grad_step)+1-grad_step)  # Remaining steps times average time per log interval
            trem             = trem / log_freq / 60. / 60.          # Divide by steps per interval and convert to hours
            loss             = np.mean(t_loss_temp)                 # Mean training loss over the interval
            recloss          = np.mean(t_rec_temp)                  # Mean reconstruction error over the interval
            vloss,vrec,means = var_loss(sess,vdata)                 # Validation loss, reconstruction error and posterior means
            log.log([grad_step,loss,recloss,vloss,vrec,newLR,trem],PRINT = True) # Write a log entry
            t_loss_temp = []                                        # Reset the interval accumulators
            t_rec_temp  = []

        if grad_step % save_freq == 0:                              # Periodically checkpoint
            util.dump_file(dirname + "/training_means_{}.pkl".format(grad_step),means)
            saver.save(sess,dirname + "/saved_params/saved_model_{}".format(str(grad_step))) # Save the model parameters

    saver.save(sess,dirname + "/saved_params/saved_model_{}".format("final")) # Save the final parameters
    sess.close()                                                    # Close the tensorflow session
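split_by_batches is used by var_loss but not defined here; a minimal sketch of what it might do, assuming it simply slices an array into consecutive fixed-size batches with an optional shuffle:

import numpy as np

def split_by_batches(data, batch_size, shuffle=True):
    # Yield consecutive full batches of `batch_size` rows (any trailing remainder is dropped),
    # optionally in shuffled order
    idx = np.arange(len(data))
    if shuffle:
        np.random.shuffle(idx)
    for start in range(0, len(data) - batch_size + 1, batch_size):
        yield data[idx[start:start + batch_size]]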
Example No. 6
def run_training_loop(data, vdata, input_tensor, pos_mean, batch_size,
                      train_op, loss_op, recerr_op, log, dirname, log_freq,
                      n_grad_step, save_freq, update_LR):
    def var_loss(session, vdat, nbatch=10):
        D = split_by_batches(vdat, batch_size, shuffle=False)
        loss = 0
        rerr = 0
        nb = 0
        means = []
        for d in D:
            nb += 1
            l, r, m = session.run([loss_op, recerr_op, pos_mean],
                                  {input_tensor: d})
            loss += l
            rerr += r
            means.append(m)
            if nb == nbatch:
                break
        loss /= nbatch
        rerr /= nbatch

        return loss, rerr, np.concatenate(means, axis=0)

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    nloss = 0

    t1 = time.time()
    av_time = -1
    efrac = .9

    log.log([
        "grad_step", "loss", "recloss", "var_loss", "var_rec", "learning_rate",
        "time_rem"
    ],
            PRINT=True)

    t_loss_temp = []
    t_rec_temp = []

    lrflag = True

    saver = tf.train.Saver(max_to_keep=1000)

    for grad_step in range(n_grad_step + 1):

        batch = data[np.random.choice(np.arange(len(data)), batch_size)]

        _, loss, recloss, newLR = sess.run(
            [train_op, loss_op, recerr_op, update_LR],
            {input_tensor: batch
             })  # Run a session to get the loss/reconstruction error

        t_loss_temp.append(loss)
        t_rec_temp.append(recloss)

        if grad_step % log_freq == 0:
            if grad_step == 0:
                av_time = -1
            elif grad_step != 0 and av_time < 0:
                av_time = (time.time() - t1)
            else:
                av_time = efrac * av_time + (1. - efrac) * (time.time() - t1)

            t1 = time.time()
            trem = av_time * ((n_grad_step) + 1 - grad_step)
            trem = trem / log_freq / 60. / 60.

            loss = np.mean(t_loss_temp)
            recloss = np.mean(t_rec_temp)

            vloss, vrec, means = var_loss(sess, vdata)

            log.log([grad_step, loss, recloss, vloss, vrec, newLR, trem],
                    PRINT=True)

            t_loss_temp = []
            t_rec_temp = []

        if grad_step % save_freq == 0:
            util.dump_file(
                dirname + "/training_means_{}.pkl".format(grad_step), means)
            saver.save(
                sess, dirname +
                "/saved_params/saved_model_{}".format(str(grad_step)))

    saver.save(sess, dirname + "/saved_params/saved_model_{}".format("final"))
    sess.close()
Example No. 7
def run():
    def c2(c):
        return np.array([[1., c], [c, 1.]])

    def cov(n, w, dr=0):
        distance = np.array([[((i - j + (n / 2.)) % n) - n / 2.
                              for i in range(n)] for j in range(n)])

        gauss = np.exp(-(distance**2) / (2. * w * w)) - dr

        out = np.dot(gauss, gauss)

        return out / out[0, 0]

    def inp(n, w, c1, c2, i):
        stim1 = np.array([((j + (n / 2.)) % n) - (n / 2.) for j in range(n)])
        stim2 = np.array([(((j - (i % n)) + (n / 2.)) % n) - (n / 2.)
                          for j in range(n)])

        gauss1 = np.exp(-(stim1**2) / (2. * w * w))
        gauss2 = np.exp(-(stim2**2) / (2. * w * w))

        return c1 * gauss1 + c2 * gauss2

    LO = []
    HI = []
    nnLO = []
    nnHI = []

    ip = np.array([[1, 0], [0, 1], [1, 1]])

    for c in np.linspace(-.9, .9, 20):
        print(c)
        cov = c2(c)
        nc = np.identity(2) * (1 + c)

        sHI = GSM.gexp(0, 20 * ip, cov, nc * (2 * 2), precom=False)
        sLO = GSM.gexp(0, ip, cov, nc * (2 * 2), precom=False)
        snnLO = GSM.gnn(ip, cov)[:, 0]
        snnHI = GSM.gnn(20 * ip, cov)[:, 0]

        HI.append(sHI[2] / (sHI[0]))
        LO.append(sLO[2] / (sLO[0]))
        nnHI.append(snnHI[2] / (snnHI[0]))
        nnLO.append(snnLO[2] / (snnLO[0]))

    HI = np.array(HI)
    LO = np.array(LO)
    nnHI = np.array(nnHI)
    nnLO = np.array(nnLO)

    import utilities as utils

    utils.dump_file("./inference/2Dmodel_stuff.pkl", [HI, LO, nnHI, nnLO])
    exit()  # NOTE: the plotting code below is unreachable as written

    plt.plot(HI[:, 0], [1 for x in HI], 'k--', linewidth=1)

    plt.plot(HI[:, 0], HI[:, 1], 'k')
    plt.plot(nnHI[:, 0], nnHI[:, 1], 'k--')
    plt.plot(LO[:, 0], LO[:, 1], 'r')
    plt.plot(nnLO[:, 0], nnLO[:, 1], 'r--')

    plt.xlabel("Correlation")
    plt.ylabel("Modulation Ratio")

    plt.xlim(-1, 1)
    plt.ylim(0, 2)

    plt.xticks([-.5, 0, .5])

    plt.tight_layout()

    plt.savefig("./2DAIparam.pdf")
    print("done with AI")
    #what we want to do here is to look at COS

    con = 20 * np.logspace(-2, 0, 50)

    NN = 2
    WW = np.array([.15])

    out1 = []
    out2 = []

    nnout1 = []
    nnout2 = []

    out12 = []
    out22 = []

    nnout12 = []
    nnout22 = []

    NCOV = np.identity(2)

    k = .75

    I1 = np.array([[c1, c1 / 10] for c1 in con])
    I2 = np.array([[c1 + con[-1] / 10, con[-1] + c1 / 10] for c1 in con])

    I12 = np.array([[c1, 0] for c1 in con])
    I22 = np.array([[c1, con[-1]] for c1 in con])

    print(I1.shape)
    print(I2.shape)

    CC = c2(.6)
    NCOV = np.identity(2)

    out1 = GSM.gexp(0, I1, CC, NCOV / (k * k), precom=False)
    out2 = GSM.gexp(0, I2, CC, NCOV / (k * k), precom=False)

    nnout1 = GSM.gnn(I1, CC).T
    nnout2 = GSM.gnn(I2, CC).T

    out12 = GSM.gexp(0, I12, CC, NCOV / (k * k), precom=False)
    out22 = GSM.gexp(0, I22, CC, NCOV / (k * k), precom=False)

    nnout12 = GSM.gnn(I12, CC).T
    nnout22 = GSM.gnn(I22, CC).T

    print(nnout2)

    plt.figure()

    plt.plot(con / con[-1], out1, 'r')
    plt.plot(con / con[-1], out2, 'k')

    plt.plot(con / con[-1], nnout1[0], 'r--')
    plt.plot(con / con[-1], nnout2[0], 'k--')

    plt.xlabel("contrast")
    plt.ylabel("Respose")

    #    plt.yscale("log")
    plt.xscale("log")

    plt.tight_layout()

    plt.savefig("./2Dssfig_0.pdf")

    plt.figure()

    plt.plot(con / con[-1], out12, 'r')
    plt.plot(con / con[-1], out22, 'k')

    plt.plot(con / con[-1], nnout12[0], 'r--')
    plt.plot(con / con[-1], nnout22[0], 'k--')

    plt.xlabel("contrast")
    plt.ylabel("Respose")

    #    plt.yscale("log")
    plt.xscale("log")

    plt.tight_layout()

    plt.savefig("./2Dssfig_1.pdf")
Example No. 8
          +
          test.s_GRATC(args["con"],a + args["aux_angle"]*np.pi,k,r,T,surr = 0., A = A,offset = 0)]

    if args["aux_scale"] < 0:
        args["aux_scale"] = .3 * fullsize / (2 * pars["wavelengths"][0] * pars["filter_distance"])

    grats = [[f(o,0,k,k*pars["filter_distance"]*args["aux_scale"],fullsize) for o in np.logspace(-2,0,args["npnt"]) for f in LF] for k in pars["wavelengths"]]

elif args["type"] == "test":
    grats = [[stim.make_grating(o,0,k,fullsize/2,fullsize, A = A) for o in [.5]] for k in pars["wavelengths"][:1]]

elif args["type"] == "MI":
    import att_MGSM_responses as aresp

    RES,VRES = aresp.mutual_information_data(data,args["dt"])
    utils.dump_file(direc + "/MI_responses_{}.pkl".format(args["dt"]),RES)
    utils.dump_file(direc + "/MI_responses_variance_{}.pkl".format(args["dt"]),VRES)

    exit()
elif args["type"] == "on_off":
    import att_MGSM_responses as aresp

    dt = .5
    RES = aresp.on_off_response(data,args["snr"],dt,10)
    utils.dump_file(direc + "/on_off_responses_{}_{}.pkl".format(args["snr"],dt),RES)

    exit()
elif args["type"] == "nat_MI":
    import att_MGSM_responses as aresp

    RES = aresp.mutual_information_data(data,args["snr"],use_grat = False)
Example No. 9
    ax.plot(cc, [1 for k in range(len(cc))], 'k--')

    plt.xlabel("Contrast")
    plt.ylabel("A.I.")

    plt.tight_layout()

    fig.savefig("./paramfig_NC.pdf")


if __name__ == "__main__":
    import utilities as utils
    import time
    out = []
    NRUN = 1
    for k in range(NRUN):
        print(k)
        t1 = time.time()
        res = run_WDIFF_plots()
        t2 = time.time()

        if k > 0:
            print("Time Left: {} minutes".format(
                ((t2 - t1) * (NRUN - k)) / 60))
        out.append(res)

        utils.dump_file("./inference/TAstim_TA_param_model_stuff.pkl", out)

#    run_k()
#run_cov()
Example No. 10
    def export(self, loc):
        util.dump_file(loc, self)
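This fragment pickles the entire object through util.dump_file. A self-contained sketch of the same pattern, reusing the pickle-wrapper assumption from earlier (the class name and fields below are hypothetical, not from the source):

import pickle

def dump_file(path, obj):
    # Assumed behaviour of util.dump_file: pickle the object to `path`
    with open(path, "wb") as f:
        pickle.dump(obj, f)

class Exportable:
    # Hypothetical stand-in for whatever class defines export() in the snippet
    def __init__(self, params):
        self.params = params

    def export(self, loc):
        # Same pattern as above: serialize the whole instance
        dump_file(loc, self)

Exportable({"sigma": 0.1}).export("/tmp/model.pkl")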