Example #1
    def __init__(self):
        # Input shape
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.gf = 16
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.input_shape = (self.img_rows, self.img_cols, self.channels)
        self.model = self.build_autoencoder()
        op = Adam(0.00001)
        self.model.load_weights(r'./cnnATECSmallDense.h5')
        # self.model.save('./tfModels/corner')
        self.model.compile(loss=['mse'],
                           optimizer=op,
                           metrics=[tf.keras.metrics.MeanSquaredError()])
        self.model.summary()
        
        if tf.test.gpu_device_name():
            print('GPU found')
        else:
            print("No GPU found")
        self.gen_data = gendata.gendata()
        
        self.pathlist = []
        self.testlist = []
        self.pathbglist = []

        #self.pathbglist = glob2.glob(bg_path)

        for files in types:
            self.pathlist.extend(glob2.glob(join(path_input, files)))
    
        print(len(self.pathlist))
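
The excerpt never shows build_autoencoder(). A minimal sketch of a convolutional autoencoder matching the shapes above (128x128x3 input, base filter count gf=16); the actual architecture behind cnnATECSmallDense.h5 is not shown and may well differ:

import tensorflow as tf
from tensorflow.keras import layers, Model

def build_autoencoder(img_shape=(128, 128, 3), gf=16):
    # encoder: two stride-2 convolutions halve the spatial size twice
    inp = layers.Input(shape=img_shape)
    x = layers.Conv2D(gf, 3, strides=2, padding='same', activation='relu')(inp)
    x = layers.Conv2D(gf * 2, 3, strides=2, padding='same', activation='relu')(x)
    # decoder: mirror the encoder with transposed convolutions
    x = layers.Conv2DTranspose(gf * 2, 3, strides=2, padding='same', activation='relu')(x)
    x = layers.Conv2DTranspose(gf, 3, strides=2, padding='same', activation='relu')(x)
    out = layers.Conv2D(img_shape[-1], 3, padding='same', activation='sigmoid')(x)
    return Model(inp, out)
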
Example #2
import time

import numpy as np

# gendata() and the VQ class come from the surrounding project

def experiment(D=10, n=1000, K=10):
    data = gendata(n_sample=n, dim=D, K=50)
    #plt.plot(data[:, 0], data[:, 1], '.')
    #plt.show()

    print(data.shape)
    vq = VQ(D=D, K=K)
    vq.index(data)
    q = np.random.randn(D)
    print('q:', q)

    t1 = time.time()
    i, v = vq.query(q)
    t2 = time.time()
    cost_vq = t2 - t1
    print('query: i', i, 'v:', v, 'd=', np.sum((v - q)**2), 'cost:', cost_vq)

    t1 = time.time()
    y = np.argmin(np.sum((data - q)**2, axis=1))
    t2 = time.time()
    cost_bf = t2 - t1
    print('ground truth:', y, data[y, :], 'd=', np.sum(
        (data[y] - q)**2), 'cost:', cost_bf)

    return cost_vq, cost_bf
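
The VQ class used above (index() to build the structure, query() for an approximate nearest neighbour) is not part of the excerpt. A minimal sketch of such an index, assuming a plain k-means codebook with one candidate list per cell; the project's implementation may differ:

import numpy as np

class VQ:
    def __init__(self, D, K):
        self.D, self.K = D, K

    def index(self, data, iters=20):
        self.data = data
        # initialize K centroids from random samples, then run k-means
        self.centroids = data[np.random.choice(len(data), self.K, replace=False)].copy()
        for _ in range(iters):
            d2 = ((data[:, None, :] - self.centroids[None, :, :]) ** 2).sum(-1)
            assign = d2.argmin(axis=1)
            for k in range(self.K):
                members = data[assign == k]
                if len(members):
                    self.centroids[k] = members.mean(axis=0)
        # one candidate list per codebook cell
        self.cells = [np.where(assign == k)[0] for k in range(self.K)]

    def query(self, q):
        # search only the cell of the nearest centroid (approximate)
        k = ((self.centroids - q) ** 2).sum(axis=1).argmin()
        cand = self.cells[k]
        if len(cand) == 0:  # empty cell: fall back to a full scan
            cand = np.arange(len(self.data))
        i = cand[((self.data[cand] - q) ** 2).sum(axis=1).argmin()]
        return i, self.data[i]
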
Example #3
import random

# gendata, cpl and cg are modules from the surrounding project

def main(data, numberofcurves, stateSens, vfv):  # max data tested at 100000 curves
    showorsave = 0  # to show plots or save
    # entropy lists
    e0 = {}
    unin = []
    # lists and dict for holding output
    lz = []
    e1 = []
    seqs = []
    pbrk = []
    translist = {}
    s2 = []
    ehold = []
    call = []
    # start parameters
    timelength = 10
    timeres = 4
    # seed = 4
    seed = random.uniform(0, 1)
    tcount = 1
    goodnessSens = .01
    # end parameters
    # generate curves if there is no input data
    if len(data) < 1:
        curvedata = gendata.gendata(numberofcurves, timelength, timeres, seed)
    else:
        curvedata = data  # use the curves supplied by the caller
        numberofcurves = len(data)
    msr = 0
    msrchangelocs = []
    tlst = {}
    enc = []
    for i in range(numberofcurves):
        enc.append([cpl.entnew(tlst, max(len(tlst), 2)), i])
        pbrk, locations = cpl.curveprofiler(curvedata[i], goodnessSens, pbrk, stateSens)
        lz.append(locations)
        if msr != len(pbrk):
            msrchangelocs.append(i)
            msr = len(pbrk)
            tlst = {}
            for locs in lz:  # rebuild tlst; avoid shadowing the outer loop index
                tlst = cpl.cptotlst(locs, pbrk, tlst)
        else:
            tlst = cpl.cptotlst(locations, pbrk, tlst)
    for locations in lz:
        cl = cpl.associatetocluster1(locations, pbrk)
        translist, tcount = cpl.transitiontransform(cl, translist, tcount)
        call.append(cl)
    if vfv == 1:
        cg.statsprint1(translist, seqs, numberofcurves, timelength, timeres, stateSens, goodnessSens, len(pbrk) - 1, tcount, curvedata, lz, pbrk, showorsave)
    elif vfv == 2:
        cg.statsprint2(translist, seqs, numberofcurves, timelength, timeres, stateSens, goodnessSens, len(pbrk) - 1, tcount, curvedata, lz, pbrk, call, showorsave)
        cg.behavdict(translist, len(curvedata), showorsave)
        # cg.transplot1(pbrk, translist, seqs, len(curvedata), 1, showorsave)
    cg.eplot(enc, msrchangelocs)  # entropy plot
Example #4
import time

import matplotlib.pyplot as plt

# buildpriors, gendata, maxprob and pdfval are modules from the surrounding project

coeffs = {
    # earlier entries (including 'NN') are truncated in this excerpt
    'T3': 110,
    'B1': 0.3,
    'B2': 0.05,
    'B3': 0.25,
    'B4': 0.10,
    'B5': 0.22,
    'B6': 0.15,
    'e': 0.05
}

NN = coeffs['NN']
Tmeas = 60
Nfreqs = 3

wTpriors = buildpriors.buildpriors(Nfreqs, coeffs)
datai, funci = gendata.gendata(coeffs)

plt.figure(1)
plt.plot(datai)
plt.plot(funci)
plt.show(block=False)

start = time.time()
x = maxprob.maxprob(wTpriors, datai)
end = time.time()

print('Calculation time:')
print(end - start)

postval, h, hbars, evecs, evals = pdfval.pdfval(x, datai)
Example #5
import os
import tensorflow as tf

from gendata import gendata

# Config and tfpm come from the surrounding flowpm-based project

#Generate DATA

bs, nc = 400, 128
seed = 100
ofolder = './recon/L%04d_N%04d_S%04d/' % (bs, nc, seed)
os.makedirs(ofolder, exist_ok=True)
pkfile = '../code/flowpm/Planck15_a1p00.txt'
config = Config(bs=bs, nc=nc, seed=seed, pkfile=pkfile)

truth, data = gendata(config, ofolder)

#################################################################
#Do reconstruction here
print('\nDo reconstruction\n')
tf.reset_default_graph()

kmesh = sum(kk**2 for kk in config['kvec'])**0.5
priorwt = config['ipklin'](kmesh)

linear = tf.get_variable('linmesh',
                         shape=(nc, nc, nc),
                         initializer=tf.random_normal_initializer(),
                         trainable=True)
icstate = tfpm.lptinit(linear, grid, config)
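
config['kvec'] is built upstream of this excerpt. One common construction of such a Fourier-space grid, assuming a periodic box of side bs with nc cells per dimension (fftk here is an illustrative helper, not necessarily the project's):

import numpy as np

def fftk(nc, bs):
    # one-dimensional angular wavenumbers, shaped to broadcast over a 3-D mesh
    kvec = []
    for axis in range(3):
        k = 2 * np.pi * np.fft.fftfreq(nc, d=bs / nc)
        shape = [1, 1, 1]
        shape[axis] = nc
        kvec.append(k.reshape(shape))
    return kvec

kvec = fftk(nc=128, bs=400)
kmesh = sum(kk**2 for kk in kvec) ** 0.5  # |k| on the mesh, as in the snippet
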
Example #6
from tensorflow import keras
from tensorflow.keras import layers

import numpy as np

from loss import loss

from gendata import gendata

data = np.array([gendata(1000, 1) for _ in range(1000)])

fdim = 2



model = keras.Sequential()
# An Embedding layer (input_dim=1, output_dim=8) could be inserted here,
# but this example feeds the real-valued sequences straight to a Dense layer.
#model.add(layers.Embedding(input_dim=1, output_dim=8))

model.add(layers.Input(data.shape[1:]))

model.add(layers.Dense(5, use_bias=False))

model.add(layers.LSTM(fdim, return_sequences=True))
#model.add(layers.SimpleRNN(fdim,return_sequences=True))

#model.add(layers.Reshape((1,1,-1,fdim)))

#model.add(layers.Dense(1))
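
The custom loss imported at the top is never used in the excerpt; presumably the model is compiled with it before training, along these lines (hedged, since the loss signature and the training targets are not shown):

model.compile(optimizer='adam', loss=loss)
model.summary()
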
Example #7
        grads = gradients2(W, X, Y)
        if last_cost is not None and cost is not None:
            change = (last_cost - cost) / last_cost
            if change < 0:  # bold driver: cost went up, so halve the step size
                l /= 2.0
            elif change * 100 < converge_percent:
                return W
        l = l * 1.1
        W -= grads * l
        last_cost = cost
    return W


if __name__ == '__main__':
    N = 8
    C = 30
    M = 1000
    X, Y, factors = gendata(n=N, m=M, c=C, seed=1)
    #X = np.hstack((X, np.ones((M,1)))) # Add column of 1's

    def print_progress(i, cost, l):
        print(i, cost, l)

    W = batch_gradient_descent(X, Y, C, maxiter=1000, progress_callback=print_progress)
    Y2 = predict(W, X)
    print(Y == Y2)
    print(Y)
    print(W)
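
The fragment above begins mid-loop; judging by the call in the __main__ block, the enclosing function is batch_gradient_descent(X, Y, C, maxiter, progress_callback). A self-contained sketch of the same bold-driver loop, substituting a least-squares cost for the project's gradients2 (which the excerpt does not define):

import numpy as np

def batch_gradient_descent(X, Y, maxiter=1000, l=1e-4, converge_percent=0.01,
                           progress_callback=None):
    # least-squares stand-in: cost = ||XW - Y||^2, grad = 2 X^T (XW - Y)
    W = np.zeros((X.shape[1], Y.shape[1]))
    last_cost = None
    for i in range(maxiter):
        resid = X @ W - Y
        cost = (resid ** 2).sum()
        grads = 2 * X.T @ resid
        if progress_callback:
            progress_callback(i, cost, l)
        if last_cost is not None:
            change = (last_cost - cost) / last_cost
            if change < 0:  # bold driver: cost rose, so halve the step size
                l /= 2.0
            elif change * 100 < converge_percent:
                return W    # relative improvement small enough: converged
        l *= 1.1            # otherwise grow the step slightly, as in the fragment
        W -= grads * l
        last_cost = cost
    return W

# toy usage on noiseless data
X = np.random.randn(100, 8)
Y = X @ np.random.randn(8, 3)
print(batch_gradient_descent(X, Y).shape)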


Example #8
import sys

from keras import metrics
from keras.preprocessing.sequence import pad_sequences

from gendata import gendata


def acc_2(y_true, y_pred):
    # top-1 accuracy expressed via the top-k metric
    return metrics.top_k_categorical_accuracy(y_true, y_pred, k=1)


args = sys.argv

if 'floyd' in args:
    input_dir = "/input/"
    output_dir = "/output/"
else:
    input_dir = "/media/sarthak/Data/MAJOR/Major/rnnsimple/input/"
    output_dir = "/media/sarthak/Data/MAJOR/Major/rnnsimple/output/"

dataset = gendata(input_dir + "ATIS_samples/")
trainSentences, trainY, trainL, trainlist = dataset['train']
validSentences, validY, validL, validlist = dataset['valid']
testSentences, testY, testL, testlist = dataset['test']
idx2labels = dataset['idx2labels']
idx2words = dataset['idx2words']
idx2intents = dataset['idx2intents']

lengths = [len(x) for x in trainSentences]
print('Input sequence length range:', max(lengths), min(lengths))

maxlen = max(lengths)
print('Maximum sequence length:', maxlen)

X_train = pad_sequences(trainSentences, maxlen=maxlen)
y_train = pad_sequences(trainY, maxlen=maxlen)
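
pad_sequences left-pads by default, so shorter sentences receive leading zeros up to maxlen; a quick illustration:

from keras.preprocessing.sequence import pad_sequences

print(pad_sequences([[1, 2], [3]], maxlen=3))
# [[0 1 2]
#  [0 0 3]]
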
Example #9
import time

# db, coll and COLL_NAME are set up in code truncated from this excerpt;
# gendata and bloomify come from the surrounding project

# remove everything in the collection
coll.delete_many({})

# populate the database with data/biz.dat
for line in open('data/biz.dat'):
    binfo_l = [s.strip() for s in line.split('\t')]
    assert len(binfo_l) == 2
    coll.insert_one({'name': binfo_l[0], 'url': binfo_l[1]})

#generate bloom filter based on 'url' 
bfcoll = db[COLL_NAME] #get another collection object
bfcoll = bloomify(bfcoll, 'url')

# generate test files
T1 = 'data/testset'
gendata(1000, 0.5, output_file=open(T1, 'w'))

# function to perform timed tests
def timed_test(testf_path, coll):
    tic = time.perf_counter()
    for line in open(testf_path):
        binfo_l = [l.strip() for l in line.split('\t')]
        assert len(binfo_l) == 2
        [bname, burl] = binfo_l
        coll.find_one({'url': burl})
    toc = time.perf_counter()
    return toc - tic


# make sure the two collection objects do not share the same find_one function
assert coll.find_one != bfcoll.find_one
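
bloomify() is not shown in the excerpt; the final assert implies it returns an object whose find_one consults an in-memory Bloom filter before hitting the database. A minimal sketch as a thin wrapper class (the filter sizes m and k are illustrative, and the project's real bloomify may differ):

import hashlib

class BloomCollection:
    def __init__(self, coll, field, m=1 << 20, k=5):
        self.coll, self.field, self.m, self.k = coll, field, m, k
        self.bits = bytearray(m // 8)
        for doc in coll.find({}, {field: 1}):  # preload existing values
            self._add(doc[field])

    def _positions(self, value):
        # k bit positions derived from salted hashes of the value
        for i in range(self.k):
            h = hashlib.md5((str(i) + value).encode()).digest()
            yield int.from_bytes(h[:8], 'big') % self.m

    def _add(self, value):
        for p in self._positions(value):
            self.bits[p // 8] |= 1 << (p % 8)

    def _maybe_contains(self, value):
        return all(self.bits[p // 8] & (1 << (p % 8))
                   for p in self._positions(value))

    def find_one(self, query):
        v = query.get(self.field)
        if v is not None and not self._maybe_contains(v):
            return None  # definite miss: skip the database round-trip
        return self.coll.find_one(query)

def bloomify(coll, field):
    return BloomCollection(coll, field)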