def get_image(self):
    start = tic()
    print("capturing_image")
    with self.front_buf_lock:
        retval = xmlrpclib.Binary(self.double_buf[self.buf_ptr].getvalue())
    print(tic() - start)
    return retval
Example no. 2
def test_lap3dcharge():
    """ test gradient of pot in lap3dcharge, eval speeds of slow & jit & self.
    Barnett 9/11/18
    """
    x = array([0,1,-1])  # choose a targ
    y = array([1,2,.4]); q = array([2])   # 1 src
    u = 0*q; g = 0*y
    lap3dcharge_numba(y,q,x,u,g) # compile & check jit can be called w/ nt=ns=1
    u = lap3dcharge_native(y,q,x)   # check native can be called w/ nt=ns=1
    # check grad... inline funcs needed (NB 2nd tuple in gradf)
    f = lambda x: lap3dcharge_native(y,q,x.T,ifgrad=False)
    gradf = lambda x: lap3dcharge_native(y,q,x.T,ifgrad=True)[1].ravel()
    print('test_lap3dcharge: grad check (native): ', checkgrad(x,f,gradf))
    # perf tests...
    ns = 2000                    # sources
    nt = 1000                    # targs (check rect case)
    y = random.rand(ns,3)     # sources in [0,1]^3
    q = random.randn(ns)      # charges
    x = random.rand(nt,3)     # targs
    #y=np.asfortranarray(y); x=np.asfortranarray(x); q=np.asfortranarray(q)
    t0=tic()
    u,g = lap3dcharge_native(y,q,x,ifgrad=True)    # native python
    t=tic()-t0
    print("native: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    u2 = zeros(nt)    # numba version writes outputs to arguments
    g2 = zeros([nt,3])
    lap3dcharge_numba(y,q,x,u2,g2)
    t0=tic()
    lap3dcharge_numba(y,q,x,u2,g2)
    t =tic()-t0
    print("numba:  %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    print("pot err numba vs native:  %.3g"%(np.max(np.abs(u-u2))))
    print("grad err numba vs native: %.3g"%(np.max(np.abs(g-g2))))
Example no. 3
def perf_test():
    """ test speed of pot in lap3dcharge, eval speeds of slow & jit & self.
    Barnett 9/11/18. cut down 3/3/20
    """
    # perf tests...
    ns = 10000  # sources
    nt = 10000  # targs
    y = random.rand(ns, 3)  # sources in [0,1]^3
    q = random.randn(ns)  # charges
    x = random.rand(nt, 3)  # targs
    #y=np.asfortranarray(y); x=np.asfortranarray(x); q=np.asfortranarray(q)
    u = lap3dcharge_native(y, q, x)  # warm up
    t0 = tic()
    u = lap3dcharge_native(y, q, x)  # native python
    t = tic() - t0
    print("native: %d src-targ pairs in %.3g s: %.3g Gpair/s" %
          (ns * nt, t, ns * nt / t / 1e9))
    u2 = zeros(nt)  # numba version writes outputs to arguments
    lap3dcharge_numba(y, q, x, u2)  # warm up
    t0 = tic()
    lap3dcharge_numba(y, q, x, u2)  # numba
    t = tic() - t0
    print("numba:  %d src-targ pairs in %.3g s: %.3g Gpair/s" %
          (ns * nt, t, ns * nt / t / 1e9))
    print("pot err numba vs native:  %.3g" % (np.max(np.abs(u - u2))))
Example no. 4
def test_lap3d3p(tol=1e-3, verb=1, gamma=None):
    """Speed and accuracy test for lap3d3p (Laplace 3D triply-periodic) class.
    Optional inputs:
       tol - requested tolerance
       gamma - override near box size, see lap3d3p precomp().
       verb - verbosity: 1 for text, 2 for text+pictures
    """
    #l3k.warmup()
    random.seed(seed=0)
    L = array([[1, 0, 0], [0.3, 1, 0], [-0.2, 0.3, 0.9]])  # rows are lattice vecs
    #L = eye(3)        # cubical (best-case) lattice
    #L[0,0]=2.0        # cuboid (beyond aspect ratio 2 it dies)
    p = lap3d3p(L, verb=1)  # make a periodizing object
    p.precomp(tol=tol, gamma=gamma, verb=verb)  # doesn't need the src pts
    if verb > 1:
        pl.ion()
        ax = p.show()  # plot it
    ns = 500  # sources
    y = (random.rand(ns, 3) - 1 / 2).dot(L)  # in [-1/2,1/2]^3 then xform to latt
    d = random.randn(ns, 3)  # dipole strength vectors
    c = random.randn(ns)  # charge strengths
    c -= np.mean(c)  # (must sum to 0)
    x = (random.rand(ns, 3) - 1 / 2).dot(L)  # same num of new targets
    # change the first 8 targs to the corners, to check periodicity...
    x[0:8, :] = (np.vstack([zeros(3), eye(3), 1 - eye(3),
                            ones(3)]) - 1 / 2).dot(L)
    reps = 10  # time the eval (jits have been warmed up)...
    t0 = tic()
    for i in range(reps):
        u, g = p.eval(y, d, x, c)
    teval = (tic() - t0) / reps
    print('3d3p eval time = %.3g ms' % (1e3 * teval))
    #print(u[0:8])     # peri-checking pot vals - NB arbitrary constant offset
    #print(g[0:8])     # peri-checking field vals
    print('max corner pot peri abs err: %.3g' %
          (np.max(u[0:8]) - np.min(u[0:8])))
    print('max corner grad peri rel err: %.3g' %
          (np.max(np.abs(g[0] - g[1:8])) / norm(g[0])))
    us, gs = p.eval(y, d)  # test output opts: eval at sources only...
    us2, gs2, ut, gt = p.eval(y, d, x,
                              ifsrc=True)  # test src and targ eval together
    us2 -= us2[0] - us[0]  # correct for const pot offsets
    ut -= ut[0] - u[0]
    print('test src/targ out opts (expect 0s): %.3g %.3g %.3g %.3g' %
          (norm(us - us2, ord=np.inf), norm(
              gs - gs2, ord=1), norm(ut - u, ord=np.inf), norm(gt - g, ord=1)))
    if verb > 0:
        u, g = p.eval(y, d, x, verb=verb)  # to report more info
    t0 = tic()  # time the free-space eval...
    for i in range(reps):
        l3k.lap3ddipole_numba(y, d, x, u, g)
    tnonper = (tic() - t0) / reps
    print('cf non-per eval time %.3g ms (ratio: %.3g)\n' %
          (1e3 * tnonper, teval / tnonper))
Example no. 5
def classify(self, image):
    #return "notebook\nlaptop\ndesktop\ncomputer\nmodem\nscreen"
    print("--------------------------------------")
    print("input size:", len(image))
    start_time = tic()
    stream = StringIO.StringIO(image)
    img = np.asarray(Image.open(stream))
    scores = net.classify(img)
    #str(net.top_k_prediction(scores, 5))
    print("Hey")
    end_time = tic()
    print("time:", end_time - start_time)
    res = "\n".join(net.top_k_prediction(scores, 5)[1])
    print("res:", res.replace("\n", " "))
    return res
Example no. 6
def Section(description="", printer=None):
    from time import time as tic
    fname = inspect.currentframe().f_back.f_back.f_code.co_name
    printer and printer(f"{fname} -- {description}...")
    start = tic()
    yield
    printer and printer(f"{fname} -- {description} [{(tic() - start):.2g}s].")
Example no. 7
def warmup():
    """Warm-up (compile) the numba jits for little (n>1 needed) cases
    """
    print('lap3dkernels: wait for numba jit compiles...')
    n=10; y = random.rand(n,3); q=random.rand(n)
    d = 1*y; x=-1*y; g = 0*x; u=0*x[:,0]
    t0=tic()
    lap3dcharge_numba(y,q,x,u,g)
    lap3dcharge_numba(y,q,x,u,g,add=True)  # apparently a separate compile?
    lap3ddipole_numba(y,d,x,u,g)
    lap3ddipole_numba(y,d,x,u,g,add=True)
    lap3ddipoleself_numba(y,d,u,g)
    A = zeros([n,n]); An=0*A;
    lap3dchargemat_numba(y,x,d,A,An)
    lap3ddipolemat_numba(y,d,x,d,A,An)
    print('all jit compiles %.3g s'%(tic()-t0))
	def insert( self ):
		
		# get the key
		current_index = self._total_insert
		key = self._keys[ current_index ]
		
		# generate the value by applying md5 to the key
		value = hashlib.md5( key.encode( 'utf-8' ) ).hexdigest()
			
		
		start = tic()
		try:
			# insert them into the hash table
			self._hash_table.put( key, value )
			# incremental count of all successful inserts
			self._successfull_insert += 1

		except Exception as e:
			# it doesn't matter if an insertion fails
			pass
		stop  = toc()
		
		# increment the total insert time
		self._total_insert_time += ( stop - start )

		# incremental count of all operations
		self._total_insert += 1
Example no. 9
def __exit__(self, type, value, traceback):
    self.t1 = tic()
    msg = "=> Done"
    if self.timing != "none":
        t, unit = self._get_time_and_unit(self.t1 - self.t0)
        msg += f" in {t:.3f} {unit}"
    self.print_message(msg)
    if self.newline:
        self.print_message()
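A usage sketch for this __exit__ (the matching __enter__ appears in Example no. 35 below; the class name Timed is made up here):

with Timed("loading data"):
    do_work()
# prints "=> loading data ..." on entry, then "=> Done in <t> <unit>" on exit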
Example no. 10
def train(epoch, data_loader, model, criterion, optimizer, cpu_only=False):
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    prec1_meter = AverageMeter()

    end = tic()
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(tic() - end)

        if not cpu_only:
            targets = targets.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7
        inputs = Variable(inputs)
        targets = Variable(targets)

        outputs = model(inputs)
        loss = criterion(outputs, targets)
        prec1, = accuracy(outputs.data, targets.data, topk=(1, ))

        batch_size = inputs.size(0)
        loss_meter.update(loss.data[0], batch_size)
        prec1_meter.update(prec1[0], batch_size)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(tic() - end)
        end = tic()

        print('Epoch: [{}][{}/{}]  '
              'Time {:.3f} ({:.3f})  '
              'Data {:.3f} ({:.3f})  '
              'Loss {:.3f} ({:.3f})  '
              'Prec1 {:.3f} ({:.3f})  '.format(epoch, i, len(data_loader),
                                               batch_time.val, batch_time.avg,
                                               data_time.val, data_time.avg,
                                               loss_meter.val, loss_meter.avg,
                                               prec1_meter.val,
                                               prec1_meter.avg))
Example no. 11
def train(args, epoch, data_loader, model, criterion, optimizer, scheduler,
          evaluators, logger):
    summary = Summary(logger=logger)
    model.train()
    timestamp = tic()
    for i, (inputs, targets) in enumerate(data_loader):
        times = {"data": tic() - timestamp}
        if not args.cpu_only:
            inputs = to_cuda(inputs)
            targets = to_cuda(targets)
        outputs = model(inputs)
        losses = criterion(outputs, targets)
        with torch.no_grad():
            metrics = {
                k: float(v(outputs, targets))
                for k, v in evaluators.items()
            }

        optimizer.zero_grad()
        losses["overall_loss"].backward()
        optimizer.step()
        try:
            lr = scheduler.get_last_lr()[0]
        except Exception:
            # For backward compatibility
            lr = scheduler.get_lr()[0]

        times["time"] = tic() - timestamp
        summary.add(times=times,
                    lrs={"lr": lr},
                    losses=losses,
                    metrics=metrics)
        summary.print_current(
            prefix=f"[{epoch}/{args.num_epochs}][{i + 1}/{len(data_loader)}]")
        if args.log:
            wandb.log(metrics)
            wandb.log(losses)
        timestamp = tic()
    scheduler.step()
Example no. 12
def DataBaseChoice(energy, choice, region):
    global databasechoicetime
    start = tic()
    for i in range(len(Energy_dic_atoms)):
        if A[region][choice] == Energy_dic_atoms[i]:
            choice_index = i
    index_energy = min(range(len(Energy_dic[choice_index])), key=lambda i: abs(Energy_dic[choice_index][i] - energy))
    if Energy_dic[choice_index][index_energy] not in Energy_current[choice_index]:
        Energy_current[choice_index].append(Energy_dic[choice_index][index_energy])
        cosine_current = Cosines[choice_index][index_energy]
        probs_current = Probability[choice_index][index_energy]
        if A[region][choice] != 2 and A[region][choice] != 14:
            cosine_current.reverse()
            probs_current.reverse()
        cosine, probs = interpolate(cosine_current, probs_current, 1000)
        cdf, cosine = CreateCDF(probs, cosine[0], cosine[-1], 1000, 1)
        Angles[choice_index].append(cosine)
        Probs[choice_index].append(probs)
        CDF[choice_index].append(cdf)
    current_index = Energy_current[choice_index].index(Energy_dic[choice_index][index_energy])
    angle = Choice(CDF[choice_index][current_index], Angles[choice_index][current_index])
    databasechoicetime += tic() - start
    return angle
def run_test(max_kk=10):
    for kk in range(max_kk):
        print(kk)
        mesh=UnitSquareMesh(2**kk,2**kk)

        VV=FunctionSpace(mesh,'CG',1)
        uu=TrialFunction(VV)
        vv=TestFunction(VV)
        t0 = tic()
        AA=assemble(inner(grad(uu),grad(vv))*dx)
        tt_regular = tic()-t0
        print('    {:3e}'.format(tt_regular))

        WV=FiniteElement('CG',mesh.ufl_cell(),1)
        WR=FiniteElement('R',mesh.ufl_cell(),0)
        WW=FunctionSpace(mesh,WV*WR)
        uu, pp=TrialFunctions(WW)
        vv, qq=TestFunctions(WW)

        t0 = tic()
        BB=assemble(inner(grad(uu),grad(vv))*dx)
        tt_mixed = tic()-t0
        print('    {:3e} = [{:3e}] x {:3e}'.format(tt_mixed,tt_mixed/tt_regular,tt_regular))
Example no. 14
def evaluate(args, epoch, data_loader, model, evaluators, logger):
    summary = Summary(logger=logger)
    model.eval()
    timestamp = tic()
    batch_results = []
    for i, (inputs, targets) in enumerate(data_loader):
        times = {"data": tic() - timestamp}
        if not args.cpu_only:
            inputs = to_cuda(inputs)
            targets = to_cuda(targets)
        outputs = model(inputs)
        batch_results.append(
            {k: v.step(outputs, targets)
             for k, v in evaluators.items()})

        times["time"] = tic() - timestamp
        summary.add(times=times)
        summary.print_current(
            prefix=f"[{epoch}/{args.num_epochs}][{i + 1}/{len(data_loader)}]")
        timestamp = tic()
    results = collate_batches(batch_results)
    metrics = {k: float(v.stop(results[k])) for k, v in evaluators.items()}
    return metrics
	def delete( self ):
		
		low  = 0
		high = self._successfull_insert
		random_index = random.randint( low, high )
		random_key = self._keys[ random_index ]
		
		start = tic()
		try:
			self._hash_table.delete( random_key )
			self._successfull_delete += 1
		except Exception:
			# it doesn't matter if a delete goes wrong
			pass
		stop  = toc()

		# increment the total delete time
		self._total_delete_time += ( stop - start )
		
		# incremental count of all operations
		self._total_delete += 1
Example no. 16
def test_lap3ddipole():
    """ test gradient of pot in lap3ddipole, eval speeds of slow & jit & self.
    Barnett 9/5/18
    """
    x = array([0,1,-1])  # choose a targ
    y = array([1,2,.4]); d = array([2,1,3])   # 1 src and dipole strength
    u = array([0]); g = 0*d
    lap3ddipole_numba(y,d,x,u,g)  # compile & check jit can be called w/ nt=ns=1
    u = lap3ddipole_native(y,d,x)   # check native can be called w/ nt=ns=1
    # check grad... inline funcs needed (NB 2nd tuple in gradf)
    f = lambda x: lap3ddipole_native(y,d,x.T,ifgrad=False)
    gradf = lambda x: lap3ddipole_native(y,d,x.T,ifgrad=True)[1].ravel()
    print('test_lap3ddipole: grad check (native): ', checkgrad(x,f,gradf))

    # perf tests...
    ns = 1000                    # sources
    nt = 2000                    # targs (check rect case)
    y = random.rand(ns,3)     # sources in [0,1]^3
    d = random.randn(ns,3)    # strength vectors
    x = random.rand(nt,3)     # targs
    # try swap storage order: (2x speed up for native code, strangely)...
    #y=np.asfortranarray(y); x=np.asfortranarray(x); d=np.asfortranarray(d)
    t0=tic()
    u,g = lap3ddipole_native(y,d,x,ifgrad=True)    # native python
    t =tic()-t0
    print("native: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    u2 = zeros(nt)    # numba version writes outputs to arguments
    g2 = zeros([nt,3])
    lap3ddipole_numba(y,d,x,u2,g2)   # warm up
    t0=tic()
    lap3ddipole_numba(y,d,x,u2,g2)
    t =tic()-t0
    print("numba:  %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    print("pot err numba vs native:  %.3g"%(np.max(np.abs(u-u2))))
    print("grad err numba vs native: %.3g"%(np.max(np.abs(g-g2))))

    n = 2000                        # sources for self-eval j!=i test
    y = random.rand(n,3)              # in [0,1]^3
    d = random.randn(n,3)
    pot = 0*y[:,0];  grad = 0*y   # allocate output arrays
    lap3ddipoleself_numba(y,d,pot,grad)   # compile to warm-up, 0.3 s!
    t0=tic()
    lap3ddipoleself_numba(y,d,pot,grad)
    t=tic()-t0
    print("numba self: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (n*n,t,n*n/t/1e9))
Example no. 17
def test_lap3dmats():
    """test the matrix fillers match the native evaluator answers.
    Also tests timing. Conclusion: numba can fill around 1e9 els/sec but
    numexpr is 4x slower, at least on i7.
    """
    ns = 5000                    # sources
    y = random.rand(ns,3)     # sources in [0,1]^3
    d = random.randn(ns,3)    # strength vectors (ought to be unit len)
    q = random.randn(ns)      # charges
    nt = 10000                    # targs (check rect case)
    x = random.rand(nt,3)     # targs
    e = random.randn(nt,3)    # targ normals (ought to be unit len)
    u = zeros(nt)             # true pot and grad outputs
    g = zeros([nt,3])
    for meth in range(2):
        print("meth=numba:" if meth==0 else "meth=numexpr:")
        # charge (monopole)...
        lap3dcharge_numba(y,q,x,u,g)
        A = zeros([nt,ns]); An = zeros([nt,ns]);  # alloc mats
        t0=tic()
        lap3dchargemat_numba(y,x,e,A,An) if meth==0 else lap3dchargemat_ne(y,x,e,A,An)
        t = tic()-t0
        print("chg mats fill:  two %d*%d mats in %.3g s: %.3g Gels/s" % (nt,ns,t,2*ns*nt/t/1e9))
        t0 = tic()
        ufrommat = A @ q[:,None]
        t = tic()-t0
        print("matvec: %.3g s: %.3g Gops/s" % (t,ns*nt/t/1e9))
        print('chg mat pot err nrm = ', norm(u[:,None] - ufrommat))  # u make col vec!
        gfrommat = An @ q[:,None]
        gdote = np.sum(g*e,axis=1)[:,None]   # e-direc derivs
        print('chg mat n-grad err nrm = ', norm(gdote - gfrommat))
        # dipole...
        lap3ddipole_numba(y,d,x,u,g)
        A = zeros([nt,ns]); An = zeros([nt,ns]);  # alloc mats
        t0=tic()
        lap3ddipolemat_numba(y,d,x,e,A,An) if meth==0 else lap3ddipolemat_ne(y,d,x,e,A,An)
        t = tic()-t0
        print("dip mats fill:  two %d*%d mats in %.3g s: %.3g Gels/s" % (nt,ns,t,2*ns*nt/t/1e9))
        ufrommat = A @ np.ones([ns,1])   # pot from unit dipstrs
        print('dip mat pot err nrm = ', norm(u[:,None] - ufrommat))
        gfrommat = An @ np.ones([ns,1])   # grad from unit dipstrs
        gdote = np.sum(g*e,axis=1)[:,None]   # e-direc derivs
        print('dip mat n-grad err nrm = ', norm(gdote - gfrommat),'\n')
Example no. 18
#!/usr/bin/python

import sys

sys.path.append("decaf")
from decaf.scripts.imagenet import DecafNet
from io import BytesIO
from PIL import Image
import numpy as np
from time import time as tic

net = DecafNet('imagenet.decafnet.epoch90', 'imagenet.decafnet.meta')

start_time = tic()

#img = np.asarray(Image.open("cat.jpg"))
img_content = open("cat.jpg", "rb").read()
print(len(img_content))
stream = BytesIO(img_content)
img = np.asarray(Image.open(stream))

scores = net.classify(img)
print(net.top_k_prediction(scores, 5))
#scores = net.classify(img, center_only=True)

end_time = tic()
print("diff_time: %f" % (end_time - start_time))
	diff_abs_times = []
	merge_times = []
	insertion_times = []

	# size goes from MIN_ARRAY_SIZE to MAX_ARRAY_SIZE
	for size in size_range:

		# generate size random integers between -100 and 100
		input_array = generate_random_integers( size )

		# insert input inside sorter objects
		insertion_sorter.get_input( input_array )
		merge_sorter.get_input( input_array )

		# sort with insertion_sorter and benchmark
		start_time_insertion = tic()
		insertion_sorter.asc_sort()
		end_time_insertion   = toc()

		# sort with merge_sorter and benchmark
		start_time_merge = tic()
		merge_sorter.asc_sort()
		end_time_merge   = toc()
		
		'''
		# assert that all is correct
		insertion_output = insertion_sorter.get_output()
		merge_output     = merge_sorter.get_output()
		
		assert len( insertion_output )            == size
		assert len( merge_output )                == size
Example no. 20
tShift_ = 4
checkMasks = True  # Check quality of predrawn masks for each video
maskFile = 'maskData_19FEB2016.pkl'  # used to have 1APR2016
refPointFile = 'refPoint_31OCT2015.pkl'
homographyFile = 'offCenterTopView_18FEB2016.pkl'  # Use None for no transform / used to have 1APR2016
viewResults = True
viewInterval = 1
saveInterval = 600
saveData = True
continueAnalysis = True
figNum = 123

# Cleanup existing windows
plt.close('all')
destroyAllWindows()
t0 = tic()
t1 = tic()

# Get list of video files to process
pathToFiles = os.path.join(folder, fileString)
fileList = glob.glob(pathToFiles)
dataList = glob.glob(os.path.join(folder, '*' + suffix))
N = len(fileList)
hMatrix = 0

for i in range(0, N, 1):
    # Parse the filename to get video info
    videoPath = fileList[i]
    print(os.path.split(videoPath)[1])
    dataFile = fileList[i][:-4] + suffix
    RPM = VF.get_RPM_from_file_name(videoPath)
Example no. 21
import sys

sys.path.append("BackEnd/")  # Givvs Run.py access to files in BackEnd subdirectory
import math, subprocess
import PypyNumpy as np
from time import time as tic
from math import cos as cos
from math import sin as sin
from multiprocessing import Pool
from contextlib import closing

start = tic()

from Inputs import *
from Functions import *

###################################
##### Collisions ##################
###################################
def CrossSection(energy, region):
    """Energy is neutron energy in eV.
    Decides which atom to collide with, records it, calculates that cross
    section and returns it."""
    cross_section = list(
        np.vector(Fits(energy, region)) * Ratio[region]
    )  # accounts for the effect of each substance's % in the material on the choice of which atom is collided with
    cross_section.append(0)
    cross_section.insert(0, 0)
    choice = DiscreteChoice(cross_section)
    xc = cross_section[choice + 1] * BarnToCmSquare
    return (xc, choice)
from scipy.integrate import nquad
from scipy.integrate import dblquad
import numpy as np
from time import time as tic

def function(t,x):
    return x+t    
    #return np.exp(-x)*np.sin(t) 
def opts_1():
    return {}
    
start = tic()
for i in range(10):
    I = dblquad(function,0,1,lambda y: 0,lambda y:1,epsabs=1E-8)
    #I = nquad(function,[[0,1],[0,1]],opts=[opts_1()])
stop = tic()






def linspace(start, stop, n):
    if n == 1:
        yield stop
        return
    h = (stop - start) / (n - 1)
    for i in range(n):
        yield start + h * i
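
# quick check of the generator version of linspace above:
print(list(linspace(0, 1, 5)))  # -> [0.0, 0.25, 0.5, 0.75, 1.0]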

def GuassQuad(function, n, a, b):
    # def line and node setup reconstructed from the calls below; nodes t_j and
    # weights b_j are assumed to come from a Gauss-Legendre rule mapped to [a, b]
    t_j, b_j = np.polynomial.legendre.leggauss(n + 1)
    t_j = 0.5 * (b - a) * t_j + 0.5 * (a + b)
    b_j = 0.5 * (b - a) * b_j
    I = 0
    for i in range(n+1):
        I += b_j[i]*function(t_j[i])
    return I
def Trapazoid(function,x):
    y = function(x)
    del_x = (x[-1]-x[0])/(2*(len(x)-1))
    A = 2*np.identity(len(x))
    A[0][0] , A[-1][-1] = 1 , 1
    I = sum(A.dot(y))*del_x
    return (I)
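
# Note: the 2*identity matrix with 1s at the two corners encodes the composite
# trapezoid weights (1, 2, ..., 2, 1), so sum(A.dot(y)) * del_x equals
# del_x * (y[0] + 2*y[1] + ... + 2*y[-2] + y[-1]).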

import math
import numpy as np
from time import time as tic
def func(t):
    return t**2
    #return math.exp(-t**2)
x = np.linspace(0,1,10)
I_1 = Trapazoid(func,x)
I_2 = GuassQuad(func,2,0,1)

start = tic()
for i in range(10000):           
    GuassQuad(func,3,0,1)
print('Time to run guassian quad:',tic()-start)
start = tic()
for i in range(10000):
    Trapazoid(func,x)
print('Time to run trapazoid:',tic()-start)
Example no. 24
@contextmanager  # restored: the yield below implies use as a context manager
def Section(description: str, out=None):
	fname = inspect.currentframe().f_back.f_back.f_code.co_name
	out and out(F"{fname} -- {description}")
	start = tic()
	yield
	out and out(F"{fname} -- {description} [{(tic() - start):.2g}s]")
class SolverWrapper(object):
    """A simple wrapper around Caffe's solver.
    This wrapper gives us control over he snapshotting process, which we
    use to unnormalize the learned bounding-box regression weights.
    """
    def __init__(self, sess, saver, network, imdb, roidb, output_dir, pretrained_model=None):
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.output_dir = output_dir
        self.pretrained_model = pretrained_model

        print('Computing bounding-box regression targets...')

        # True
        if cfg.TRAIN.BBOX_REG:
            # per-class means and stds, returned as means.ravel(), stds.ravel()
            self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
        print('done')

        # for checkpoint
        self.saver = saver

    def snapshot(self, sess, iter):
        """Take a snapshot of the network after unnormalizing the learned
        bounding-box regression weights. This enables easy use at test-time.
        """
        net = self.net

        if cfg.TRAIN.BBOX_REG and 'bbox_pred' in net.layers:
            # save original values
            with tf.variable_scope('bbox_pred', reuse=True):
                weights = tf.get_variable("weights")
                biases = tf.get_variable("biases")

            orig_0 = weights.eval()
            orig_1 = biases.eval()

            # scale and shift with bbox reg unnormalization; then save snapshot
            weights_shape = weights.get_shape().as_list()
            sess.run(net.bbox_weights_assign, feed_dict={net.bbox_weights: orig_0 * np.tile(self.bbox_stds, (weights_shape[0], 1))})
            sess.run(net.bbox_bias_assign, feed_dict={net.bbox_biases: orig_1 * self.bbox_stds + self.bbox_means})

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
                 if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
        filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix +
                    '_iter_{:d}'.format(iter+1) + '.ckpt')
        filename = os.path.join(self.output_dir, filename)

        self.saver.save(sess, filename)
        print('Wrote snapshot to: {:s}'.format(filename))

        if cfg.TRAIN.BBOX_REG and 'bbox_pred' in net.layers:
            with tf.variable_scope('bbox_pred', reuse=True):
                # restore net to original state
                sess.run(net.bbox_weights_assign, feed_dict={net.bbox_weights: orig_0})
                sess.run(net.bbox_bias_assign, feed_dict={net.bbox_biases: orig_1})

    # sigma is 3; computes the smooth L1 loss
    def _modified_smooth_l1(self, sigma, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights):
        """
            ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
            SmoothL1(x) = 0.5 * (sigma * x)^2,    if |x| < 1 / sigma^2
                          |x| - 0.5 / sigma^2,    otherwise
        """
        # 9
        sigma2 = sigma * sigma
        # tf.subtract(bbox_pred, bbox_targets) subtracts bbox_targets from bbox_pred, then multiplies by bbox_inside_weights
        inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))
        # test whether abs(inside_mul) < 1/9; True where smaller, else False; tf.cast converts to 0/1
        smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
        smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
        smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
        # the result implements SmoothL1(x) above
        smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
                                  tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
        # implements: bbox_outside_weights * SmoothL1(x)
        outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)

        return outside_mul

    def train_model(self, sess, max_iters):
        """Network training loop."""
        # returns a RoIDataLayer object holding self._roidb, self._num_classes, self._perm, self._cur
        data_layer = get_data_layer(self.roidb, self.imdb.num_classes)

        # RPN
        # classification loss
        # reshape the 'rpn_cls_score_reshape' layer output (1,n,n,18) to (-1,2), where 2 scores foreground vs background
        # self.net.get_output reads self.layers
        # self.layers is overridden in VGGnet_train.py as a dict recording every layer's output
        rpn_cls_score = tf.reshape(self.net.get_output('rpn_cls_score_reshape'), [-1, 2])
        # element [0] of the 'rpn-data' layer output is rpn_label, shape (1, 1, A * height, width), holding each anchor's label (-1, 0, 1)
        # issue 1: questionable; the label read order may be wrong
        rpn_label = tf.reshape(self.net.get_output('rpn-data')[0], [-1])
        # drop anchors that do not take part in training:
        # gather the rpn_cls_score entries whose rpn_label != -1 and reassemble rpn_cls_score
        rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, tf.where(tf.not_equal(rpn_label, -1))), [-1, 2])
        # gather the rpn_label entries whose rpn_label != -1 and reassemble rpn_label
        rpn_label = tf.reshape(tf.gather(rpn_label, tf.where(tf.not_equal(rpn_label, -1))), [-1])

        # faster RCNN loss 1: cls loss of the RPN network
        # score loss: tf.nn.sparse_softmax_cross_entropy_with_logits takes logits and labels of matching count (same shape[0]): the last layer's output and the labels
        # NOTE: this function returns a vector; tf.reduce_sum gives the cross entropy, tf.reduce_mean the loss
        # issue 2: logits and labels should have the same shape, but here they differ
        rpn_cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))

        # faster RCNN loss 2: pred loss of the RPN network
        # bounding box regression L1 loss
        # the 'rpn_bbox_pred' layer stores (dx,dy,dw,dh) for bbox regression
        rpn_bbox_pred = self.net.get_output('rpn_bbox_pred')
        # 'rpn-data'[1] is an array of per-anchor regression targets (dx,dy,dw,dh), shape (len(inds_inside), 4), i.e. (anchors.shape[0], 4)
        # reshaped to (1, height, width, A * 4)
        rpn_bbox_targets = tf.transpose(self.net.get_output('rpn-data')[1], [0, 2, 3, 1])
        # rpn_bbox_inside_weights: anchors labeled 1 get (1.0, 1.0, 1.0, 1.0)
        # reshaped to (1, height, width, A * 4)
        rpn_bbox_inside_weights = tf.transpose(self.net.get_output('rpn-data')[2], [0, 2, 3, 1])
        # rpn_bbox_outside_weights: anchors labeled 0 or 1 are initialized to (1/num_examples, ...), num_examples being the count of anchors labeled 0 or 1
        # reshaped to (1, height, width, A * 4)
        rpn_bbox_outside_weights = tf.transpose(self.net.get_output('rpn-data')[3], [0, 2, 3, 1])
        # compute the smooth L1 loss
        rpn_smooth_l1 = self._modified_smooth_l1(3.0, rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights)
        # rpn_smooth_l1 is a vector; combine it into a loss
        rpn_loss_box = tf.reduce_mean(tf.reduce_sum(rpn_smooth_l1, reduction_indices=[1, 2, 3]))

        # R-CNN
        # classification loss
        # output of the final score fc branch
        # faster RCNN loss 3: final classification (cls) loss
        cls_score = self.net.get_output('cls_score')
        # label: the chosen proposals are combined with the GT into all_roi; the qualifying rois are filtered out of all_roi and their labels taken
        label = tf.reshape(self.net.get_output('roi-data')[1], [-1])
        # compare these roi labels against the final score fc branch output to get the loss
        cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_score, labels=label))

        # faster RCNN loss 4: final regression (reg) loss
        # bounding box regression L1 loss
        # output of the final bbox fc branch
        bbox_pred = self.net.get_output('bbox_pred')
        bbox_targets = self.net.get_output('roi-data')[2]
        bbox_inside_weights = self.net.get_output('roi-data')[3]
        bbox_outside_weights = self.net.get_output('roi-data')[4]
        # compute the smooth L1 loss
        smooth_l1 = self._modified_smooth_l1(1.0, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
        # smooth_l1 is a vector; combine it into a loss
        loss_box = tf.reduce_mean(tf.reduce_sum(smooth_l1, reduction_indices=[1]))

        # final (total) loss
        loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box

        # optimizer and learning rate
        global_step = tf.Variable(0, trainable=False)

        # cfg.TRAIN.LEARNING_RATE is 0.001, cfg.TRAIN.STEPSIZE is 50000
        # tf.train.exponential_decay(initial lr, current step, steps per decay plateau, decay rate relative to the previous plateau, staircase)
        # with staircase=True the rate decays in steps; with False it updates every iteration
        lr = tf.train.exponential_decay(cfg.TRAIN.LEARNING_RATE, global_step, cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
        # cfg.TRAIN.MOMENTUM is 0.9
        momentum = cfg.TRAIN.MOMENTUM
        # gradient descent with momentum coefficient 0.9
        train_op = tf.train.MomentumOptimizer(lr, momentum).minimize(loss, global_step=global_step)

        # initialize variables
        sess.run(tf.global_variables_initializer())
        # load pretrained model weights if provided
        if self.pretrained_model is not None:
            print('Loading pretrained model '
                  'weights from {:s}'.format(self.pretrained_model))
            self.net.load(self.pretrained_model, sess, self.saver, True)

        last_snapshot_iter = -1
        # timer for recording elapsed time
        timer = Timer()
        # main loop over the maximum number of iterations
        for iter in range(max_iters):
            # get one batch of data
            # takes batch-size many roi indices in order
            blobs = data_layer.forward()

            # fill in the placeholders
            feed_dict = {self.net.data: blobs['data'], self.net.im_info: blobs['im_info'], self.net.keep_prob: 0.5, self.net.gt_boxes: blobs['gt_boxes']}

            run_options = None
            run_metadata = None
            #False
            if cfg.TRAIN.DEBUG_TIMELINE:
                run_options = tf.RunOptions(trace_level = tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

            timer.tic()

            rpn_loss_cls_value, rpn_loss_box_value, loss_cls_value, loss_box_value, _ = sess.run([rpn_cross_entropy, rpn_loss_box, cross_entropy, loss_box, train_op], feed_dict = feed_dict, options = run_options, run_metadata = run_metadata)

            timer.toc()
            #False
            if cfg.TRAIN.DEBUG_TIMELINE:
                trace = timeline.Timeline(step_stats=run_metadata.step_stats)
                trace_file = open(str(int(time.time() * 1000)) + '-train-timeline.ctf.json', 'w')
                trace_file.write(trace.generate_chrome_trace_format(show_memory=False))
                trace_file.close()

            if (iter+1) % (cfg.TRAIN.DISPLAY) == 0:
                print('iter: %d / %d, total loss: %.4f, rpn_loss_cls: %.4f, rpn_loss_box: %.4f, loss_cls: %.4f, loss_box: %.4f, lr: %f' %
                      (iter+1, max_iters, rpn_loss_cls_value + rpn_loss_box_value + loss_cls_value + loss_box_value, rpn_loss_cls_value, rpn_loss_box_value, loss_cls_value, loss_box_value, lr.eval()))
                print('speed: {:.3f}s / iter'.format(timer.average_time))

            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)

        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
Example no. 26
def kernel_learning(datapath,
                    final_file,
                    representation_no=0,
                    maxnumber=3993,
                    repno=1):
    '''
    Metafunction to simplify a ML run
    datapath: list of pickled compound instances in a file
    final_file : where the results are dumped with pickle
    representation_no: [0,1,2,3,4] stand for [CM, EVCM, BOB, OM, EVOM] respectively
    maxnumber = number of compounds from datapath file to be considered

    returns:
    -------
    results : learning results returned by kler.full_kernel_ridge
    '''
    start = tic()

    representation_list = ["CM", "EVCM", "BOB", "OM", "EVOM"]
    repro_name = representation_list[representation_no]

    repro_sigmas = [[80], [120], [120], [15, 20, 120], [30, 150]]
    repro_lambdas = [[1e-15], [1e-15], [1e-15], [1e-15, 1e-13], [1e-14, 1e-13]]

    #define parameters for learning
    set_sizes = [5, 120, 600, 1500, 3000]  # not bigger than the total number of instances in the set
    sigmas = repro_sigmas[representation_no]  # [9, 12, 15]  # how tight is the fit? needs testing; varies widely with the data
    lambdas = repro_lambdas[representation_no]  # how much variation to the initial data is introduced? 1e-17 - 1e-13 good to try

    number_of_runs = repno  #how many times should the learning be done before averaging and plotting?

    #define (hashed) representation
    representation_list = [ZRN_rep.Coulomb_Matrix_h,\
            ZRN_rep.Eigenvalue_Coulomb_Matrix_h,\
            ZRN_rep.Bag_of_Bonds_h,\
            ZRN_rep.Overlap_Matrix_h,\
            ZRN_rep.Eigenvalue_Overlap_Matrix_h]

    repro = representation_list[representation_no]

    #unpack pickled data to compounds list
    compounds = datprep.read_compounds(datapath)
    #print("len of compoundlist:", len(compounds))

    #shorten compounds to make stuff faster
    compounds = compounds[:maxnumber]
    #print("len of compoundlist used for this run:", len(compounds))

    #for compounds create list of energies and list of fingerprints in "repro" representation
    #internal = potential energy in hartree
    raw_energylist = []
    #convert to atomization energy in kcal/mol
    energylist = []
    fingerprintlist = []

    for c in compounds:

        #get properties from compound class
        energy = float(c.energy)

        Z = c.Z
        R = c.R
        N = c.N

        atomization_energy = datprep.atomization_energy(
            potential_energy=energy, nuclear_charges=Z)

        #calculate fingerprint of molecule
        fingerprint = repro(Z, R, N)

        #add energies and fingerprint to lists
        raw_energylist.append(energy)
        energylist.append(atomization_energy)
        fingerprintlist.append(fingerprint)

    t_compounds = tic()

    #print("time start to compounds: ", t_compounds - start)

    #run learning
    results, metadata = kler.full_kernel_ridge(fingerprintlist,\
            energylist,\
            final_file,\
            set_sizes,\
            sigmas,\
            lambdas,\
            rep_no = number_of_runs,\
            upperlimit = maxnumber,\
            representation = repro_name)

    return (results)
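
# Hypothetical invocation (file names made up for illustration):
#   results = kernel_learning("compounds.pkl", "bob_results.pkl",
#                             representation_no=2,   # BOB
#                             maxnumber=3000, repno=3)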
Example no. 27
    def eval(self, y, d, x=None, c=None, ifsrc=False, verb=0):
        """Evaluate triply-periodic Laplace 3D dipole sum at sources y (omitting
        self-interaction j=i), new targets x (distinct from sources), or both.

        The kernels are triply periodic version of those in lap3ddipole_native.

        Inputs:
          y (ns*3 float) - sources
          d (ns*3 float) - source dipole strength vectors (or None)
        Optional inputs:
          x (nt*3 float) - new target points. If absent/None, ifsrc=True is set
          c (ns float) - source charge strengths (must sum to zero)
          ifsrc (bool) - whether to self-evaluate at the sources
          verb (integer) - verbosity: 0 silent, 1 text, 2 and figs,...

        Outputs:
        If x is not None, and ifsrc == False: output a tuple of
          pott (nt float) - potentials at targets
          gradt (nt*3 float) - gradients (negative of E field) at targets
        If x is None, and ifsrc == True: output a tuple of
          pot (ns float) - potentials at sources
          grad (ns*3 float) - gradients (negative of E field) at sources
        If x is not None, and ifsrc == True: output a tuple of
          pot (ns float) - potentials at sources
          grad (ns*3 float) - gradients (negative of E field) at sources
          pott (nt float) - potentials at targets
          gradt (nt*3 float) - gradients (negative of E field) at targets

        Issues: * not sure if the most elegant output format.
                * all the lap3d eval calls in here could be switchable to FMM.
        """
        iftarg = (x is not None)
        if iftarg:
            Nt = x.shape[0]  # num targs
        else:
            ifsrc = True  # override: if no targs, presume you want src eval!
            Nt = 0
        Ns = y.shape[0]  # num srcs
        ifchg = (c is not None)
        ifdip = (d is not None)
        if verb:
            print('eval: ifsrc=%d, iftarg=%d, ifchg=%d, ifdip=%d' %
                  (ifsrc, iftarg, ifchg, ifdip))
        t1 = tic()  # sum all near images of sources, to the targets:
        y0 = y.dot(self.ilat)  # srcs xformed back as if std UC [-1/2,1/2]^3
        ynr = zeros([0, 3])  # all nr srcs, or use list for faster append?
        dnr = zeros([0, 3])  # all nr dip strength vecs
        cnr = zeros([0])  # all nr chg strengths
        # build all near source images, excluding original ones...
        for i in range(-1, 2):
            for j in range(-1, 2):
                for k in range(-1, 2):
                    if (i, j, k) != (0, 0, 0):  # exclude true sources
                        ijk = array([i, j, k])  # integer translation vec
                        y0tr = y0 + ijk  # broadcast, transl srcs
                        ii = np.max(np.abs(y0tr), axis=1) < (1 / 2 + self.gap)  # near?
                        ynr = np.vstack([ynr, y[ii, :] + ijk.dot(self.lat)])
                        if ifchg:
                            cnr = np.hstack([cnr, c[ii]])  # since 1d array
                        if ifdip:
                            dnr = np.vstack([dnr, d[ii, :]])  # 3-by-* arrays
        if ifsrc:
            pot = 0 * y[:, 0]
            grad = 0 * y  # alloc src field output arrays
            if ifchg:
                l3k.lap3dcharge_numba(ynr, cnr, y, pot,
                                      grad)  # dir non-self near
                l3k.lap3dchargeself_numba(y, c, pot, grad,
                                          add=True)  # nr src self
            if ifdip:
                l3k.lap3ddipole_numba(ynr, dnr, y, pot, grad, add=True)
                l3k.lap3ddipoleself_numba(y, d, pot, grad, add=True)
        ynr = np.vstack([ynr, y])  # append original srcs to list
        if ifchg:
            cnr = np.hstack([cnr, c])
        if ifdip:
            dnr = np.vstack([dnr, d])
        if iftarg:
            pott = 0 * x[:, 0]
            gradt = 0 * x  # alloc targ output arrays
            # NB stacking nr srcs w/ single call here good if # targs big...
            if ifchg:
                l3k.lap3dcharge_numba(ynr, cnr, x, pott,
                                      gradt)  # eval dir near sum
            if ifdip:
                l3k.lap3ddipole_numba(ynr, dnr, x, pott, gradt, add=True)

        if verb:
            print('eval nt=%d, ns=%d: %d near src, time\t%.3g ms' %
                  (Nt, Ns, ynr.shape[0], 1e3 * (tic() - t1)))

        # compute discrepancy of near source (chg, dip) images on UC faces
        t0 = tic()
        ynr0 = ynr.dot(self.ilat)  # hack to get all nr src as if std UC (fast)
        discrep = array([])  # length-0 row vec
        m2 = self.m**2  # colloc pts per face
        for f in range(3):  # xyz face directions
            ii = ynr0[:, f] > (-1 / 2 + self.gap)  # nr srcs "gap-far" from -ve face?
            df = np.zeros(m2)  # initialize val discrep on a face pair
            gf = np.zeros([m2, 3])  #            grad "
            if ifchg:
                l3k.lap3dcharge_numba(ynr[ii],
                                      cnr[ii],
                                      self.c[f, 0, :, :],
                                      df,
                                      gf,
                                      add=True)
            if ifdip:
                l3k.lap3ddipole_numba(ynr[ii],
                                      dnr[ii],
                                      self.c[f, 0, :, :],
                                      df,
                                      gf,
                                      add=True)
            df = -df
            gf = -gf  # sign for -ve face
            ii = ynr0[:, f] < (1 / 2 - self.gap)  # nr srcs "gap-far" from +ve face?
            if ifchg:
                l3k.lap3dcharge_numba(ynr[ii],
                                      cnr[ii],
                                      self.c[f, 1, :, :],
                                      df,
                                      gf,
                                      add=True)
            if ifdip:
                l3k.lap3ddipole_numba(ynr[ii],
                                      dnr[ii],
                                      self.c[f, 1, :, :],
                                      df,
                                      gf,
                                      add=True)
            dnf = np.sum(self.cn[f, 0, :, :] * gf, axis=1)  # n-deriv = grad dot nor
            discrep = np.hstack([discrep, df, dnf])
        if verb:
            print('\tdiscrep eval\t\t\t\t%.3g ms' % (1e3 * (tic() - t0)))

        discrep = -discrep  # since BVP solve aims to cancel discrep
        t0 = tic()  # solve Q.xi = discrep,  for aux strengths xi...
        #xi = random.rand(self.Np); discrep = self.Q.dot(xi) # TEST ONLY
        if self.facmeth == '':
            xi = la.lstsq(self.Q, discrep)[0]  # O(N^3), too slow, obviously
        elif self.facmeth == 'q' or self.facmeth == 'p':
            #xi = la.solve_triangular(self.QfacR, self.QfacQ.T @ discrep) # slow
            xi = self.invR @ (self.QfacQ.T @ discrep)  # "solve" : ~2ms
            if self.facmeth == 'p':
                xiperm = xi
                xi = 0 * xiperm
                xi[self.Qperm] = xiperm  # inverts piv QR perm, takes only 20us
        else:
            xi = self.QfacV @ (self.QfacSUh @ discrep)
        if verb:
            print('\tsolve lin sys for xi \t\t\t%.3g ms' % (1e3 * (tic() - t0)))
            print('\tresid rel nrm %.3g' %
                  (norm(self.Q.dot(xi) - discrep) / norm(discrep)))  # adds 1ms
            print('\t|discrep|=%.3g, soln |xi|=%.3g' %
                  (norm(discrep), norm(xi)))

        t0 = tic()  # add aux proxy rep to out (dipole case: srcdip = xi * direcs)
        if self.proxytype == 's':
            if ifsrc:  # eval aux contrib (charges xi) back at srcs y
                l3k.lap3dcharge_numba(self.p, xi, y, pot, grad, add=True)
            if iftarg:
                l3k.lap3dcharge_numba(self.p, xi, x, pott, gradt, add=True)
        else:
            if ifsrc:
                l3k.lap3ddipole_numba(self.p,
                                      xi[:, None] * self.pn,
                                      y,
                                      pot,
                                      grad,
                                      add=True)
            if iftarg:
                l3k.lap3ddipole_numba(self.p,
                                      xi[:, None] * self.pn,
                                      x,
                                      pott,
                                      gradt,
                                      add=True)
        if verb:
            print('\taux rep eval at targs\t\t%.3g ms' % (1e3 * (tic() - t0)))
            print('\ttotal eval time: \t\t\t%.3g ms' % (1e3 * (tic() - t1)))

        if verb > 1:  # plot all near src images, all targs x...
            pl.gca().scatter(ynr[:, 0], ynr[:, 1], ynr[:, 2], s=1, color='k')
            if iftarg:
                pl.gca().scatter(x[:, 0], x[:, 1], x[:, 2], color='g', s=3)

        if iftarg and not ifsrc:
            return pott, gradt
        elif not iftarg and ifsrc:
            return pot, grad
        elif iftarg and ifsrc:
            return pot, grad, pott, gradt
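
# The three output conventions documented in the docstring, in brief
# (p is a lap3d3p instance; y, d, x as in test_lap3d3p above):
#   pott, gradt = p.eval(y, d, x)                           # targets only
#   pot, grad = p.eval(y, d)                                # sources only
#   pot, grad, pott, gradt = p.eval(y, d, x, ifsrc=True)    # sources and targets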
Example no. 28
    atom = dict2obj(I=I, x=x, r=r)
    Rad = dict2obj(
        range=dict2obj(orientations=t, ortho=w, detector=p[:dim - 1]))
    y = [
        np.arange(0, 5, 1 / 20),
        np.arange(0, 10, 1 / 20),
        np.arange(0, 5, 1 / 20)
    ]

    u = zeros([Y.size for Y in y[:2]], dtype='f4')
    myR = zeros([t.shape[0], p[0].size] + ([] if dim == 2 else [p[1].size]),
                dtype='f4')

    #     VolProj(atom, y, u)
    T = tic()
    RadProj(atom, Rad, myR)
    #     print(myR[0].sum(), pi / 30 / r[0, :dim].prod(), myR.shape)
    print('Projection: ', tic() - T)
    #     print(.5 * (myR ** 2).sum())

    T = tic()
    R, dR, ddR = L2derivs_RadProj(atom, Rad, 0 * myR, order=2)
    print('Derivatives: ', tic() - T)
    #     print('Energy:', R)
    print('Gradient:', dR)
    #     print('Hessian:', (abs(ddR)).round())
    #     from numpy.linalg import eigvals
    #     print('Eigenvalues:', eigvals(ddR))

    exit()
Example no. 29
    def precomp(self,
                tol=1e-3,
                proxytype='s',
                facmeth='s',
                verb=0,
                gamma=None):
        """Periodizing setup and Q factorization.
        Optionally can set:
          tol - tolerance, ie desired relative precision.
          gamma - override growth factor of unit cell for "near" image sum.
                  Must be in (1,3].  Increase it for bad unit cells.
                  Vary with care: 1.8 is good for tol=1e-3, 2.5 for tol=1e-9.
          verb - (integer) verbosity: 0 silent, 1 text, 2 and figs,...

        Experts may vary these inputs:
          proxytype - 's' (charges) or 'd' (dipoles), for aux rep
          facmeth - Q factorization: 's' for SVD, 'q' for QR, 'p' for pivoted QR
                                     '' for don't factor, use LSQ solve in eval.
        """
        self.tol = tol
        digits = -np.log10(tol)
        self.proxytype = proxytype
        # ------- hacks for numerical param choices based on tol, here:
        if gamma is None:  # inflation factor for near summation
            self.gamma = 1.5 + np.sqrt(self.badness) * digits / 10  # big box reduces m,P
        else:
            self.gamma = gamma
        self.gamma = min(self.gamma, 3)  # since we didn't code more than 3x3x3
        self.gap = (self.gamma - 1) / 2  # gap from near box to std UC bdry = "nei"
        scaledgap = self.gap / self.badness
        self.m = int(1 + 0.9 * digits / scaledgap)  # tweak: colloc pts per face side
        # ------- (end hacks)
        self.c, self.cn = self.UCsurfpts(self.m, grid='g')  # make colloc pts
        self.Nc = self.c.shape[1]
        P = self.m  # aux pts per face side = "order"
        p, pn = self.UCsurfpts(P, grid='b')  # make aux src pts, normals
        self.Np = 6 * p.shape[2]  # num aux (proxy) pts
        self.p = self.gamma * p.reshape([self.Np, 3])  # gather aux pts together
        self.pn = pn.reshape([self.Np, 3])
        if verb:
            print(
                'precomp: gam=%.3g, m=%d per face side, P=%d per proxy side' %
                (self.gamma, self.m, P))

        t0 = tic()  # setup periodizing matrix & factorized inverse...
        self.fillQ()  # keeps Q around as an attribute
        if verb:
            print(
                'Q size %d*%d, fill time %.3g s. factorizing (est time %.2g s)...'
                % (self.Q.shape[0], self.Q.shape[1], tic() - t0,
                   (self.Q.size**1.5) / 2e9))
        t0 = tic()
        self.facmeth = facmeth
        if facmeth == 'q':  # *** NB both QR methods fail for "fat" Q currently
            self.QfacQ, R = la.qr(self.Q, mode='economic')  # no pivoting, fast
            self.invR = la.inv(R)
            if verb:
                print('\tQR + inv(R) time %.3g s, norm(invR)=%.3g' %
                      (tic() - t0, norm(self.invR)))
        elif facmeth == 'p':  # pivoted QR is 4x slower vs QR! (true in matlab)
            self.QfacQ, R, self.Qperm = la.qr(self.Q,
                                              mode='economic',
                                              pivoting=True)
            self.invR = la.inv(R)
            if verb:
                print('\tpiv-QR + inv(R) time %.3g s, norm(invR)=%.3g' %
                      (tic() - t0, norm(self.invR)))
            #print(self.invR)  # guess mult by invR will be bkw stable, good.
        else:  # best & slowest: SVD, observe smaller soln norms
            U, svals, Vh = la.svd(self.Q,
                                  full_matrices=False,
                                  check_finite=False)
            isvals = 1 / svals
            cutoff = max(1e-4 * tol, 1e-15)  # well below tolerance
            isvals[svals < cutoff * svals[0]] = 0.0  # truncated pinv
            self.QfacSUh = isvals[:, None] * U.T.conj()  # combine 2 factors
            self.QfacV = Vh.T.conj()  # Hermitian transpose
            if verb:
                print('\tSVD time %.3g s' % (tic() - t0))
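
A standalone sketch of the truncated-pseudoinverse solve that precomp builds above (Q, rhs, and the cutoff are made up for illustration):

import numpy as np
from scipy import linalg as la

Q = np.random.rand(40, 30)
rhs = np.random.rand(40)
U, svals, Vh = la.svd(Q, full_matrices=False, check_finite=False)
isvals = 1 / svals
isvals[svals < 1e-12 * svals[0]] = 0.0       # truncated pinv, as in precomp
QfacSUh = isvals[:, None] * U.T.conj()       # combine the two factors
xi = Vh.T.conj() @ (QfacSUh @ rhs)           # pinv(Q) @ rhs
print(np.allclose(xi, la.lstsq(Q, rhs)[0]))  # matches the least-squares solve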
import numpy as np
from scipy.integrate import dblquad 
from time import time as tic

def func_1_d(x):
    return x-x+1
def func_2_d(x,t):
    return x**2 + t**2

N = 10
start = tic()
for i in range(N):
    I = dblquad(func_2_d,0,1,lambda t: 0,lambda t: 1)
print('Numpy function:',tic()-start)



def trapazoid(function,x):
    y = function(x)
    del_x = (x[-1]-x[0])/(2*(len(x)-1))
    A = 2*np.identity(len(x))
    A[0][0] , A[-1][-1] = 1 , 1
    I = sum(A.dot(y))*del_x
    return (I)
    

def trapazoid_2_d(function,x_1,x_2):
    I = 0
    del_x = (x_1[-1]-x_2[0])/(2*(len(x_1)-1))
    A = 4*np.identity(len(x_1))
    A[0][0] , A[-1][-1] = 1 , 1  
Example no. 31

#
# Training
#
num_epochs = 5
net = mnist_net()

mnist = mnist_data()

sess = tf.Session()
sess.run(tf.global_variables_initializer())

val_generator = mnist.val_generator()

t1 = tic()
for e in range(num_epochs):
    train_generator = mnist.train_generator()
    loss = net.train_on_generator(sess, train_generator)
    print('Epoch: {:d}/{:d},  Loss:{:4f}'.format(e + 1, num_epochs, loss[-1]))
t2 = tic()
sec = t2 - t1

val_acc = net.test_acc_on_generator(sess, val_generator)

print("---------------------------")
print("Train for {} epochs (5w images per epoch)".format(num_epochs))
print("")
print("val accuracy  : {:4f}".format(val_acc))
print("training time : {:4f} secs".format(sec))
print("secs/epoch    : {:4f}".format(sec / num_epochs))
Example no. 32
        # Skip if empty experiment
        if len(time) == 0:
            continue

        # Adjust data fields
        aMax = aMax[time > t0]
        aMean = aMean[time > t0]
        Q = Fun.convert_flowrate(QTarget)

        # Record mean radius when aMax stagnates - this is what is helpful for identifying peaks in EP
        aMax = np.array([
            aMax[k] for k in range(len(aMax)) if aMax[k] < diskFrac * diskRad
        ])

        # METHOD 1 - for loop of increasing step size
        t1_start = tic()
        rSplashIndPrev = [0]
        rSplash = radiusNoisy
        for step in range(1, int(maxTimeStep * fps)):
            aDiff = aMax[step:len(aMax)] - aMax[0:len(aMax) - step]
            rSplashInd = [i for i in range(len(aDiff)) if aDiff[i] == 0]
            if len(rSplashInd) == 0:
                rSplashInd = rSplashIndPrev[0]
                rSplash = aMean[rSplashInd]
                break
            elif len(rSplashInd) == 1:
                rSplashInd = rSplashInd[0]
                rSplash = aMean[rSplashInd]
                break
            rSplashIndPrev = rSplashInd
        t1_end = tic()
Example no. 33
repros = [ZRNrep.Coulomb_Matrix, ZRNrep.Eigenvalue_Coulomb_Matrix, ZRNrep.Overlap_Matrix, \
        ZRNrep.Eigenvalue_Overlap_Matrix, ZRNrep.Bag_of_Bonds]

repronames = ["CM", "EVCM", "OM", "EVOM", "BOB"]
###read list of compounds from data file

compounds = datprep.read_compounds(data_file)
compounds = compounds[:1]
print("number of compounds:", len(compounds))

#store times for every single and total calculation
one_times = [[], [], [], [], []]
total_times = []

for i in range(5):
    start = tic()
    for c in compounds:
        thisstart = tic()
        M = repros[i](c.Z, c.R)
        thisend = tic()
        one_times[i].append(thisend - thisstart)

    end = tic()
    total_times.append(end - start)

#print out results in a latex table form
#np.set_printoptions(precision=3, suppress=True)

print("Representation \t &  Total & Median & Min & Max")
for i in range(5):
    print(repronames[i], "&", \
Example no. 34
def OM_full_unsorted_matrix(Z, R, N=0):
    #print("started fast OM calculation")
    '''
    The overlap matrix is constructed as described in the
    'student-friendly guide to molecular integrals' by Murphy et al., 2018,
    using the STO-3G basis set (3 Gaussian curves approximate each STO solution);
    it was optimized by myself and Nick Browning.

    Parameters
    ----------
    Z : 1 x n dimensional array
        contains nuclear charges
    R : 3 x n dimensional array
        contains nuclear positions
    N : size for hashed matrix (unused in this routine)

    Return
    ------
    S : 2D array (matrix)
        Full Overlap Matrix, K x K
    '''
    tstart = tic()

    thisbasis, K = basis.build_sto3Gbasis(Z, R)
    S = np.zeros((K, K))

    #tbasis = tic()
    #taold = tbasis
    #tbold = tbasis

    for a, bA in enumerate(thisbasis):  # access row a of S matrix; unpack list from tuple
        #ta = tic()
        for b, bB in enumerate(thisbasis):  # same for B
            #tb = tic()
            rA, rB = bA['r'], bB['r']  # get atom-centered coordinates of A and B
            lA, mA, nA = bA['l'], bA['m'], bA['n']  # get angular momentum numbers of A
            lB, mB, nB = bB['l'], bB['m'], bB['n']

            aA, aB = np.asarray(bA['a']), np.asarray(bB['a'])  #alpha vectors

            for alphaB, dB in zip(bB['a'], bB['d']):

                #Implement overlap element
                normA = jmath.OM_compute_norm(aA, lA, mA,
                                              nA)  #compute norm for A
                normB = jmath.OM_compute_norm(alphaB, lB, mB, nB)

                S_xyz = jmath.OM_compute_Sxyz(rA, rB, aA, alphaB, lA, lB, mA,
                                              mB, nA, nB)
                exponent = np.exp(-aA * alphaB * jmath.IJsq(rA, rB) /
                                  (aA + alphaB))

                #t1 = tic()
                #factor is array over alphaA, dA elements
                factor = np.array(
                    bA['d']) * dB * normA * normB * exponent * S_xyz

                S[a][b] += sum(factor)
                #t2 = tic()

                #print("adding factor to blist:", t2-t1)

            #print("b,bB loop:", tbold - tb)
            #tbold = tb
        #print("a,bA loop:", taold - ta)
        #taold = ta

    tend = tic()

    #print("total time:", tend - tstart)

    return S
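
# A minimal usage sketch for OM_full_unsorted_matrix (not from the original
# source): the H2 geometry below is hypothetical (coordinates in Bohr), and
# the basis/jmath helper modules used inside the function are assumed present.
import numpy as np

Z = np.array([1, 1])             # nuclear charges of H2
R = np.array([[0.0, 0.0, 0.0],
              [0.0, 0.0, 1.4]])  # atom positions, as expected by build_sto3Gbasis
S = OM_full_unsorted_matrix(Z, R)
print(S.shape)                   # (K, K), K = number of STO-3G basis functions
print(np.allclose(S, S.T))       # overlap matrices are symmetric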
Example n. 35
0
    def __enter__(self):
        self.t0 = tic()
        self.print_message("=> " + str(self.description) + " ...")
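
# A self-contained sketch of the timing context manager this __enter__
# presumably belongs to (not the original source): the class name,
# print_message, and the __exit__ body are assumptions.
from time import time as tic

class TimedBlock:
    def __init__(self, description):
        self.description = description

    def print_message(self, msg):
        print(msg)

    def __enter__(self):
        self.t0 = tic()
        self.print_message("=> " + str(self.description) + " ...")
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # report elapsed wall-clock time on exit
        self.print_message("<= done in %.3g s" % (tic() - self.t0))

with TimedBlock("heavy computation"):
    sum(i * i for i in range(10**6))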
Example n. 36
0
# GRAPHICAL INTERFACE GENERATION #

root = Tk()
scoreVar = StringVar(root, value="SCORE: 0")
root.resizable(width=False, height=False)
scoreLabel = Label(root)
scoreLabel.config(textvariable=scoreVar,
                  fg="#3EFF00",
                  font=("Courier New", 18, "bold"))
scoreLabel.config(height=1, bg="black", relief="groove")
scoreLabel.pack(fill=X)
cnv = Canvas(root, width=cnvShape[0], height=cnvShape[1])
cnv.config(bg='#4DF9E4')
cnv.bind("<Button-1>", mousse_pressed)
cnv.pack()

# PROGRAM EXECUTION #

bird = birdClass(cnv)
space = spaceClass(cnv)
doorID = 0
t = tic()
while True:
    if tic() - t > dt:
        bird.move_down()
        space.move()
        doorID = track_bird(bird, space, doorID)
        t = tic()
    root.update_idletasks()
    root.update()
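
# A more idiomatic Tkinter alternative to the busy while-loop above (a sketch,
# not the original code): replace the loop with an after()-scheduled callback.
# Assumes the same bird, space, track_bird, root, doorID, and dt (in seconds).
def game_tick():
    global doorID
    bird.move_down()
    space.move()
    doorID = track_bird(bird, space, doorID)
    root.after(int(dt * 1000), game_tick)  # schedule the next frame

# root.after(int(dt * 1000), game_tick)
# root.mainloop()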
Example n. 37
0
from multiprocessing import Pool
from contextlib import closing

def factorial(x):  # def line restored; it was cut off in the excerpt
    fac = 1
    for i in range(x):
        fac *= (i+1)
    return fac
values = [i for i in range(10)]

#if __name__ == '__main__':
#    with Pool(5) as p:
#        sol = p.map(factorial, values)
#if __name__ == '__main__':
#    with closing(Pool(processes=2)) as pool:
#        sol = pool.map(factorial,values)
#print(sol)     
from time import time as tic
values = [i for i in range(10)]
start = tic()
if __name__ == '__main__':
    with closing(Pool(processes=2)) as p:
        sol = p.map(factorial, values)
print('Parallel time:',tic()-start)
start = tic()
sol = [factorial(i) for i in values]
print('Linear time:',tic()-start)


attributes = [(1,2,3,4),(3,4,5,6)]
Solution = []
def basic_add(x):
    return [x[0] + x[1] + x[2] + x[3], x[1]]

if __name__ == '__main__':
    # completing the truncated snippet, mirroring the Pool pattern above
    with closing(Pool(processes=2)) as p:
        Solution = p.map(basic_add, attributes)
    print(Solution)
Example n. 38
0
def full_kernel_ridge(fingerprint_list,
                      property_list,
                      result_file,
                      set_sizes,
                      sigmas=[],
                      lambdas=[],
                      rep_no=1,
                      upperlimit=12,
                      Choose_Folder=False,
                      representation="CM"):
    #print("result_file:", result_file)
    ''' Kernel ridge regression model
    y(X') = sum_i alpha_i K(X', X_i)

    Input
    -----
    fingerprint_list : list of fingerprints
    property_list : list of learning targets, e.g. energy values corresponding to the fingerprints
    result_file : file where results are stored with pickle
    set_sizes : list of training-set sizes to evaluate
    sigmas : list of kernel width parameters
    lambdas : list of regularization parameters
    rep_no : int, number of repetitions per (sigma, lambda) combination
    upperlimit : int, total of training + test set; can be used if more data
                 is available than is being used, or to bootstrap
    Choose_Folder : boolean; if True, results are stored directly to result_file,
                    otherwise to the ./Pickled/Kernel_Results folder
    representation : str, abbreviation for the fingerprint used

    Return
    ------
    learning_list : list of LearningResults objects
    raw_data_files : list of names of files where raw data was stored

    Stored
    ------
    raw data is stored to the raw_data_files entries; learning_list is stored to result_file
    '''

    start = tic()

    learning_list = []
    raw_data_files = []

    if not Choose_Folder:
        print("your results are stored to ./Pickled/Kernel_Results/")
        result_file = "./Pickled/Kernel_Results/" + result_file + "_" + str(
            rep_no) + "reps"

    #loop over learning runs defined by the number of repetitions, sigmas, and lambdas
    for i in range(rep_no):
        for s in sigmas:
            for l in lambdas:
                #for every i, s, l combination, a new Learning Object is created and stored to the learning list
                maes = []

                for sets in set_sizes:
                    t1 = tic()

                    #make training and test list:
                    training_indices, test_indices = make_training_test(
                        len(fingerprint_list), sets, upperlim=upperlimit)
                    #print("training:", training_indices)
                    #print("test:", test_indices)

                    tr_fingerprints = [fingerprint_list[i] for i in training_indices]
                    tr_properties = [property_list[i] for i in training_indices]
                    tr_size = len(training_indices)

                    tst_fingerprints = [fingerprint_list[i] for i in test_indices]
                    tst_properties = [property_list[i] for i in test_indices]

                    t2 = tic()

                    K = build_kernel_matrix(tr_fingerprints, tr_size, s)
                    t3 = tic()

                    #print("\n \n \nkernel matrix:\n ", K)

                    #get alpha coefficients
                    alphas = get_alphas(K, tr_properties, tr_size, l)
                    t4 = tic()

                    #print("\n \n \n alphas:\n ", alphas)

                    #print("trainin/test split:", t2 - t1)
                    #print("kernel matrix:", t3-t2)
                    #print("alphas calculation:", t4 - t3)

                    #predict properties of test set
                    results, errors = predict_new(s, alphas, tr_fingerprints,
                                                  tst_fingerprints,
                                                  tst_properties)
                    mae = sum(abs(errors)) / (len(errors))
                    maes.append(mae)

                    #save raw data
                    filename = './tmp/%srawdata_rep%i_sigma%s_lamda%f_set%i.dat' % (
                        representation, i, str(s), l, sets)
                    raw_data_files.append(filename)
                    save_raw_data(filename, tr_properties, training_indices,
                                  tst_properties, results, test_indices)

                #add learning result to list
                learning_list.append(
                    LearningResults(l, s, np.array(set_sizes), np.array(maes)))
        print("round %i successfully finished" % (i + 1))

    #save maes with data so it can be plotted
    datprep.store_compounds(learning_list, result_file)

    return (learning_list, raw_data_files)
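
# A minimal calling sketch for full_kernel_ridge (not from the original
# source): the variable names, grids, and file name below are illustrative.
# Internally, get_alphas presumably solves the standard KRR closed form
# alpha = (K + lambda*I)^{-1} y for the training targets y.
learning_list, raw_files = full_kernel_ridge(
    fingerprint_list=fingerprints,  # precomputed fingerprints, e.g. CM vectors
    property_list=energies,         # matching target values
    result_file="CM_learning",
    set_sizes=[10, 20, 40, 80],     # training-set sizes for the learning curve
    sigmas=[20.0, 50.0],            # kernel widths to scan
    lambdas=[1e-8, 1e-5],           # regularization strengths to scan
    rep_no=3,
    upperlimit=120,
    representation="CM")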
Example n. 39
0
maskFile = 'maskData_1APR2016.pkl'
refPointFile = 'refPoint_31OCT2015.pkl'
homographyFile = 'offCenterTopView_1APR2016.pkl' # Use None for no transform
viewResults = True
viewInterval = 1
saveInterval = 600
saveData = True
continueAnalysis = True
figNum = 123


# Cleanup existing windows
plt.close('all')
destroyAllWindows()
t0 = tic()
t1 = tic()
    
# Get list of video files to process
pathToFiles = os.path.join(folder,fileString)
fileList = glob.glob(pathToFiles)
dataList = glob.glob(os.path.join(folder,'*' + suffix))
N = len(fileList)
hMatrix = 0

for i in range(N):
    # Parse the filename to get video info
    videoPath = fileList[i]
    print(os.path.split(videoPath)[1])
    dataFile = fileList[i][:-4] + suffix
    RPM = VF.get_RPM_from_file_name(videoPath)

hybrid_sorter    = HybridSort()
merge_sorter     = MergeSort()

for attempt in range(0, M_ATTEMPTS):
	
	# initialize
	hybrid_times = []
	merge_times = []

	# generate ARRAY_SIZE random integers between -100 and 100
	input_array = generate_random_integers( ARRAY_SIZE )

	merge_sorter.get_input( input_array )

	# sort with merge_sorter and benchmark
	start_time_merge = tic()
	merge_sorter.asc_sort()
	end_time_merge   = toc()

	for K in K_VALUES:

		# insert input inside sorter objects
		hybrid_sorter.set_K( K )
		hybrid_sorter.get_input( input_array )
		
		# sort with hybrid_sorter and benchmark
		start_time_hybrid = tic()
		hybrid_sorter.asc_sort()
		end_time_hybrid   = toc()
		
		'''