Example #1
def __init__(self, system):
		self.system = system
		self.screen = array([800,800])
		
		#Initialize pygame
		pygame.init()
		pygame.display.set_caption('Dynamics Visualizer')
		pygame.display.set_mode(self.screen,OPENGL|DOUBLEBUF)
		
		glViewport(0, 0, self.screen[0], self.screen[1])

		glPixelStorei(GL_PACK_ALIGNMENT,1)			
		glPixelStorei(GL_UNPACK_ALIGNMENT,1)
		
		#HACK: glGenTextures returns a scalar for a single texture and an array otherwise, so normalize the result to a list
		shape_list = system.shape_db.shape_list
		num_shapes = len(shape_list)
		if(num_shapes == 0):
			self.textures = []
		elif(num_shapes == 1):
			self.textures = [glGenTextures(num_shapes)]
		else:
			self.textures = glGenTextures(num_shapes)
		
		#Cache all of the textures
		for s in shape_list:
			glBindTexture(GL_TEXTURE_RECTANGLE_ARB, self.textures[s.shape_num])
			glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
			glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
			glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP);
			glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP);
			
			#Need to flatten array and threshold colors properly
			s_flat = array(255. * s.indicator / max(s.indicator.flatten()), dtype('uint8'))
			glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_LUMINANCE, s_flat.shape[1], s_flat.shape[0], 0, GL_RED, GL_UNSIGNED_BYTE, s_flat.tostring('C'))
Example #2
 def __init__(self, renderer=True, realtime=True, ip="127.0.0.1", port="21560"):
     # initialize base class
     GraphicalEnvironment.__init__(self)
     self.actLen=12
     self.mySensors=sensors.Sensors(["EdgesReal"])
     self.dists=array([20.0, sqrt(2.0)*20, sqrt(3.0)*20])
     self.gravVect=array([0.0,-100.0,0.0])
     self.centerOfGrav=zeros((1,3),float)
     self.pos=ones((8,3),float)
     self.vel=zeros((8,3),float)
     self.SpringM = ones((8,8),float)
     self.d=60.0
     self.dt=0.02
     self.startHight=10.0
     self.dumping=0.4
     self.fraktMin=0.7
     self.fraktMax=1.3
     self.minAkt=self.dists[0]*self.fraktMin
     self.maxAkt=self.dists[0]*self.fraktMax
     self.reset()
     self.count=0
     self.setEdges()
     self.act(array([20.0]*12))
     self.euler()
     self.realtime=realtime
     self.step=0
     if renderer:
         self.setRenderInterface(FlexCubeRenderInterface(ip, port))
         self.getRenderInterface().updateData(self.pos, self.centerOfGrav)
Example #3
def cov_dvrpmllbb_to_vxyz_single(d,e_d,e_vr,pmll,pmbb,cov_pmllbb,l,b):
    """
    NAME:
       cov_dvrpmllbb_to_vxyz
    PURPOSE:
       propagate distance, radial velocity, and proper motion uncertainties to
       Galactic coordinates for scalar inputs
    INPUT:
       d - distance [kpc, as/mas for plx]
       e_d - distance uncertainty [kpc, [as/mas] for plx]
       e_vr - line-of-sight velocity uncertainty [km/s]
       pmll - proper motion in l (*cos(b)) [ [as/mas]/yr ]
       pmbb - proper motion in b [ [as/mas]/yr ]
       cov_pmllbb - uncertainty covariance for proper motion
       l - Galactic longitude [rad]
       b - Galactic latitude [rad]
    OUTPUT:
       cov(vx,vy,vz) [3,3]
    HISTORY:
       2010-04-12 - Written - Bovy (NYU)
    """
    M= _K*sc.array([[pmll,d,0.],[pmbb,0.,d]])
    cov_dpmllbb= sc.zeros((3,3))
    cov_dpmllbb[0,0]= e_d**2.
    cov_dpmllbb[1:3,1:3]= cov_pmllbb
    cov_vlvb= sc.dot(M,sc.dot(cov_dpmllbb,M.T))
    cov_vrvlvb= sc.zeros((3,3))
    cov_vrvlvb[0,0]= e_vr**2.
    cov_vrvlvb[1:3,1:3]= cov_vlvb
    R= sc.array([[m.cos(l)*m.cos(b), m.sin(l)*m.cos(b), m.sin(b)],
                 [-m.sin(l),m.cos(l),0.],
                 [-m.cos(l)*m.sin(b),-m.sin(l)*m.sin(b), m.cos(b)]])
    return sc.dot(R.T,sc.dot(cov_vrvlvb,R))
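
# Hypothetical usage sketch for the function above; `sc` (scipy), `m` (math)
# and the conversion constant `_K` are assumed module-level names, with
# _K ~ 4.74047 km/s per kpc*(mas/yr) as in galpy-style code. Values are
# illustrative only.
import math as m
import scipy as sc
_K = 4.74047

cov_pm = sc.array([[1.0, 0.1], [0.1, 2.0]])  # proper-motion covariance [2,2]
cov_v = cov_dvrpmllbb_to_vxyz_single(1.0, 0.1, 2.0, 20., -10.,
                                     cov_pm, 0.5, 0.1)
print(cov_v.shape)  # (3, 3): covariance of (vx, vy, vz)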
Example #4
    def __init__(self, servIP="127.0.0.1", ownIP="127.0.0.1", port="21560"):
        self.oldScreenValues = None
        self.view = 0
        self.worldRadius = 400
        
        # Start of mousepointer 
        self.lastx = 0
        self.lasty = 15
        self.lastz = 300
        self.zDis = 1
   
        # Start of cube 
        self.cube = [0.0, 0.0, 0.0]
        self.bmpCount = 0
        self.actCount = 0
        self.calcPhysics = 0
        self.newPic = 1
        self.picCount = 0
        self.target = array([80.0, 0.0, 0.0])
      
        self.centerOfGrav = array([0.0, -2.0, 0.0])
        self.points = ones((8, 3), float)
        self.savePics = False
        self.drawCounter = 0
        self.fps = 25
        self.dt = 1.0 / float(self.fps)

        self.client = UDPClient(servIP, ownIP, port)
Example #5
def BowTieH_paramSet( paramSet ):
    """
    The Hamiltonian for a single bowtie, (2*S + 1)^5 states, for a set of d
    values -- this is implemented as a generator function
    """
    
    A = BowTieAdjacencyDic()
    
    #   Break the parameter set into sub-sets based on the spin value.
    paramSetDic = {}
    paramSet = scipy.array( paramSet )
    
    for row in paramSet:
        sNow = row[0]
        if sNow not in paramSetDic:
            paramSetDic[ sNow ] = []
        
        paramSetDic[ sNow ].append( row )
    
    for S in paramSetDic:
        paramSetDic[S] = scipy.array( paramSetDic[S] )
    
        N = (2*S + 1)**5
        
        sPlusMinus, sMinusPlus, sZZ, sZ2 = HParts( S, A )
        
        for params in paramSetDic[S]:
            S, Jx, Jz, d = params
            H = Jx * .5 * ( sPlusMinus + sMinusPlus ) + Jz * sZZ + d * sZ2
            yield (H, params)
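
# Hypothetical usage sketch: BowTieAdjacencyDic and HParts are assumed to be
# defined elsewhere in this module; each paramSet row is (S, Jx, Jz, d).
rows = [[0.5, 1.0, 1.0, 0.1],
        [0.5, 1.0, 0.5, 0.2]]
for H, params in BowTieH_paramSet(rows):
    print(params)  # one (2*S + 1)**5-state Hamiltonian is yielded per row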
Example #6
def _non_dominated_front_arr(iterable, key=lambda x: x, allowequality=True):
    """Return a subset of items from iterable which are not dominated by any
    other item in iterable.

    Faster version, based on boolean matrix manipulations.
    """
    items = list(iterable)
    fits = map(key, items)
    l = len(items)
    x = array(fits)
    a = tile(x, (l, 1, 1))
    b = a.transpose((1, 0, 2))
    if allowequality:
        ndom = sum(a <= b, axis=2)
    else:
        ndom = sum(a < b, axis=2)
    ndom = array(ndom, dtype=bool)
    res = set()
    for ii in range(l):
        res.add(ii)
        for ij in list(res):
            if ii == ij:
                continue
            if not ndom[ij, ii]:
                res.remove(ii)
                break
            elif not ndom[ii, ij]:
                res.remove(ij)
    return set(map(lambda i: items[i], res))
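
# Minimal usage sketch (minimization: a point is dropped only when some other
# point beats it in every objective). Assumes the scipy names used above
# (array, tile, sum) are module-level imports.
pts = [(1, 4), (2, 2), (3, 1), (3, 3), (4, 4)]
front = _non_dominated_front_arr(pts)
print(sorted(front))  # [(1, 4), (2, 2), (3, 1)]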
Example #7
    def building_loss(self, ci=None, loss_aus_contents=0):
        damage_states = self.get_building_states()
        total_costs = self.structures.cost_breakdown(ci=ci)

        (structure_state, non_structural_state,
            acceleration_sensitive_state) = damage_states
        (structure_cost, non_structural_cost,
            acceleration_cost, contents_cost) = total_costs

        # hardwired loss for each damage state
        f1 = array((0.02, 0.1, 0.5, 1.0))[newaxis, newaxis, :]
        f2 = array((0.02, 0.1, 0.5, 1.0))[newaxis, newaxis, :]
        f3 = array((0.02, 0.1, 0.3, 1.0))[newaxis, newaxis, :]
        f4 = array((0.01, 0.05, 0.25, 0.5))[newaxis, newaxis, :]
        if loss_aus_contents == 1:
            f4 = f4 * 2  # 100% contents loss if building collapses

        structure_ratio = (f1 * structure_state)  # .sum(axis=-1)
        nsd_ratio = (f2 * non_structural_state)  # .sum(axis=-1)
        accel_ratio = (f3 * acceleration_sensitive_state)  # .sum(axis=-1)
        contents_ratio = (f4 * acceleration_sensitive_state)  # .sum(axis=-1)
        loss_ratio = (structure_ratio, nsd_ratio, accel_ratio, contents_ratio)

        structure_loss = structure_ratio * structure_cost[:, newaxis, newaxis]
        nsd_loss = nsd_ratio * non_structural_cost[:, newaxis, newaxis]
        accel_loss = accel_ratio * acceleration_cost[:, newaxis, newaxis]
        contents_loss = contents_ratio * contents_cost[:, newaxis, newaxis]

        total_loss = (structure_loss, nsd_loss, accel_loss, contents_loss)

        return (loss_ratio, total_loss)
Example #8
    def __init__(self, R, a, B0, Ip, betat, length_unit="m", npts=257):
        # instantiate superclass, forcing time splining to false (no time variation
        # in equilibrium)
        super(CircSolovievEFIT, self).__init__(length_unit=length_unit, tspline=False, monotonic=False)

        self._defaultUnits = {}

        self._R = R
        self._defaultUnits["_R"] = length_unit
        self._a = a
        self._defaultUnits["_a"] = length_unit
        self._B0 = B0
        self._defaultUnits["_B0"] = "T"
        self._Ip = Ip
        self._defaultUnits["_Ip"] = "MA"
        self._betat = betat
        self._npts = npts

        self._currentSign = -1 if Ip > 0 else 1
        # Remember: Ip is in MA.
        self._qstar = (2.0 * scipy.pi * a ** 2 * B0) / (4.0 * scipy.pi * 1.0e-1 * R * Ip)

        # flux definitions
        self._psiLCFS = scipy.array([0.0])
        self._psi0 = -0.5 * self._B0 * self._a ** 2 / self._qstar
        self._psi0 = scipy.array([self._psi0])

        # RZ grid
        self._rGrid = scipy.linspace(R - 1.25 * a, R + 1.25 * a, self._npts)
        self._defaultUnits["_rGrid"] = length_unit
        self._zGrid = scipy.linspace(-1.25 * a, 1.25 * a, self._npts)
        self._defaultUnits["_zGrid"] = length_unit

        self._psiRZ = self.rz2psi_analytic(self._rGrid, self._zGrid, length_unit=length_unit, make_grid=True)
        self._psiRZ = scipy.reshape(self._psiRZ, (1, npts, npts))
Example #9
	def _initParams_fast(self):
		""" 
		initialize the gp parameters
			1) project Y on the known factor X0 -> Y0
				average variance of Y0 is used to initialize the variance explained by X0
			2) considers the residual Y1 = Y-Y0 (this is equivalent to regressing out X0)
			3) perform PCA on cov(Y1) and considers the first k PC for initializing X
			4) the variance of all other PCs is used to initialize the noise
			5) the variance explained by interaction is set to a small random number 
		"""
		Xd = LA.pinv(self.X0)
		Y0 = self.X0.dot(Xd.dot(self.Y))
		Y1 = self.Y-Y0
		YY = SP.cov(Y1)
		S,U = LA.eigh(YY)
		X = U[:,-self.k:]*SP.sqrt(S[-self.k:])
		a = SP.array([SP.sqrt(Y0.var(0).mean())])
		b = 1e-3*SP.randn(1)
		c = SP.array([SP.sqrt((YY-SP.dot(X,X.T)).diagonal().mean())])
		# gp hyper params
		params = limix.CGPHyperParams()
		if self.interaction:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
		else:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
		params['lik'] = c
		return params
Example #10
def benchmark_symbols_all_at_once(n_pts=1000,sz=(1000,1000)):
    """
    Renders all the symbols.
    """
    width,height = sz
    pts = stats.norm.rvs(size=(n_pts,2)) * array(sz)/8. + array(sz)/2.
    star_path = agg.CompiledPath()
    star_path.lines(circle_array())


    gc = agg.GraphicsContextArray(sz)
    gc.set_fill_color((1.0,0.0,0.0,0.1))
    gc.set_stroke_color((0.0,1.0,0.0,0.6))
    path = agg.CompiledPath()
    t1 = time.clock()
    for x,y in pts:
        path.save_ctm()
        path.translate_ctm(x,y)
        path.add_path(star_path)
        path.restore_ctm()
    gc.add_path(path)
    t2 = time.clock()
    gc.draw_path()
    t3 = time.clock()
    gc.save("benchmark_symbols2.bmp")
    build_path_time = t2 - t1
    render_path_time = t3 - t2
    tot_time = t3 - t1
    print 'star count, tot,building path, rendering path:', n_pts, \
          tot_time, build_path_time,render_path_time
    return
Example #11
 def objfn_data_to_mesh_project(self, x0, args):
     mesh, Xd, Td = args[0], args[1], args[2]
     mesh.set_variables(x0)
     err = scipy.zeros(Xd.shape[0])
     ind = 0
     for xd in Xd:
         xi1 = mesh.elements[1].project(xd)
         xi2 = mesh.elements[2].project(xd)
          if 0<=xi1<=1:
              xi = xi1
              elem = 1
          elif 0<=xi2<=1:
              xi = xi2
              elem = 2
         else:
             Xi = scipy.array([xi1, xi1-1, xi2, xi2-1])
             Xi2 = Xi*Xi
             ii = Xi2.argmin()
             xi = Xi[ii]
             if ii < 2:
                 elem = 1
             else:
                 elem = 2
         dx = mesh.elements[elem].evaluate(scipy.array([xi]))[0] - xd
         err[ind] = scipy.sum(dx * dx)
         ind += 1
     return err
Example #12
def parse_text( fname, n ):
    """Parse a text file containing a sentence on each line. 
    Output the file as a list of arrays with integer word indices and
    the corresponding dictionary."""

    dictionary = {}
    inv_dictionary = []
    def get_idx( w ):
        if w not in dictionary:
            dictionary[w] = len(dictionary)
            inv_dictionary.append( w )
        return dictionary[w]

    corpus = []

    f = open( fname )
    for line in f.xreadlines():
        # Read at most n sentences
        if len(corpus) >= n:
            break
        words = line.split()
        words = array( map( get_idx, words ) )
        if len(words) > 0:
            corpus.append( words )
    f.close()

    return array(corpus), array(inv_dictionary)
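
# Usage sketch: build a tiny corpus file (hypothetical name), then parse it.
with open('corpus.txt', 'w') as fout:
    fout.write('the cat sat\nthe dog ran\n')
corpus, inv_dictionary = parse_text('corpus.txt', 100)
print(inv_dictionary)  # vocabulary, indexed by word id
print(corpus[0])       # word indices of the first sentence, e.g. [0 1 2]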
Example #13
def pos2Ray(pos, tokamak, angle=None, eps=1e-6):
    r"""Take in GENIE pos vectors and convert it into TRIPPy rays
    
    Args:
        pos: 4 element tuple or 4x scipy-array
            Each pos is assembled into points of (R1,Z1,RT,phi)

        tokamak: 
            Tokamak object in which the pos vectors are defined.
            
    Returns:
        Ray: Ray object or tuple of ray objects.
        
    """

    r1 = scipy.array(pos[0])
    z1 = scipy.array(pos[1])
    rt = scipy.array(pos[2])
    phi = scipy.array(pos[3])

    zt = z1 - scipy.tan(phi)*scipy.sqrt(r1**2 - rt**2)
    angle2  = scipy.arccos(rt/r1)

    if angle is None:
        angle = scipy.zeros(r1.shape)

    pt1 = geometry.Point((r1,angle,z1),tokamak)
    pt2 = geometry.Point((rt,angle+angle2,zt),tokamak)

    output = Ray(pt1,pt2)
    output.norm.s[-1] = eps
    tokamak.trace(output)
    return output
Example #14
def buildSharedCrossedNetwork():
    """ build a network with shared connections. Two hidden modules are
    symmetrically linked, but to a different input neuron than the
    output neuron. The shared connection weights are fixed at 1 and 2 below. """
    N = FeedForwardNetwork('shared-crossed')
    h = 1
    a = LinearLayer(2, name = 'a')
    b = LinearLayer(h, name = 'b')
    c = LinearLayer(h, name = 'c')
    d = LinearLayer(2, name = 'd')
    N.addInputModule(a)
    N.addModule(b)
    N.addModule(c)
    N.addOutputModule(d)

    m1 = MotherConnection(h)
    m1.params[:] = scipy.array((1,))

    m2 = MotherConnection(h)
    m2.params[:] = scipy.array((2,))

    N.addConnection(SharedFullConnection(m1, a, b, inSliceTo = 1))
    N.addConnection(SharedFullConnection(m1, a, c, inSliceFrom = 1))
    N.addConnection(SharedFullConnection(m2, b, d, outSliceFrom = 1))
    N.addConnection(SharedFullConnection(m2, c, d, outSliceTo = 1))
    N.sortModules()
    return N
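
# Usage sketch (PyBrain): the sorted network can be activated directly.
net = buildSharedCrossedNetwork()
print(net.activate([0.5, -0.25]))  # 2-element output vector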
Example #15
    def __init__(self, x, y, z, bbox=[None] *4, kx=3, ky=3, s=0, bounds_error=True, fill_value=scipy.nan):

        super(RectBivariateSpline, self).__init__( x, y, z, bbox=bbox, kx=kx, ky=ky, s=s)
        self._xlim = scipy.array((x.min(), x.max()))
        self._ylim = scipy.array((y.min(), y.max()))
        self.bounds_error = bounds_error
        self.fill_value = fill_value
Example #16
    def run(self,phase=None):
        r'''
        '''
        logger.warning('This algorithm can take some time...')
        graph = self._net.create_adjacency_matrix(data=self._net['throat.length'],sprsfmt='csr')

        if phase is not None:
            self._phase = phase
            if 'throat.occupancy' in self._phase.props():
                temp = self._net['throat.length']*(self._phase['throat.occupancy']==1)
                graph = self._net.create_adjacency_matrix(data=temp,sprsfmt='csr',prop='temp')

        #self._net.tic()
        path = spgr.shortest_path(csgraph = graph, method='D', directed = False)
        #self._net.toc()

        Px = sp.array(self._net['pore.coords'][:,0],ndmin=2)
        Py = sp.array(self._net['pore.coords'][:,1],ndmin=2)
        Pz = sp.array(self._net['pore.coords'][:,2],ndmin=2)

        Cx = sp.square(Px.T - Px)
        Cy = sp.square(Py.T - Py)
        Cz = sp.square(Pz.T - Pz)
        Ds = sp.sqrt(Cx + Cy + Cz)

        temp = path/Ds
        #temp = path

        temp[sp.isnan(temp)] = 0
        temp[sp.isinf(temp)] = 0

        return temp
Example #17
def GenerateLabels(n):
    " Get proper labeling for output states. "
    # Generate bitstrings
    bitstring = []
    for i in range(0,n+1): 
        bitstring.append(kbits(n, i))
    # Flatten
    bitstring = list(itertools.chain.from_iterable(bitstring))
    # Generate unit vectors
    statelist = []
    poslist = []
    pos = 0
    unit0 = sp.array([1,0])
    unit1 = sp.array([0,1])
    for i in range(len(bitstring)):
        # Construct unit vector corresponding to bitstring
        state = unit1 if (bitstring[i][0] == '1') else unit0
        for j in range(n-1):
            state = sp.kron(state, 
                            unit1 if (bitstring[i][j+1] == '1') else unit0)
        statelist.append(state)
        # Record orientation of unit vector (using position of 1 value)
        for j in range(2**n):
            if (statelist[-1][j]):
                pos = j
                break
        poslist.append(pos)
    # Sort the states
    sortperm = sp.array(poslist).argsort()
    bitstring = [ bitstring[i] for i in sortperm ]

    return bitstring
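
# Usage sketch, assuming kbits(n, k) (defined elsewhere in this module)
# returns all n-bit strings with exactly k ones.
print(GenerateLabels(2))  # ['00', '01', '10', '11'], ordered by basis index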
Example #18
def plot_posterior_ci(locs, mean, sd, color, alpha_multiplier=0.1, rm=True):
    x_ci = SP.array(list(locs) + list(locs)[::-1])
    y_ci = SP.array(list(mean) + list(mean)[::-1])
    if rm: y_ci = 1. - y_ci
    sds = SP.array(list(sd) + list(-sd)[::-1])
    PL.fill(x_ci, y_ci + sds, color, alpha=alpha_multiplier)
    PL.fill(x_ci, y_ci + 2*sds, color, alpha=2*alpha_multiplier) 
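
# Usage sketch: shade 1- and 2-standard-deviation bands around a mean curve.
# SP and PL are the module-level scipy/pylab aliases assumed by the function.
import scipy as SP
import pylab as PL
locs = SP.linspace(0., 1., 50)
mean = SP.linspace(0.2, 0.8, 50)
sd = 0.05 * SP.ones(50)
plot_posterior_ci(locs, mean, sd, 'b', rm=False)
PL.show()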
Example #19
def vertsEdges(ann):
	num_layers = len(num_hidden) + 2
	nodes = []
	for i in range(num_layers):
		nodes.append([])
	for c in ann.connections:
		index = -1
		if c._name == 'in':
			index = 0
		elif c._name[:-1] == 'hidden':
			index = 1 + int(c._name[-1])
		elif c._name == 'out':
			index = num_layers - 1
		if index >= 0:
			nodes[index] = c.inputbuffer.tolist()
	edges = []
	for i in range(num_layers - 1):
		edges.append([])
	for mod in ann.modules:
		for conn in ann.connections[mod]:
			layer1, layer2 = mod.name, conn.outmod.name
			if layer1 == 'in' and layer2[:-1] == 'hidden':
				index = 0
			elif layer1[:-1] == 'hidden' and layer2 == 'out':
				index = num_layers - 2
			elif layer1 != 'bias':
				index = int(layer1[-1])
			if layer1 != 'bias':
				print index, shapes, shape(array(conn.params))
				edges[index] = array(conn.params).reshape(shapes[index]).tolist()
	return nodes, edges
Example #20
def read_as_array(filename):
    """reads audio file as scipy array using gstreamer framework

    return:
        data as scipy array
        duration in seconds
        channels as int
        samplerate
    """
    path = os.path.abspath(os.path.expanduser(filename))
    with GstAudioFile(path) as f:
        samplerate = f.samplerate
        duration = float(f.duration) / 1000000000 # in seconds
        channels = f.channels

        data_left = []
        data_right = []
        data_mono = []
        for s in f:
            # http://docs.python.org/library/struct.html
            # little or big endian is chosen automatically by python
            # if it's stereo (2 channels), samples alternate: left, right, left, ...
            # every short (h) is 2 bytes long; on stereo the other channel is skipped with two pad bytes (xx)
            if channels == 1:
                data_mono += list(struct.unpack( ("h"*(len(s)/2)), s))
            elif channels == 2:
                data_left += list(struct.unpack( ("hxx"*(len(s)/4)), s))
                data_right += list(struct.unpack( ("xxh"*(len(s)/4)), s))

        if channels == 1:
            data = scipy.array(data_mono)
        elif channels == 2:
            data = scipy.array([data_left, data_right])

    return data, duration, channels, samplerate
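
# Usage sketch (requires the GStreamer-backed GstAudioFile class and a real
# audio file; the path here is hypothetical).
data, duration, channels, samplerate = read_as_array('~/clip.wav')
print('%.2f s, %d channel(s) at %d Hz' % (duration, channels, samplerate))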
Example #21
def plot_genome(out_file, data_file, samples, dpi=300, screen=False):
    if screen: PL.rcParams.update(PLOT_PARAMS_SCREEN)
    LOG.info("plot_genome - out_file=%s, data_file=%s, samples=%s, dpi=%d"%(out_file, data_file, str(samples), dpi))
    colors = 'bgryckbgryck'

    data = read_posterior(data_file)
    if samples is None or len(samples) == 0: samples = data.keys()
    if len(samples) == 0: return

    PL.figure(None, [14, 4])
    right_end = 0 # rightmost plotted base pair
    for chrm in sort_chrms(data.values()[0]): # for chromosomes in ascending order
        max_site = max(data[samples[0]][chrm]['L']) # length of chromosome
        for s, sample in enumerate(samples): # plot all samples
            I = SP.where(SP.array(data[sample][chrm]['SD']) < 0.3)[0] # at sites that have confident posteriors
            PL.plot(SP.array(data[sample][chrm]['L'])[I] + right_end, SP.array(data[sample][chrm]['AF'])[I], alpha=0.4, color=colors[s], lw=2) # offset by the end of last chromosome
        if right_end > 0: PL.plot([right_end, right_end], [0,1], 'k--', lw=0.4, alpha=0.2) # plot separators between chromosomes
        new_right = right_end + max(data[sample][chrm]['L'])
        PL.text(right_end + 0.5*(new_right - right_end), 0.9, str(chrm), horizontalalignment='center')
        right_end = new_right # update rightmost end
    PL.plot([0,right_end], [0.5,0.5], 'k--', alpha=0.3)
    PL.xlim(0,right_end)
    xrange = SP.arange(0,right_end, 1000000)
    PL.xticks(xrange, ["%d"%(int(x/1000000)) for x in xrange])
    PL.xlabel("Genome (Mb)"), PL.ylabel("Reference allele frequency")
    PL.savefig(out_file, dpi=dpi)
Example #22
def ensure_numeric(A, typecode=None):
    """Ensure that sequence is a Numeric array.
    Inputs:
        A: Sequence. If A is already a Numeric array it will be returned
                     unaltered
                     If not, an attempt is made to convert it to a Numeric
                     array
        A: Scalar.   Return a 0-dimensional array containing that value
        A: String.   Array of ASCII values
        typecode: Numeric type. If specified, use this in the conversion.
                                If not, let Numeric decide

    This function is necessary as array(A) can cause memory overflow.
    """

    if typecode is None:
        if isinstance(A, ndarray):
            return A
        else:
            return array(A)
    else:
        if isinstance(A, ndarray):
            if A.dtype == typecode:
                return array(A)  # FIXME: Shouldn't this just return A?
            else:
                return array(A, typecode)
        else:
            import types
            if isinstance(A, types.StringType):
                return array(A, dtype=int)
            return array(A, typecode)
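
# Usage sketch: lists, scalars and existing arrays all come back as arrays.
from numpy import ndarray, array
print(ensure_numeric([1, 2, 3]))       # array([1, 2, 3])
print(ensure_numeric(3.14).shape)      # () -- 0-dimensional scalar array
print(ensure_numeric([1, 2, 3], 'd'))  # array([ 1.,  2.,  3.])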
Example #23
    def __init__(self, x, y, z, a, g, h):
        """
		Construct a Scatterer object, encapsulating the shape and material
		properties of a deformed-cylindrical object with sound speed and
		density similar to the surrounding fluid medium.

		Parameters
		----------
		x, y, z : array-like
			Positions delimiting the central axis of the scatterer.
		a : array-like
			Array of radii along the centerline of the scatterer.
		g : array-like
			Array of sound speed contrasts (sound speed inside the scatterer
			divided by sound speed in the surrounding medium)
		h : array-like
			Array of density contrasts (density inside the scatterer
			divided by density in the surrounding medium)

		"""
        super(Scatterer, self).__init__()
        self.r = sp.matrix([x, y, z])
        self.a = sp.array(a)
        self.g = sp.array(g)
        self.h = sp.array(h)
        self.cum_rotation = sp.matrix(sp.eye(3))
Example #24
    def test_gpkronprod(self):
       # initialize
       covar_c = linear.LinearCF(n_dimensions=self.n_latent)
       covar_r = linear.LinearCF(n_dimensions=self.n_dimensions)
       X0_c = SP.random.randn(self.n_tasks,self.n_latent)
       
       lik = likelihood_base.GaussIsoLik()
       gp = gp_kronprod.KronProdGP(covar_c=covar_c, covar_r=covar_r, likelihood=lik)
       gp.setData(Y=self.Ykronprod['train'],X_r=self.X['train'])
       hyperparams = {'lik':SP.array([0.5]), 'X_c':X0_c, 'covar_r':SP.array([0.5]), 'covar_c':SP.array([0.5]), 'X_r':self.X['train']}
       # check predictions, likelihood and gradients
       gp.predict(hyperparams,Xstar_r=self.X['test'],debugging=True)

       gp._LML_covar(hyperparams,debugging=True)
       gp._LMLgrad_covar(hyperparams,debugging=True)
       gp._LMLgrad_lik(hyperparams,debugging=True)
       gp._LMLgrad_x(hyperparams,debugging=True)
       
       # optimize
       hyperparams = {'lik':SP.array([0.5]), 'X_c':X0_c, 'covar_r':SP.array([0.5]), 'covar_c':SP.array([0.5])}
       opts = {'gradcheck':True}
       hyperparams_opt,lml_opt = optimize_base.opt_hyper(gp,hyperparams,opts=opts)
       Kest = covar_c.K(hyperparams_opt['covar_c'])

       # check predictions, likelihood and gradients
       gp._invalidate_cache() # otherwise debugging parameters are not up to date!
       gp.predict(hyperparams_opt,debugging=True,Xstar_r=self.X['test'])
       gp._LML_covar(hyperparams_opt,debugging=True)
       gp._LMLgrad_covar(hyperparams_opt,debugging=True)
       gp._LMLgrad_lik(hyperparams_opt,debugging=True)
       gp._LMLgrad_x(hyperparams_opt,debugging=True)
Example #25
def getStepWindow(t, v):
  # return time and voltage vectors during the stimulus period only
  
  # find the point of maximum voltage, and cut off everything afterwards
  maxInd, maxV = max(enumerate(v), key=lambda x: x[1])
  minInd, minV = min(enumerate(v), key=lambda x: x[1])
  if maxV - v[0] > v[0] - minV:
    # this is a positive step
    t = t[:maxInd]
    v = scipy.array(v[:maxInd])
  else:
    # this is a negative step, flip it for now
    t = t[:minInd]
    v = v[0] - scipy.array(v[:minInd])
  
  # re-center time to start at the point of maximum voltage change
  diffV = diff(v)
  dVInd, maxDV = max(enumerate(diffV), key=lambda x: x[1])
  dVInd -= 1
  while diffV[dVInd] > 0:
    dVInd -= 1
  dVInd += 1
  
  t -= t[dVInd]
  v -= v[dVInd]
  
  return t, v, dVInd
Example #26
def ea_calc(airtemp= scipy.array([]),\
            rh= scipy.array([])):
    '''
    Function to calculate actual vapour pressure from relative humidity:
    
    .. math::    
        e_a = \\frac{rh \\cdot e_s}{100}
        
    where es is the saturated vapour pressure at temperature T.

    Parameters:
        - airtemp: array of measured air temperatures [Celsius].
        - rh: Relative humidity [%].

    Returns:
        - ea: array of actual vapour pressure [Pa].

    Examples
    --------
    
        >>> ea_calc(25,60)
        1900.0946514729308

    '''
    
    # Test input array/value
    airtemp,rh = _arraytest(airtemp, rh)

    # Calculate saturation vapour pressures
    es = es_calc(airtemp)
    # Calculate actual vapour pressure
    eact = rh / 100.0 * es
    return eact # in Pa
Example #27
def vpd_calc(airtemp= scipy.array([]),\
             rh= scipy.array([])):
    '''
    Function to calculate vapour pressure deficit.

    Parameters:
        - airtemp: measured air temperatures [Celsius].
        - rh: (array of) relative humidity [%].
        
    Returns:
        - vpd: (array of) vapour pressure deficits [Pa].
        
    Examples
    --------
    
        >>> vpd_calc(30,60)
        1697.090397862653
        >>> T=[20,25]
        >>> RH=[50,100]
        >>> vpd_calc(T,RH)
        array([ 1168.54009896,     0.        ])
        
    '''
    
    # Test input array/value
    airtemp,rh = _arraytest(airtemp, rh)
    
    # Calculate saturation vapour pressures
    es = es_calc(airtemp)
    eact = ea_calc(airtemp, rh) 
    # Calculate vapour pressure deficit
    vpd = es - eact
    return vpd # in Pa
Example #28
    def _setnorm(self, input = None, target = None):
        """
        Retrieves normalization info from training data and normalizes data.
        """
        numi = len(self.inno); numo = len(self.outno)
        if input is None and target is None:
            self.inlimits  = array( [[0.15, 0.85]]*numi ) #informative only
            self.outlimits = array( [[0.15, 0.85]]*numo ) #informative only
            self.eni = self.dei = array( [[1., 0.]] * numi )
            self.eno = self.deo = array( [[1., 0.]] * numo )
            self.ded = ones((numo, numi), 'd')
        else:
            input, target = self._testdata(input, target)
            
            # Warn if any input or target node takes a one single value
            # I'm still not sure where to put this check....
            for i, col in enumerate(input.transpose()):
                if max(col) == min(col):
                    print "Warning: %ith input node takes always a single value of %f." %(i+1, max(col))

            for i, col in enumerate(target.transpose()):
                if max(col) == min(col):
                    print "Warning: %ith target node takes always a single value of %f." %(i+1, max(col))
            
            #limits are informative only, eni,dei/eno,deo are input/output coding-decoding
            self.inlimits, self.eni, self.dei = _norms(input, lower=0.15, upper=0.85)
            self.outlimits, self.eno, self.deo = _norms(target, lower=0.15, upper=0.85)
            self.ded = zeros((numo,numi), 'd')
            for o in xrange(numo):
                for i in xrange(numi):
                    self.ded[o,i] = self.eni[i,0] * self.deo[o,0]
            return _normarray(input, self.eni), _normarray(target, self.eno)
Example #29
    def run(self, phase=None, throats=None):
        logger.warning('This algorithm can take some time...')
        conduit_lengths = sp.sum(misc.conduit_lengths(network=self._net,
                                 mode='centroid'), axis=1)
        graph = self._net.create_adjacency_matrix(data=conduit_lengths,
                                                  sprsfmt='csr')

        if phase is not None:
            self._phase = phase
            if 'throat.occupancy' in self._phase.props():
                temp = conduit_lengths*(self._phase['throat.occupancy'] == 1)
                graph = self._net.create_adjacency_matrix(data=temp,
                                                          sprsfmt='csr',
                                                          prop='temp')
        path = spgr.shortest_path(csgraph=graph, method='D', directed=False)

        Px = sp.array(self._net['pore.coords'][:, 0], ndmin=2)
        Py = sp.array(self._net['pore.coords'][:, 1], ndmin=2)
        Pz = sp.array(self._net['pore.coords'][:, 2], ndmin=2)

        Cx = sp.square(Px.T - Px)
        Cy = sp.square(Py.T - Py)
        Cz = sp.square(Pz.T - Pz)
        Ds = sp.sqrt(Cx + Cy + Cz)

        temp = path / Ds

        temp[sp.isnan(temp)] = 0
        temp[sp.isinf(temp)] = 0

        return temp
Example #30
def makesumrule(ptype,plen,ts,lagtype='centered'):
    """ This function will return the sum rule.
        Inputs
            ptype - The type of pulse.
            plen - Length of the pulse in seconds.
            ts - Sample time in seconds.
            lagtype -  Can be 'centered', 'forward', or 'backward'.
        Output
            sumrule - A 2 x nlags numpy array that holds the summation rule.
    """
    nlags = sp.round_(plen/ts)
    if ptype.lower()=='long':
        if lagtype=='forward':
            arback=-sp.arange(nlags,dtype=int)
            arforward = sp.zeros(nlags,dtype=int)
        elif lagtype=='backward':
            arback = sp.zeros(nlags,dtype=int)
            arforward=sp.arange(nlags,dtype=int)
        else:
            arback = -sp.ceil(sp.arange(0,nlags/2.0,0.5)).astype(int)
            arforward = sp.floor(sp.arange(0,nlags/2.0,0.5)).astype(int)
        sumrule = sp.array([arback,arforward])
    elif ptype.lower()=='barker':
        sumrule = sp.array([[0],[0]])
    return sumrule
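
# Usage sketch: lag offsets for a 280-microsecond 'long' pulse sampled every
# 20 microseconds (14 lags, centered rule by default).
import scipy as sp
rule = makesumrule('long', 280e-6, 20e-6)
print(rule.shape)  # (2, 14): backward and forward sample offsets per lag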
Example #31
def _mc_data_config(H, psi0, h_stuff, c_ops, c_stuff, args, e_ops, options):
    """Creates the appropriate data structures for the monte carlo solver
    based on the given time-dependent, or independent, format.
    """

    #take care of expectation values, if any
    if any(e_ops):
        odeconfig.e_num = len(e_ops)
        for op in e_ops:
            if isinstance(op, list):
                op = op[0]
            odeconfig.e_ops_data.append(op.data.data)
            odeconfig.e_ops_ind.append(op.data.indices)
            odeconfig.e_ops_ptr.append(op.data.indptr)
            odeconfig.e_ops_isherm.append(op.isherm)

        odeconfig.e_ops_data = array(odeconfig.e_ops_data)
        odeconfig.e_ops_ind = array(odeconfig.e_ops_ind)
        odeconfig.e_ops_ptr = array(odeconfig.e_ops_ptr)
        odeconfig.e_ops_isherm = array(odeconfig.e_ops_isherm)
    #----

    #take care of collapse operators, if any
    if any(c_ops):
        odeconfig.c_num = len(c_ops)
        for c_op in c_ops:
            if isinstance(c_op, list):
                c_op = c_op[0]
            n_op = c_op.dag() * c_op
            odeconfig.c_ops_data.append(c_op.data.data)
            odeconfig.c_ops_ind.append(c_op.data.indices)
            odeconfig.c_ops_ptr.append(c_op.data.indptr)
            #norm ops
            odeconfig.n_ops_data.append(n_op.data.data)
            odeconfig.n_ops_ind.append(n_op.data.indices)
            odeconfig.n_ops_ptr.append(n_op.data.indptr)
        #to array
        odeconfig.c_ops_data = array(odeconfig.c_ops_data)
        odeconfig.c_ops_ind = array(odeconfig.c_ops_ind)
        odeconfig.c_ops_ptr = array(odeconfig.c_ops_ptr)

        odeconfig.n_ops_data = array(odeconfig.n_ops_data)
        odeconfig.n_ops_ind = array(odeconfig.n_ops_ind)
        odeconfig.n_ops_ptr = array(odeconfig.n_ops_ptr)
    #----

    #--------------------------------------------
    # START CONSTANT H & C_OPS CODE
    #--------------------------------------------
    if odeconfig.tflag == 0:
        if odeconfig.cflag:
            odeconfig.c_const_inds = arange(len(c_ops))
            for c_op in c_ops:
                n_op = c_op.dag() * c_op
                H -= 0.5j * n_op  #combine Hamiltonian and collapse terms into one
        #construct Hamiltonian data structures
        if options.tidy:
            H = H.tidyup(options.atol)
        odeconfig.h_data = -1.0j * H.data.data
        odeconfig.h_ind = H.data.indices
        odeconfig.h_ptr = H.data.indptr
    #----

    #--------------------------------------------
    # START STRING BASED TIME-DEPENDENCE
    #--------------------------------------------
    elif odeconfig.tflag in array([1, 10, 11]):
        #take care of arguments for collapse operators, if any
        if any(args):
            for item in args.items():
                odeconfig.c_args.append(item[1])
        #constant Hamiltonian / string-type collapse operators
        if odeconfig.tflag == 1:
            H_inds = arange(1)
            H_tdterms = 0
            len_h = 1
            C_inds = arange(odeconfig.c_num)
            C_td_inds = array(c_stuff[2])  #find inds of time-dependent terms
            C_const_inds = setdiff1d(C_inds,
                                     C_td_inds)  #find inds of constant terms
            C_tdterms = [c_ops[k][1] for k in C_td_inds
                         ]  #extract time-dependent coefficients (strings)
            odeconfig.c_const_inds = C_const_inds  #store indices of constant collapse terms
            odeconfig.c_td_inds = C_td_inds  #store indices of time-dependent collapse terms

            for k in odeconfig.c_const_inds:
                H -= 0.5j * (c_ops[k].dag() * c_ops[k])
            if options.tidy:
                H = H.tidyup(options.atol)
            odeconfig.h_data = [H.data.data]
            odeconfig.h_ind = [H.data.indices]
            odeconfig.h_ptr = [H.data.indptr]
            for k in odeconfig.c_td_inds:
                op = c_ops[k][0].dag() * c_ops[k][0]
                odeconfig.h_data.append(-0.5j * op.data.data)
                odeconfig.h_ind.append(op.data.indices)
                odeconfig.h_ptr.append(op.data.indptr)
            odeconfig.h_data = -1.0j * array(odeconfig.h_data)
            odeconfig.h_ind = array(odeconfig.h_ind)
            odeconfig.h_ptr = array(odeconfig.h_ptr)
            #--------------------------------------------
            # END OF IF STATEMENT
            #--------------------------------------------

        #string-type Hamiltonian & at least one string-type collapse operator
        else:
            H_inds = arange(len(H))
            H_td_inds = array(h_stuff[2])  #find inds of time-dependent terms
            H_const_inds = setdiff1d(H_inds,
                                     H_td_inds)  #find inds of constant terms
            H_tdterms = [
                H[k][1] for k in H_td_inds
            ]  #extract time-dependent coefficients (strings or functions)
            H = array([sum(H[k] for k in H_const_inds)] +
                      [H[k][0] for k in H_td_inds
                       ])  #combine time-INDEPENDENT terms into one.
            len_h = len(H)
            H_inds = arange(len_h)
            odeconfig.h_td_inds = arange(
                1, len_h)  #store indicies of time-dependent Hamiltonian terms
            #if there are any collapse operators
            if odeconfig.c_num > 0:
                if odeconfig.tflag == 10:  #constant collapse operators
                    odeconfig.c_const_inds = arange(odeconfig.c_num)
                    for k in odeconfig.c_const_inds:
                        H[0] -= 0.5j * (c_ops[k].dag() * c_ops[k])
                    C_inds = arange(odeconfig.c_num)
                    C_tdterms = array([])
                #-----
                else:  #some time-dependent collapse terms
                    C_inds = arange(odeconfig.c_num)
                    C_td_inds = array(
                        c_stuff[2])  #find inds of time-dependent terms
                    C_const_inds = setdiff1d(
                        C_inds, C_td_inds)  #find inds of constant terms
                    C_tdterms = [
                        c_ops[k][1] for k in C_td_inds
                    ]  #extract time-dependent coefficients (strings)
                    odeconfig.c_const_inds = C_const_inds  #store indices of constant collapse terms
                    odeconfig.c_td_inds = C_td_inds  #store indices of time-dependent collapse terms
                    for k in odeconfig.c_const_inds:
                        H[0] -= 0.5j * (c_ops[k].dag() * c_ops[k])
            else:  #set empty objects if no collapse operators
                C_const_inds = arange(odeconfig.c_num)
                odeconfig.c_const_inds = arange(odeconfig.c_num)
                odeconfig.c_td_inds = array([])
                C_tdterms = array([])
                C_inds = array([])

            #tidyup
            if options.tidy:
                H = array([H[k].tidyup(options.atol) for k in range(len_h)])
            #construct data sets
            odeconfig.h_data = [H[k].data.data for k in range(len_h)]
            odeconfig.h_ind = [H[k].data.indices for k in range(len_h)]
            odeconfig.h_ptr = [H[k].data.indptr for k in range(len_h)]
            for k in odeconfig.c_td_inds:
                odeconfig.h_data.append(-0.5j * odeconfig.n_ops_data[k])
                odeconfig.h_ind.append(odeconfig.n_ops_ind[k])
                odeconfig.h_ptr.append(odeconfig.n_ops_ptr[k])
            odeconfig.h_data = -1.0j * array(odeconfig.h_data)
            odeconfig.h_ind = array(odeconfig.h_ind)
            odeconfig.h_ptr = array(odeconfig.h_ptr)
            #--------------------------------------------
            # END OF ELSE STATEMENT
            #--------------------------------------------

        #set executable code for collapse expectation values and spmv
        col_spmv_code = "state=odeconfig.colspmv(j,ODE.t,odeconfig.c_ops_data[j],odeconfig.c_ops_ind[j],odeconfig.c_ops_ptr[j],ODE.y"
        col_expect_code = "for i in odeconfig.c_td_inds: n_dp.append(odeconfig.colexpect(i,ODE.t,odeconfig.n_ops_data[i],odeconfig.n_ops_ind[i],odeconfig.n_ops_ptr[i],ODE.y"
        for kk in range(len(odeconfig.c_args)):
            col_spmv_code += ",odeconfig.c_args[" + str(kk) + "]"
            col_expect_code += ",odeconfig.c_args[" + str(kk) + "]"
        col_spmv_code += ")"
        col_expect_code += "))"
        odeconfig.col_spmv_code = compile(col_spmv_code, '<string>', 'exec')
        odeconfig.col_expect_code = compile(col_expect_code, '<string>',
                                            'exec')
        #----

        #setup ode args string
        odeconfig.string = ""
        data_range = range(len(odeconfig.h_data))
        for k in data_range:
            odeconfig.string += "odeconfig.h_data[" + str(
                k) + "],odeconfig.h_ind[" + str(
                    k) + "],odeconfig.h_ptr[" + str(k) + "]"
            if k != data_range[-1]:
                odeconfig.string += ","
        #attach args to ode args string
        if len(odeconfig.c_args) > 0:
            for kk in range(len(odeconfig.c_args)):
                odeconfig.string += "," + "odeconfig.c_args[" + str(kk) + "]"
        #----
        name = "rhs" + str(odeconfig.cgen_num)
        odeconfig.tdname = name
        cgen = Codegen(H_inds,
                       H_tdterms,
                       odeconfig.h_td_inds,
                       args,
                       C_inds,
                       C_tdterms,
                       odeconfig.c_td_inds,
                       type='mc')
        cgen.generate(name + ".pyx")
        #----
    #--------------------------------------------
    # END OF STRING TYPE TIME DEPENDENT CODE
    #--------------------------------------------

    #--------------------------------------------
    # START PYTHON FUNCTION BASED TIME-DEPENDENCE
    #--------------------------------------------
    elif odeconfig.tflag in array([2, 20, 22]):

        #take care of Hamiltonian
        if odeconfig.tflag == 2:  # constant Hamiltonian, at least one function based collapse operators
            H_inds = array([0])
            H_tdterms = 0
            len_h = 1
        else:  # function based Hamiltonian
            H_inds = arange(len(H))
            H_td_inds = array(h_stuff[1])  #find inds of time-dependent terms
            H_const_inds = setdiff1d(H_inds,
                                     H_td_inds)  #find inds of constant terms
            odeconfig.h_funcs = array([H[k][1] for k in H_td_inds])
            odeconfig.h_func_args = args
            Htd = array([H[k][0] for k in H_td_inds])
            odeconfig.h_td_inds = arange(len(Htd))
            H = sum(H[k] for k in H_const_inds)

        #take care of collapse operators
        C_inds = arange(odeconfig.c_num)
        C_td_inds = array(c_stuff[1])  #find inds of time-dependent terms
        C_const_inds = setdiff1d(C_inds,
                                 C_td_inds)  #find inds of constant terms
        odeconfig.c_const_inds = C_const_inds  #store indicies of constant collapse terms
        odeconfig.c_td_inds = C_td_inds  #store indicies of time-dependent collapse terms
        odeconfig.c_funcs = zeros(odeconfig.c_num, dtype=FunctionType)
        for k in odeconfig.c_td_inds:
            odeconfig.c_funcs[k] = c_ops[k][1]
        odeconfig.c_func_args = args

        #combine constant collapse terms with constant H and construct data
        for k in odeconfig.c_const_inds:
            H -= 0.5j * (c_ops[k].dag() * c_ops[k])
        if options.tidy:
            H = H.tidyup(options.atol)
            Htd = array(
                [Htd[j].tidyup(options.atol) for j in odeconfig.h_td_inds])
            #set up constant H terms data
        odeconfig.h_data = -1.0j * H.data.data
        odeconfig.h_ind = H.data.indices
        odeconfig.h_ptr = H.data.indptr

        #setup td H terms data
        odeconfig.h_td_data = array(
            [-1.0j * Htd[k].data.data for k in odeconfig.h_td_inds])
        odeconfig.h_td_ind = array(
            [Htd[k].data.indices for k in odeconfig.h_td_inds])
        odeconfig.h_td_ptr = array(
            [Htd[k].data.indptr for k in odeconfig.h_td_inds])
        #--------------------------------------------
        # END PYTHON FUNCTION BASED TIME-DEPENDENCE
        #--------------------------------------------

    #--------------------------------------------
    # START PYTHON FUNCTION BASED HAMILTONIAN
    #--------------------------------------------
    elif odeconfig.tflag == 3:
        #take care of Hamiltonian
        odeconfig.h_funcs = H
        odeconfig.h_func_args = args

        #take care of collapse operators
        odeconfig.c_const_inds = arange(odeconfig.c_num)
        odeconfig.c_td_inds = array([])  #find inds of time-dependent terms
        if len(odeconfig.c_const_inds) > 0:
            H = 0
            for k in odeconfig.c_const_inds:
                H -= 0.5j * (c_ops[k].dag() * c_ops[k])
            if options.tidy:
                H = H.tidyup(options.atol)
            odeconfig.h_data = -1.0j * H.data.data
            odeconfig.h_ind = H.data.indices
            odeconfig.h_ptr = H.data.indptr
Example #32

import ESutils
import scipy as sp
from scipy import linalg as spl
from scipy import stats as sps
from matplotlib import pyplot as plt
import GPdc
import PES

nt = 20
d = 1
lb = sp.array([-1.] * d)
ub = sp.array([1.] * d)
[X, Y, S, D] = ESutils.gen_dataset(nt, d, lb, ub, GPdc.SQUEXP,
                                   sp.array([1.5, 0.15]))

G = PES.makeG(X, Y, S, D, GPdc.SQUEXP, sp.array([0., -1.]), sp.array([1., 1.]),
              18)
H = sp.vstack([i.hyp for i in G.kf])
f, a = plt.subplots(1)
a.plot(H[:, 0], H[:, 1], 'r.')

np = 100
sup = sp.linspace(-1, 1, np)
Dp = [[sp.NaN]] * np
Xp = sp.vstack([sp.array([i]) for i in sup])
Example #33
    print 'importing pyfits and scipy'
    import pyfits, scipy
    print 'done importing'

    hdu = pyfits.PrimaryHDU()

    zcat = open(os.environ['bonn'] + '/' + cluster + '.zcat', 'w')
    for i in range(len(SeqNr_col)):
        zcat.write(
            str(SeqNr_col[i]) + ' ' + str(ra_col[i]) + ' ' + str(dec_col[i]) +
            ' ' + str(z_col[i]) + '\n')
    zcat.close()

    cols = []
    cols.append(
        pyfits.Column(name='Nr', format='J', array=scipy.array(SeqNr_col)))
    cols.append(pyfits.Column(name='Ra', format='D',
                              array=scipy.array(ra_col)))
    cols.append(
        pyfits.Column(name='Dec', format='D', array=scipy.array(dec_col)))
    cols.append(pyfits.Column(name='Z', format='D', array=scipy.array(z_col)))
    cols.append(
        pyfits.Column(name='FIELD_POS',
                      format='J',
                      array=scipy.ones(len(z_col))))
    coldefs = pyfits.ColDefs(cols)
    OBJECTS = pyfits.new_table(coldefs)
    OBJECTS.header.update('extname', 'OBJECTS')

    cols = []
    cols.append(pyfits.Column(name='OBJECT_POS', format='J', array=[1.]))
Example #34
 def get_aerosol(self, val):
     """ Interpolation in lookup table """
     extc = s.array([p(val) for p in self.aer_extc_interp])
     absc = s.array([p(val) for p in self.aer_absc_interp])
     return deepcopy(self.aer_wl), absc, extc, self.aer_asym
Example #35
import scipy as sp


def mat_mod_exp(M, e, m):
    # matrix analogue of mod_exp below: square-and-multiply, M**e mod m
    if e == 1:
        return M % m
    X = mat_mod_exp(M, e / 2, m)
    if e % 2 == 1:
        return sp.dot(X, sp.dot(X, M)) % m
    return sp.dot(X, X) % m


def mod_exp(x, e, m):
    if e == 1:
        return x % m
    b = mod_exp(x, e / 2, m)
    if e % 2 == 1:
        return (b * b * x) % m
    return (b * b) % m
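
# Quick check of the scalar helper (Python 2 integer division in e / 2):
print(mod_exp(3, 13, 100))  # 3**13 % 100 == 23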


if __name__ == '__main__':
    x3 = sp.array([3, 2, 1, 2, 1, 0, 0, 1])
    M = sp.array([[3, 1, 0, 0, -2, -1, 0, 0], [1, 3, 0, 0, -1, -2, 0, 0],
                  [0, 0, 3, 1, 0, 0, -2, -1], [0, 0, 1, 3, 0, 0, -1, -2],
                  [1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0],
                  [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0]],
                 dtype=sp.int64)

    # Calculate C(10000)
    Z = mat_mod_exp(M, 9996, 2 * 2 * 3 * 13**7)
    X = Z.dot(x3) % (2 * 2 * 3 * 13**7)
    #print X
    print(
        (mod_exp(2, X[0], 13**8) * mod_exp(3, X[2], 13**8)) % 13**8)**3 % 13**8

    # Calculate C(C(C(10000))) mod 13^8
    m1a = 2**10 * 3 * 13**3  # phi(phi(phi(phi(phi(13**8)))))
Example #36
import scipy
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

x = scipy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
y = scipy.array([21, 16.5, 13, 11, 9.5, 8.5, 7.5, 7, 6.5, 6.3, 6.2])


def func(x, a, b, c):
    return a * scipy.exp(-b * x) + c


popt, pcov = curve_fit(func, x, y)

plt.scatter(x, y, c='b')
plt.plot(x, func(x, *popt), c='r')

plt.show()
Example #37
def spatially_correlated(target, weights=None, strel=None):
    r"""
    Generates pore seeds that are spatially correlated with their neighbors.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    weights : list of ints, optional
        The [Nx,Ny,Nz] distances (in number of pores) in each direction that
        should be correlated.

    strel : array_like, optional (in place of weights)
        The option allows full control over the spatial correlation pattern by
        specifying the structuring element to be used in the convolution.

        The array should be a 3D array containing the strength of correlations
        in each direction.  Nonzero values indicate the strength, direction
        and extent of correlations.  The following would achieve a basic
        correlation in the z-direction:

    ::

        strel = sp.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                          [[0, 0, 0], [1, 1, 1], [0, 0, 0]],
                          [[0, 0, 0], [0, 0, 0], [0, 0, 0]]])

    Returns
    -------
    values : NumPy ndarray
        Array containing pore seed values.

    Notes
    -----
    This approach uses image convolution to replace each pore seed in the
    geometry with a weighted average of those around it.  It then converts the
    new seeds back to a random distribution by assuming the new seeds are
    normally distributed.

    Because it uses image analysis tools, it only works on Cubic networks.

    This is the approach used by Gostick et al [2]_ to create an anisotropic
    gas diffusion layer for fuel cell electrodes.

    References
    ----------
    .. [2] J. Gostick et al, Pore network modeling of fibrous gas diffusion
           layers for polymer electrolyte membrane fuel cells. J Power Sources
           v173, pp277–290 (2007)

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[10, 10, 10])
    >>> Ps, Ts = pn.Ps, pn.Ts
    >>> geom = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
    >>> mod = op.models.geometry.pore_seed.spatially_correlated
    >>> geom.add_model(propname='pore.seed', model=mod, weights=[2, 2, 2])

    """
    import scipy.ndimage as spim
    network = target.project.network
    # The following will only work on Cubic networks
    x = network._shape[0]
    y = network._shape[1]
    z = network._shape[2]
    im = _sp.rand(x, y, z)
    if strel is None:  # Then generate a strel
        if sum(weights) == 0:
            # If weights of 0 are sent, then skip everything and return rands.
            return im.flatten()
        w = _sp.array(weights)
        strel = _sp.zeros(w * 2 + 1)
        strel[:, w[1], w[2]] = 1
        strel[w[0], :, w[2]] = 1
        strel[w[0], w[1], :] = 1
    im = spim.convolve(im, strel)
    # The convolved values are no longer uniformly distributed, so fit a
    # gaussian and map them back to uniform seeds
    im = (im - _sp.mean(im)) / _sp.std(im)
    im = 1 / 2 * _sp.special.erfc(-im / _sp.sqrt(2))
    values = im.flatten()
    values = values[network.pores(target.name)]
    return values
Example #38
def mcsolve(H,
            psi0,
            tlist,
            c_ops,
            e_ops,
            ntraj=500,
            args={},
            options=Odeoptions()):
    """Monte-Carlo evolution of a state vector :math:`|\psi \\rangle` for a given
    Hamiltonian and sets of collapse operators, and possibly, operators
    for calculating expectation values. Options for the underlying ODE solver are 
    given by the Odeoptions class.
    
    mcsolve supports time-dependent Hamiltonians and collapse operators using either
    Python functions or strings to represent time-dependent coefficients.  Note that
    the system Hamiltonian MUST have at least one constant term.
    
    As an example of a time-dependent problem, consider a Hamiltonian with two terms ``H0``
    and ``H1``, where ``H1`` is time-dependent with coefficient ``sin(w*t)``, and collapse operators
    ``C0`` and ``C1``, where ``C1`` is time-dependent with coefficient ``exp(-a*t)``.  Here, w and a are
    constant arguments with values ``W`` and ``A``.  
    
    Using the Python function time-dependent format requires two Python functions,
    one for each collapse coefficient. Therefore, this problem could be expressed as::
    
        def H1_coeff(t,args):
            return sin(args['w']*t)
    
        def C1_coeff(t,args):
            return exp(-args['a']*t)
    
        H=[H0,[H1,H1_coeff]]
    
        c_op_list=[C0,[C1,C1_coeff]]
    
        args={'a':A,'w':W}
    
    or in String (Cython) format we could write::
    
        H=[H0,[H1,'sin(w*t)']]
    
        c_op_list=[C0,[C1,'exp(-a*t)']]
    
        args={'a':A,'w':W}
    
    Constant terms are preferably placed first in the Hamiltonian and collapse 
    operator lists.
    
    Parameters
    ----------
    H : qobj
        System Hamiltonian.
    psi0 : qobj 
        Initial state vector
    tlist : array_like 
        Times at which results are recorded.
    ntraj : int 
        Number of trajectories to run.
    c_ops : array_like 
        ``list`` or ``array`` of collapse operators.
    e_ops : array_like 
        ``list`` or ``array`` of operators for calculating expectation values.
    args : dict
        Arguments for time-dependent Hamiltonian and collapse operator terms.
    options : Odeoptions
        Instance of ODE solver options.
    
    Returns
    -------
    results : Odedata    
        Object storing all results from simulation.
        
    """
    if psi0.type != 'ket':
        raise Exception("Initial state must be a state vector.")
    odeconfig.options = options
    #set num_cpus to the value given in qutip.settings if none in Odeoptions
    if not odeconfig.options.num_cpus:
        odeconfig.options.num_cpus = qutip.settings.num_cpus
    #set initial value data
    if options.tidy:
        odeconfig.psi0 = psi0.tidyup(options.atol).full()
    else:
        odeconfig.psi0 = psi0.full()
    odeconfig.psi0_dims = psi0.dims
    odeconfig.psi0_shape = psi0.shape
    #set general items
    odeconfig.tlist = tlist
    if isinstance(ntraj, (list, ndarray)):
        odeconfig.ntraj = sort(ntraj)[-1]
    else:
        odeconfig.ntraj = ntraj
    #set norm finding constants
    odeconfig.norm_tol = options.norm_tol
    odeconfig.norm_steps = options.norm_steps
    #----

    #----------------------------------------------
    # SETUP ODE DATA IF NONE EXISTS OR NOT REUSING
    #----------------------------------------------
    if (not options.rhs_reuse) or (not odeconfig.tdfunc):
        #reset odeconfig collapse and time-dependence flags to default values
        _reset_odeconfig()

        #check for type of time-dependence (if any)
        time_type, h_stuff, c_stuff = _ode_checks(H, c_ops, 'mc')
        h_terms = len(h_stuff[0]) + len(h_stuff[1]) + len(h_stuff[2])
        c_terms = len(c_stuff[0]) + len(c_stuff[1]) + len(c_stuff[2])
        #set time_type for use in multiprocessing
        odeconfig.tflag = time_type

        #-Check for PyObjC on Mac platforms
        if sys.platform == 'darwin' and odeconfig.options.gui:
            try:
                import Foundation
            except:
                odeconfig.options.gui = False

        #check if running in iPython while using Cython compiling (if so, disable the GUI to work around an error)
        if odeconfig.options.gui and odeconfig.tflag in array([1, 10, 11]):
            try:
                __IPYTHON__
            except:
                pass
            else:
                odeconfig.options.gui = False
        if qutip.settings.qutip_gui == "NONE":
            odeconfig.options.gui = False

        #check for collapse operators
        if c_terms > 0:
            odeconfig.cflag = 1
        else:
            odeconfig.cflag = 0

        #Configure data
        _mc_data_config(H, psi0, h_stuff, c_ops, c_stuff, args, e_ops, options)
        if odeconfig.tflag in array([1, 10, 11]):  #compile time-dependent RHS code
            os.environ['CFLAGS'] = '-O3 -w'
            import pyximport
            pyximport.install(
                setup_args={'include_dirs': [numpy.get_include()]})
            if odeconfig.tflag in array([1, 11]):
                code = compile(
                    'from ' + odeconfig.tdname +
                    ' import cyq_td_ode_rhs,col_spmv,col_expect', '<string>',
                    'exec')
                exec(code, globals())
                odeconfig.tdfunc = cyq_td_ode_rhs
                odeconfig.colspmv = col_spmv
                odeconfig.colexpect = col_expect
            else:
                code = compile(
                    'from ' + odeconfig.tdname + ' import cyq_td_ode_rhs',
                    '<string>', 'exec')
                exec(code, globals())
                odeconfig.tdfunc = cyq_td_ode_rhs
            try:
                os.remove(odeconfig.tdname + ".pyx")
            except:
                print("Error removing pyx file.  File not found.")
        elif odeconfig.tflag == 0:
            odeconfig.tdfunc = cyq_ode_rhs
    else:  #setup args for new parameters when rhs_reuse=True and tdfunc is given
        #string based
        if odeconfig.tflag in array([1, 10, 11]):
            if any(args):
                odeconfig.c_args = []
                arg_items = list(args.items())  #list() so the items are indexable in Python 3
                for k in range(len(args)):
                    odeconfig.c_args.append(arg_items[k][1])
        #function based
        elif odeconfig.tflag in array([2, 3, 20, 22]):
            odeconfig.h_func_args = args

    #load monte-carlo class
    mc = _MC_class()
    #RUN THE SIMULATION
    mc.run()

    #AFTER MCSOLVER IS DONE --------------------------------------

    #-------COLLECT AND RETURN OUTPUT DATA IN ODEDATA OBJECT --------------#
    output = Odedata()
    output.solver = 'mcsolve'
    #state vectors
    if mc.psi_out is not None and odeconfig.options.mc_avg and odeconfig.cflag:
        output.states = parfor(_mc_dm_avg, mc.psi_out.T)
    elif mc.psi_out is not None:
        output.states = mc.psi_out
    #expectation values
    elif mc.expect_out is not None and odeconfig.cflag and odeconfig.options.mc_avg:  #averaging if multiple trajectories
        if isinstance(ntraj, int):
            output.expect = mean(mc.expect_out, axis=0)
        elif isinstance(ntraj, (list, ndarray)):
            output.expect = []
            for num in ntraj:
                expt_data = mean(mc.expect_out[:num], axis=0)
                data_list = []
                if any([not op.isherm for op in e_ops]):
                    for k in range(len(e_ops)):
                        if e_ops[k].isherm:
                            data_list.append(real(expt_data[k]))
                        else:
                            data_list.append(expt_data[k])
                else:
                    data_list = [data for data in expt_data]
                output.expect.append(data_list)
    else:  #no averaging for single trajectory or if mc_avg flag (Odeoptions) is off
        if mc.expect_out is not None:
            output.expect = mc.expect_out

    #simulation parameters
    output.times = odeconfig.tlist
    output.num_expect = odeconfig.e_num
    output.num_collapse = odeconfig.c_num
    output.ntraj = odeconfig.ntraj
    output.col_times = mc.collapse_times_out
    output.col_which = mc.which_op_out
    return output
Exemple #39
0
import scipy as sp

def mv(v):
    # matrix-vector product for the diagonal operator diag(2, 3)
    return sp.array([2 * v[0], 3 * v[1]])
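
# A hedged sketch (added): `mv` has the matvec signature expected by
# scipy.sparse.linalg.LinearOperator, standing in for diag(2, 3) without
# ever building the matrix.
from scipy.sparse.linalg import LinearOperator

A = LinearOperator((2, 2), matvec=mv)
print(A.matvec(sp.array([1.0, 1.0])))  # -> [ 2.  3.]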
Exemple #40
0
def _mc_alg_evolve(nt, args):
    """
    Monte-Carlo algorithm returning state-vector or expectation values at times tlist for a single trajectory.
    """
    #get input data
    mc_alg_out, opt, tlist, num_times, seeds = args

    collapse_times = []  #times at which collapse occurs
    which_oper = []  # which operator did the collapse

    #SEED AND RNG AND GENERATE
    prng = RandomState(seeds[nt])
    rand_vals = prng.rand(
        2)  #first rand is collapse norm, second is which operator

    #CREATE ODE OBJECT CORRESPONDING TO DESIRED TIME-DEPENDENCE
    if odeconfig.tflag in array([1, 10, 11]):
        ODE = ode(odeconfig.tdfunc)
        code = compile('ODE.set_f_params(' + odeconfig.string + ')',
                       '<string>', 'exec')
        exec(code)
    elif odeconfig.tflag == 2:
        ODE = ode(_cRHStd)
    elif odeconfig.tflag in array([20, 22]):
        ODE = ode(_tdRHStd)
    elif odeconfig.tflag == 3:
        ODE = ode(_pyRHSc)
    else:
        ODE = ode(cyq_ode_rhs)
        ODE.set_f_params(odeconfig.h_data, odeconfig.h_ind, odeconfig.h_ptr)

    #initialize ODE solver for RHS
    ODE.set_integrator('zvode',
                       method=opt.method,
                       order=opt.order,
                       atol=opt.atol,
                       rtol=opt.rtol,
                       nsteps=opt.nsteps,
                       first_step=opt.first_step,
                       min_step=opt.min_step,
                       max_step=opt.max_step)

    #set initial conditions
    ODE.set_initial_value(odeconfig.psi0, tlist[0])
    #make array for collapse operator inds
    cinds = arange(odeconfig.c_num)

    #RUN ODE UNTIL EACH TIME IN TLIST
    for k in range(1, num_times):
        #ODE WHILE LOOP FOR INTEGRATE UP TO TIME TLIST[k]
        while ODE.t < tlist[k]:
            t_prev = ODE.t
            y_prev = ODE.y
            norm2_prev = norm(ODE.y, 2)**2
            ODE.integrate(
                tlist[k],
                step=1)  #integrate up to tlist[k], one step at a time.
            if not ODE.successful():
                raise Exception("ZVODE failed!")
            #check if ODE jumped over tlist[k], if so, integrate until tlist exactly
            if ODE.t > tlist[k]:
                ODE.set_initial_value(y_prev, t_prev)
                ODE.integrate(tlist[k], step=0)
                if not ODE.successful():
                    raise Exception("ZVODE failed!")
            norm2_psi = norm(ODE.y, 2)**2
            if norm2_psi <= rand_vals[0]:  # <== collapse has occurred
                #find collapse time to within specified tolerance
                #---------------------------------------------------
                ii = 0
                found = False  #set True once the jump time is located to tolerance
                t_final = ODE.t
                while ii < odeconfig.norm_steps:
                    ii += 1
                    #t_guess=t_prev+(rand_vals[0]-norm2_prev)/(norm2_psi-norm2_prev)*(t_final-t_prev)
                    t_guess = t_prev + log(norm2_prev / rand_vals[0]) / log(
                        norm2_prev / norm2_psi) * (t_final - t_prev)
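                    # (added note) the guess assumes |psi|^2 decays roughly
                    # exponentially between t_prev and t_final, solving
                    # norm2_prev*exp(-g*(t_guess-t_prev)) = rand_vals[0] with
                    # g = log(norm2_prev/norm2_psi)/(t_final - t_prev)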
                    ODE.set_initial_value(y_prev, t_prev)
                    ODE.integrate(t_guess, step=0)
                    if not ODE.successful():
                        raise Exception(
                            "ZVODE failed after adjusting step size!")
                    norm2_guess = norm(ODE.y, 2)**2
                    if abs(rand_vals[0] -
                           norm2_guess) < odeconfig.norm_tol * rand_vals[0]:
                        found = True
                        break
                    elif (norm2_guess < rand_vals[0]):
                        # t_guess is still > t_jump
                        t_final = t_guess
                        norm2_psi = norm2_guess
                    else:
                        # t_guess < t_jump
                        t_prev = t_guess
                        y_prev = ODE.y
                        norm2_prev = norm2_guess
                if not found:
                    raise Exception(
                        "Norm tolerance not reached. Increase accuracy of ODE solver or Odeoptions.norm_steps."
                    )
                #---------------------------------------------------
                collapse_times.append(ODE.t)
                #some string based collapse operators
                if odeconfig.tflag in array([1, 11]):
                    n_dp = [
                        mc_expect(odeconfig.n_ops_data[i],
                                  odeconfig.n_ops_ind[i],
                                  odeconfig.n_ops_ptr[i], 1, ODE.y)
                        for i in odeconfig.c_const_inds
                    ]
                    _locals = locals()
                    exec(
                        odeconfig.col_expect_code, globals(), _locals
                    )  #calculates the expectation values for time-dependent norm collapse operators
                    n_dp = array(_locals['n_dp'])

                #some Python function based collapse operators
                elif odeconfig.tflag in array([2, 20, 22]):
                    n_dp = [
                        mc_expect(odeconfig.n_ops_data[i],
                                  odeconfig.n_ops_ind[i],
                                  odeconfig.n_ops_ptr[i], 1, ODE.y)
                        for i in odeconfig.c_const_inds
                    ]
                    n_dp += [
                        abs(odeconfig.c_funcs[i](ODE.t, odeconfig.c_func_args))
                        **2 * mc_expect(odeconfig.n_ops_data[i],
                                        odeconfig.n_ops_ind[i],
                                        odeconfig.n_ops_ptr[i], 1, ODE.y)
                        for i in odeconfig.c_td_inds
                    ]
                    n_dp = array(n_dp)
                #all constant collapse operators.
                else:
                    n_dp = array([
                        mc_expect(odeconfig.n_ops_data[i],
                                  odeconfig.n_ops_ind[i],
                                  odeconfig.n_ops_ptr[i], 1, ODE.y)
                        for i in range(odeconfig.c_num)
                    ])

                #determine which operator does collapse: inverse-CDF sampling
                #with probabilities proportional to the expectation values n_dp
                kk = cumsum(n_dp / sum(n_dp))
                j = cinds[kk >= rand_vals[1]][0]
                which_oper.append(j)  #record which operator did collapse
                if j in odeconfig.c_const_inds:
                    state = spmv(odeconfig.c_ops_data[j],
                                 odeconfig.c_ops_ind[j],
                                 odeconfig.c_ops_ptr[j], ODE.y)
                else:
                    if odeconfig.tflag in array([1, 11]):
                        _locals = locals()
                        exec(
                            odeconfig.col_spmv_code, globals(), _locals
                        )  #calculates the state vector after collapse by a time-dependent collapse operator
                        state = _locals['state']
                    else:
                        state = odeconfig.c_funcs[j](
                            ODE.t, odeconfig.c_func_args) * spmv(
                                odeconfig.c_ops_data[j],
                                odeconfig.c_ops_ind[j], odeconfig.c_ops_ptr[j],
                                ODE.y)
                state = state / norm(state, 2)
                ODE.set_initial_value(state, ODE.t)
                rand_vals = prng.rand(2)
        #-------------------------------------------------------

        ###--after while loop--####
        out_psi = ODE.y / norm(ODE.y, 2)
        if odeconfig.e_num == 0:
            if odeconfig.options.mc_avg:
                mc_alg_out[k] = out_psi * out_psi.conj().T
            else:
                mc_alg_out[k] = out_psi
        else:
            for jj in range(odeconfig.e_num):
                mc_alg_out[jj][k] = mc_expect(odeconfig.e_ops_data[jj],
                                              odeconfig.e_ops_ind[jj],
                                              odeconfig.e_ops_ptr[jj],
                                              odeconfig.e_ops_isherm[jj],
                                              out_psi)

    #RETURN VALUES
    if odeconfig.e_num == 0:
        if odeconfig.options.mc_avg:
            mc_alg_out = array([
                Qobj(k, [odeconfig.psi0_dims[0], odeconfig.psi0_dims[0]],
                     [odeconfig.psi0_shape[0], odeconfig.psi0_shape[0]],
                     fast='mc-dm') for k in mc_alg_out
            ])
        else:
            mc_alg_out = array([
                Qobj(k, odeconfig.psi0_dims, odeconfig.psi0_shape, fast='mc')
                for k in mc_alg_out
            ])
        return nt, mc_alg_out, array(collapse_times), array(which_oper)
    else:
        return nt, mc_alg_out, array(collapse_times), array(which_oper)
Exemple #41
0
"""
This script demonstrates Kalman filter tracking of Brownian motion data.
"""

import scipy as sp
from scipy.linalg import inv
from functools import wraps

_t_delta = 0.5  # time step between measurements

_A = sp.array([[1, _t_delta], [0, 1]])  # constant-velocity transition matrix
_H = sp.array([[1, 0], [0, 1]])         # observe both state components directly
_R = None                               # measurement noise covariance (set before use)
_Q = sp.array([[0.001, 0], [0, .001]])  # process noise covariance


class KalmanFilter(object):
    """Class for implementing simple Kalman Filter, assumes no Control input"""
    def __init__(self, A=_A, H=_H, R=_R, Q=_Q):
        dim = A.shape[0]
        self.A = A  # Transition matrix
        self.H = H  # Extraction matrix
        self.R = R  # Covariance matrix, measurement noise
        self.Q = Q  # Covariance matrix, process noise
        self.x_mu_prior = sp.zeros([dim, 1])
        self.x_mu = sp.zeros([dim, 1])
        self.P_prior = sp.zeros([dim, dim])
        self.P = sp.zeros([dim, dim])
        self.P[-1][-1] = .001
        self.I = sp.identity(dim)
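
# A hedged sketch (added) of the predict/update cycle these matrices are set
# up for; `kalman_step` and the measurement `z` (shape [dim, 1]) are assumed
# names, and R must first be set to a measurement noise covariance.
def kalman_step(kf, z):
    # predict: propagate the state estimate and its covariance
    kf.x_mu_prior = sp.dot(kf.A, kf.x_mu)
    kf.P_prior = sp.dot(sp.dot(kf.A, kf.P), kf.A.T) + kf.Q
    # update: blend the prediction with the measurement via the Kalman gain
    S = sp.dot(sp.dot(kf.H, kf.P_prior), kf.H.T) + kf.R
    K = sp.dot(sp.dot(kf.P_prior, kf.H.T), inv(S))
    kf.x_mu = kf.x_mu_prior + sp.dot(K, z - sp.dot(kf.H, kf.x_mu_prior))
    kf.P = sp.dot(kf.I - sp.dot(K, kf.H), kf.P_prior)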
Exemple #42
0
    def __init__(self):

        #-----------------------------------#
        # INIT MC CLASS
        #-----------------------------------#

        #----MAIN OBJECT PROPERTIES--------------------#
        ##holds instance of the ProgressBar class
        self.bar = None
        ##holds instance of the Pthread class
        self.thread = None
        #Number of completed trajectories
        self.count = 0
        ##step-size for count attribute
        self.step = 1
        ##Percent of trajectories completed
        self.percent = 0.0
        ##used in implementing the command-line progress output
        self.level = 0.1
        ##number of time steps in tlist (the times at which state vectors or
        ##expectation values are output)
        self.num_times = len(odeconfig.tlist)
        #holds seed for random number generator
        self.seed = None
        #holds expected time to completion
        self.st = None
        #number of cpus to be used
        self.cpus = odeconfig.options.num_cpus
        #set output variables, even if they are not used to simplify output code.
        self.psi_out = None
        self.expect_out = []
        self.collapse_times_out = None
        self.which_op_out = None

        #FOR EVOLUTION FOR NO COLLAPSE OPERATORS
        if odeconfig.c_num == 0:
            if odeconfig.e_num == 0:
                ##Output array of state vectors calculated at times in tlist
                self.psi_out = array(
                    [Qobj()] * self.num_times)  #preallocate array of Qobjs
            elif odeconfig.e_num != 0:  #no collapse; expectation values
                ##List of output expectation values calculated at times in tlist
                self.expect_out = []
                for i in range(odeconfig.e_num):
                    if odeconfig.e_ops_isherm[
                            i]:  #preallocate real array of zeros
                        self.expect_out.append(zeros(self.num_times))
                    else:  #preallocate complex array of zeros
                        self.expect_out.append(
                            zeros(self.num_times, dtype=complex))
                    self.expect_out[i][0] = mc_expect(
                        odeconfig.e_ops_data[i], odeconfig.e_ops_ind[i],
                        odeconfig.e_ops_ptr[i], odeconfig.e_ops_isherm[i],
                        odeconfig.psi0)

        #FOR EVOLUTION WITH COLLAPSE OPERATORS
        elif odeconfig.c_num != 0:
            #preallocate #ntraj arrays for state vectors, collapse times, and which operator
            self.collapse_times_out = zeros((odeconfig.ntraj), dtype=ndarray)
            self.which_op_out = zeros((odeconfig.ntraj), dtype=ndarray)
            if odeconfig.e_num == 0:  # if no expectation operators, preallocate #ntraj arrays for state vectors
                self.psi_out = array([
                    zeros((self.num_times), dtype=object)
                    for q in range(odeconfig.ntraj)
                ])  #preallocate array of Qobjs
            else:  #preallocate array of lists for expectation values
                self.expect_out = [[] for x in range(odeconfig.ntraj)]
Exemple #43
0
def main():

    fm = fmtest()

    tstart = time.time()
    fm.run()
    tend = time.time()

    if 1:
        fig1 = pylab.figure(1, figsize=(12, 10), facecolor="w")
        fig2 = pylab.figure(2, figsize=(12, 10), facecolor="w")
        fig3 = pylab.figure(3, figsize=(12, 10), facecolor="w")

        Ns = 10000
        Ne = 100000

        fftlen = 8192
        winfunc = scipy.blackman

        # Plot transmitted signal
        fs = fm._if_rate

        d = fm.snk_tx.data()[Ns:Ns + Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)

        X, freq = sp1_f.psd(d,
                            NFFT=fftlen,
                            noverlap=fftlen // 4,
                            Fs=fs,
                            window=lambda d: d * winfunc(fftlen),
                            visible=False)
        X_in = 10.0 * scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in) + 1])
        sp1_f.set_ylim([-120.0, 20.0])

        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")

        Ts = 1.0 / fs
        Tmax = len(d) * Ts

        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
        #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
        sp1_t.set_ylim([-5, 5])

        # Set up the number of rows and columns for plotting the subfigures
        Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
        Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
        if (fm.num_rx_channels() % Ncols != 0):
            Nrows += 1

        # Plot each of the channels outputs. Frequencies on Figure 2 and
        # time signals on Figure 3
        fs_o = fm._audio_rate
        for i in range(len(fm.snks)):
            # remove issues with the transients at the beginning
            # also remove some corruption at the end of the stream
            #    this is a bug, probably due to the corner cases
            d = fm.snks[i].data()[Ns:Ne]

            sp2_f = fig2.add_subplot(Nrows, Ncols, 1 + i)
            X, freq = sp2_f.psd(d,
                                NFFT=fftlen,
                                noverlap=fftlen // 4,
                                Fs=fs_o,
                                window=lambda d: d * winfunc(fftlen),
                                visible=False)
            #X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
            X_o = 10.0 * scipy.log10(abs(X))
            #f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
            f_o = scipy.arange(0, fs_o / 2.0, fs_o / 2.0 / float(X_o.size))
            p2_f = sp2_f.plot(f_o, X_o, "b")
            sp2_f.set_xlim([min(f_o), max(f_o) + 0.1])
            sp2_f.set_ylim([-120.0, 20.0])
            sp2_f.grid(True)

            sp2_f.set_title(("Channel %d" % i), weight="bold")
            sp2_f.set_xlabel("Frequency (Hz)")
            sp2_f.set_ylabel("Power (dBW)")

            Ts = 1.0 / fs_o
            Tmax = len(d) * Ts
            t_o = scipy.arange(0, Tmax, Ts)

            x_t = scipy.array(d)
            sp2_t = fig3.add_subplot(Nrows, Ncols, 1 + i)
            p2_t = sp2_t.plot(t_o, x_t.real, "b")
            p2_t = sp2_t.plot(t_o, x_t.imag, "r")
            sp2_t.set_xlim([min(t_o), max(t_o) + 1])
            sp2_t.set_ylim([-1, 1])

            sp2_t.set_xlabel("Time (s)")
            sp2_t.set_ylabel("Amplitude")

        pylab.show()
Exemple #44
0
from scipy.fftpack import fft, ifft
import numpy as np

x = np.random.random(1024)
%timeit ifft(fft(x))

# %% [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Linear algebra : scipy.linalg
# - Solvers, decompositions, eigenvalues (same as numpy).
# - Matrix functions: expm, sinm, sinhm, ...
# - Block matrices: diagonal, triangular, periodic, ...

# %% {"slideshow": {"slide_type": "fragment"}}
import numpy as np
import scipy as sp
import scipy.linalg as spl

b = sp.ones(5)
A=sp.array([[1.,3.,0., 0.,0.],
           [ 2.,1.,-4, 0.,0.],
           [ 6.,1., 2,-3.,0.], 
           [ 0.,1., 4.,-2.,-3.], 
           [ 0.,0., 6.,-3., 2.]])
print("x=",spl.solve(A,b,sym_pos=False)) # LAPACK ( gesv ou posv )
AB=sp.array([[0.,3.,-4.,-3.,-3.],
             [1.,1., 2.,-2., 2.],
             [2.,1., 4.,-3., 0.],
             [6.,1., 6., 0., 0.]])
print("x=",spl.solve_banded((2,1),AB,b)) # LAPACK ( gbsv )


# %% {"slideshow": {"slide_type": "slide"}}
P,L,U = spl.lu(A) #  P A = L U
np.set_printoptions(precision=3)
for M in (P,L,U):
    print(M, end="\n"+20*"-"+"\n")
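
# %% {"slideshow": {"slide_type": "fragment"}}
# Added sketch: the matrix functions mentioned above in action; expm of the
# zero matrix is the identity, and expm(eye(2)) has e on the diagonal.
print(spl.expm(sp.zeros((2, 2))))
print(spl.expm(sp.eye(2)))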
Exemple #45
0
def coordinate_genot_ss(genotype_file=None,
                        hdf5_file=None,
                        genetic_map_dir=None,
                        check_mafs=False,
                        min_maf=0.01,
                        skip_coordination=False,
                        debug=False):
    """
    Assumes plink BED files.  Imputes missing genotypes.
    """
    from plinkio import plinkfile
    plinkf = plinkfile.PlinkFile(genotype_file)
    plinkf_dict = plinkfiles.get_phenotypes(plinkf)
    num_individs = plinkf_dict['num_individs']
    risk_scores = sp.zeros(num_individs)
    rb_risk_scores = sp.zeros(num_individs)
    num_common_snps = 0
    corr_list = []
    rb_corr_list = []

    if plinkf_dict['has_phenotype']:
        hdf5_file.create_dataset('y', data=plinkf_dict['phenotypes'])

    hdf5_file.create_dataset('fids', data=sp.array(plinkf_dict['fids'], dtype=util.fids_dtype))
    hdf5_file.create_dataset('iids', data=sp.array(plinkf_dict['iids'], dtype=util.iids_dtype))
    ssf = hdf5_file['sum_stats']

    cord_data_g = hdf5_file.create_group('cord_data')

    # Figure out chromosomes and positions by looking at SNPs.
    loci = plinkf.get_loci()
    plinkf.close()
    gf_chromosomes = [l.chromosome for l in loci]

    chromosomes = sp.unique(gf_chromosomes)
    chromosomes.sort()
    chr_dict = plinkfiles.get_chrom_dict(loci, chromosomes)

    tot_num_non_matching_nts = 0
    for chrom in chromosomes:
        chr_str = 'chrom_%d' % chrom
        print('Working on chromosome: %s' % chr_str)

        chrom_d = chr_dict[chr_str]
        try:
            ssg = ssf['chrom_%d' % chrom]
        except Exception as err_str:
            print(err_str)
            print('Did not find chromosome in SS dataset.')
            print('Continuing.')
            continue

        g_sids = chrom_d['sids']
        g_sid_set = set(g_sids)
        assert len(g_sid_set) == len(g_sids), 'Some SNPs appear to be duplicated?'
        ss_sids = (ssg['sids'][...]).astype(util.sids_u_dtype)
        ss_sid_set = set(ss_sids)
        assert len(ss_sid_set) == len(ss_sids), 'Some SNPs appear to be duplicated?'

        # Figure out filters:
        g_filter = sp.in1d(g_sids, ss_sids)
        ss_filter = sp.in1d(ss_sids, g_sids)

        # Order by SNP IDs
        g_order = sp.argsort(g_sids)
        ss_order = sp.argsort(ss_sids)

        g_indices = []
        for g_i in g_order:
            if g_filter[g_i]:
                g_indices.append(g_i)

        ss_indices = []
        for ss_i in ss_order:
            if ss_filter[ss_i]:
                ss_indices.append(ss_i)

        g_nts = chrom_d['nts']
        snp_indices = chrom_d['snp_indices']
        ss_nts = (ssg['nts'][...]).astype(util.nts_u_dtype)
        betas = ssg['betas'][...]
        log_odds = ssg['log_odds'][...]
        assert not sp.any(sp.isnan(betas)), 'Some SNP effect estimates are NANs (not a number)'
        assert not sp.any(sp.isinf(betas)), 'Some SNP effect estimates are INFs (infinite numbers)'

        num_non_matching_nts = 0
        num_ambig_nts = 0
        ok_nts = []
        if debug:
            print('Found %d SNPs present in both datasets' % (len(g_indices)))

        if 'freqs' in ssg:
            ss_freqs = ssg['freqs'][...]

        ok_indices = {'g': [], 'ss': []}
        for g_i, ss_i in zip(g_indices, ss_indices):

            # Is the nucleotide ambiguous?
            g_nt = [g_nts[g_i][0], g_nts[g_i][1]]

            if not skip_coordination:
                if tuple(g_nt) in util.ambig_nts:
                    num_ambig_nts += 1
                    tot_num_non_matching_nts += 1
                    continue

                if (not g_nt[0] in util.valid_nts) or (not g_nt[1] in util.valid_nts):
                    num_non_matching_nts += 1
                    tot_num_non_matching_nts += 1
                    continue

                ss_nt = ss_nts[ss_i]

                # Are the nucleotides the same?
                flip_nts = False
                os_g_nt = sp.array(
                    [util.opp_strand_dict[g_nt[0]], util.opp_strand_dict[g_nt[1]]])
                if not (sp.all(g_nt == ss_nt) or sp.all(os_g_nt == ss_nt)):
                    # Opposite strand nucleotides
                    flip_nts = (g_nt[1] == ss_nt[0] and g_nt[0] == ss_nt[1]) or (
                        os_g_nt[1] == ss_nt[0] and os_g_nt[0] == ss_nt[1])
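                    # (added example) g_nt=['A','C'] with ss_nt=['C','A'], or
                    # its reversed opposite-strand pair ['G','T'], counts as a
                    # flip, and the effect signs are reversed below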
                    if flip_nts:
                        betas[ss_i] = -betas[ss_i]
                        log_odds[ss_i] = -log_odds[ss_i]
                        if 'freqs' in ssg:
                            if ss_freqs[ss_i] > 0:
                                ss_freqs[ss_i] = 1 - ss_freqs[ss_i]
                    else:
                        num_non_matching_nts += 1
                        tot_num_non_matching_nts += 1

                        continue

            # everything seems ok.
            ok_indices['g'].append(g_i)
            ok_indices['ss'].append(ss_i)
            ok_nts.append(g_nt)

        if debug:
            print('%d SNPs were excluded due to ambiguous nucleotides.' % num_ambig_nts)
            print('%d SNPs were excluded due to non-matching nucleotides.' % num_non_matching_nts)

        # Resorting by position
        positions = sp.array(chrom_d['positions'])[ok_indices['g']]
        order = sp.argsort(positions)
        ok_indices['g'] = list(sp.array(ok_indices['g'])[order])
        ok_indices['ss'] = list(sp.array(ok_indices['ss'])[order])
        positions = positions[order]

        # Parse SNPs
        snp_indices = sp.array(chrom_d['snp_indices'])
        
        # Pinpoint where the SNPs are in the file.
        snp_indices = snp_indices[ok_indices['g']]
        raw_snps, freqs = plinkfiles.parse_plink_snps(
            genotype_file, snp_indices)
        if debug:
            print('Parsed a %dX%d (SNP) genotype matrix'%(raw_snps.shape[0],raw_snps.shape[1]))

        snp_stds = sp.sqrt(2 * freqs * (1 - freqs))  
        snp_means = freqs * 2  

        betas = betas[ok_indices['ss']]
        log_odds = log_odds[ok_indices['ss']]
        ps = ssg['ps'][...][ok_indices['ss']]
        nts = sp.array(ok_nts)[order]
        sids = (ssg['sids'][...]).astype(util.sids_u_dtype)
        sids = sids[ok_indices['ss']]

        # Check SNP frequencies..
        if check_mafs and 'freqs' in ssg:
            ss_freqs = ss_freqs[ok_indices['ss']]
            # Assuming freq less than 0 is missing data
            freq_discrepancy_snp = sp.absolute(ss_freqs - (1 - freqs)) > 0.15
            # Filter SNPs that don't have MAF info in the summary statistics
            freq_discrepancy_snp = sp.logical_and(freq_discrepancy_snp, ss_freqs>0)
            freq_discrepancy_snp = sp.logical_and(freq_discrepancy_snp, ss_freqs<1)
            if sp.any(freq_discrepancy_snp):
                print('Warning: %d SNPs appear to have high frequency '
                      'discrepancy between summary statistics and validation sample' %
                      sp.sum(freq_discrepancy_snp))

                # Filter freq_discrepancy_snps
                ok_freq_snps = sp.logical_not(freq_discrepancy_snp)
                raw_snps = raw_snps[ok_freq_snps]
                snp_stds = snp_stds[ok_freq_snps]
                snp_means = snp_means[ok_freq_snps]
                freqs = freqs[ok_freq_snps]
                ps = ps[ok_freq_snps]
                positions = positions[ok_freq_snps]
                nts = nts[ok_freq_snps]
                sids = sids[ok_freq_snps]
                betas = betas[ok_freq_snps]
                log_odds = log_odds[ok_freq_snps]

        # Filter minor allele frequency SNPs.
        maf_filter = (freqs > min_maf) * (freqs < (1 - min_maf))
        maf_filter_sum = sp.sum(maf_filter)
        n_snps = len(maf_filter)
        assert maf_filter_sum <= n_snps, "Problems when filtering SNPs with low minor allele frequencies"
        if sp.sum(maf_filter) < n_snps:
            raw_snps = raw_snps[maf_filter]
            snp_stds = snp_stds[maf_filter]
            snp_means = snp_means[maf_filter]
            freqs = freqs[maf_filter]
            ps = ps[maf_filter]
            positions = positions[maf_filter]
            nts = nts[maf_filter]
            sids = sids[maf_filter]
            betas = betas[maf_filter]
            log_odds = log_odds[maf_filter]

            print('%d SNPs with MAF < %0.3f were filtered' % (n_snps - maf_filter_sum, min_maf))

        print('%d SNPs were retained on chromosome %d.' % (maf_filter_sum, chrom))

        rb_prs = sp.dot(sp.transpose(raw_snps), log_odds)
        if debug and plinkf_dict['has_phenotype']:
            print('Normalizing SNPs')
            snp_means.shape = (len(raw_snps), 1)
            snp_stds.shape = (len(raw_snps), 1)
            snps = (raw_snps - snp_means) / snp_stds
            assert snps.shape == raw_snps.shape, 'Problems when normalizing SNPs (set to have variance 1 and 0 mean)'
            snp_stds = snp_stds.flatten()
            snp_means = snp_means.flatten()
            prs = sp.dot(sp.transpose(snps), betas)
            corr = sp.corrcoef(plinkf_dict['phenotypes'], prs)[0, 1]
            corr_list.append(corr)
            print('PRS correlation for chromosome %d was %0.4f when predicting into LD ref data' % (chrom, corr))
            rb_corr = sp.corrcoef(plinkf_dict['phenotypes'], rb_prs)[0, 1]
            rb_corr_list.append(rb_corr)
            print('Raw effect sizes PRS correlation for chromosome %d was %0.4f when predicting into LD ref data' % (chrom, rb_corr))

        sid_set = set(sids)
        if genetic_map_dir is not None:
            genetic_map = []
            with gzip.open(genetic_map_dir + 'chr%d.interpolated_genetic_map.gz' % chrom) as f:
                for line in f:
                    l = line.split()
                    if l[0] in sid_set:
                        genetic_map.append(l[0])
        else:
            genetic_map = None

        coord_data_dict = {'chrom': 'chrom_%d' % chrom, 
                           'raw_snps_ref': raw_snps, 
                           'snp_stds_ref': snp_stds, 
                           'snp_means_ref': snp_means, 
                           'freqs_ref': freqs,
                           'ps': ps,
                           'positions': positions,
                           'nts': nts,
                           'sids': sids,
                           'genetic_map': genetic_map,
                           'betas': betas,
                           'log_odds': log_odds,
                           'log_odds_prs': rb_prs}
        
        write_coord_data(cord_data_g, coord_data_dict)
        
        if debug and plinkf_dict['has_phenotype']:
            rb_risk_scores += rb_prs
            risk_scores += prs
        num_common_snps += len(betas)

    if debug and plinkf_dict['has_phenotype']:
        
        # Now calculate the prediction R^2
        corr = sp.corrcoef(plinkf_dict['phenotypes'], risk_scores)[0, 1]
        rb_corr = sp.corrcoef(plinkf_dict['phenotypes'], rb_risk_scores)[0, 1]
        print('PRS R2 prediction accuracy for the whole genome was %0.4f (corr=%0.4f) when predicting into LD ref data' % (corr ** 2, corr))
        print('Log-odds (effects) PRS R2 prediction accuracy for the whole genome was %0.4f (corr=%0.4f) when predicting into LD ref data' % (rb_corr ** 2, rb_corr))
    print('There were %d SNPs in common' % num_common_snps)
    print('In all, %d SNPs were excluded due to nucleotide issues.' % tot_num_non_matching_nts)
    print('Done coordinating genotypes and summary statistics datasets.')
Exemple #46
0
    def run(self,
            minimum_epsilon: float,
            max_nr_populations: int,
            min_acceptance_rate: float = 0.,
            **kwargs) -> History:
        """
        Run the ABCSMC model selection until either of the stopping
        criteria is met.

        Parameters
        ----------

        minimum_epsilon: float
            Stop if epsilon is smaller than minimum epsilon specified here.

        max_nr_populations: int
            The maximum number of populations. Stop if this number is reached.

        min_acceptance_rate: float, optional
            Minimal allowed acceptance rate. Sampling stops if a population
            has a lower rate.


        Population after population is sampled and particles which are close
        enough to the observed data are accepted and added to the next
        population.
        If an adaptive Epsilon is specified (this is the default), then
        the acceptance threshold decreases from population to population
        automatically in a data dependent way.

        Sampling of further populations is stopped, when either of the three
        stopping criteria is met:

            * the maximum number of populations ``max_nr_populations``
              is reached,
            * the acceptance threshold for the last sampled population was
              smaller than ``minimum_epsilon``,
            * or the acceptance rate dropped below ``min_acceptance_rate``.

        The value of ``minimum_epsilon`` determines the quality of the ABCSMC
        approximation. The smaller the better. But sampling time also increases
        with decreasing ``minimum_epsilon``.

        This method can be called repeatedly to sample further populations
        after sampling was stopped once.
        """
        if len(kwargs) > 1:
            raise TypeError("Keyword arguments are not allowed.")

        if "acceptance_rate" in kwargs:
            warnings.warn(
                "The acceptance_rate argument is deprecated and "
                "removed in pyABc 0.9.0. "
                "Use min_acceptance_rate instead.",
                DeprecationWarning,
                stacklevel=2)
            min_acceptance_rate = kwargs["acceptance_rate"]

        t0 = self.history.max_t + 1
        self.history.start_time = datetime.datetime.now()
        # not saved as attribute b/c Mapper of type
        # "ipython_cluster" is not picklable
        for t in range(t0, t0 + max_nr_populations):
            # this is calculated here to avoid double initialization of medians
            current_eps = self.eps(t, self.history)
            abclogger.info('t:' + str(t) + ' eps:' + str(current_eps))
            self._fit_transitions(t)
            self._adapt_population(t)
            # cache model_probabilities to avoid querying the database too often
            model_probabilities = self.history.get_model_probabilities(
                self.history.max_t)
            abclogger.debug('now submitting population ' + str(t))

            m = sp.array(model_probabilities.index)
            p = sp.array(model_probabilities.p)

            def sample_one():
                return self._generate_valid_proposal(t, m, p)

            def eval_one(par):
                return self._evaluate_proposal(*par, current_eps, t,
                                               model_probabilities)

            def accept_one(particle):
                return len(particle.distance_list) > 0

            population = self.sampler.sample_until_n_accepted(
                sample_one, eval_one, accept_one,
                self.population_strategy.nr_particles)

            population = [
                particle for particle in population
                if not isinstance(particle, Exception)
            ]
            abclogger.debug('population ' + str(t) + ' done')
            nr_evaluations = self.sampler.nr_evaluations_
            model_names = [model.name for model in self.models]
            self.history.append_population(t, current_eps, population,
                                           nr_evaluations, model_names)
            abclogger.debug('\ntotal nr simulations up to t =' + str(t) +
                            ' is ' + str(self.history.total_nr_simulations))

            current_acceptance_rate = len(population) / nr_evaluations
            if (current_eps <= minimum_epsilon
                    or (self.stop_if_only_single_model_alive
                        and self.history.nr_of_models_alive() <= 1)
                    or current_acceptance_rate < min_acceptance_rate):
                break

        self.history.done()
        return self.history
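# A hedged usage sketch (added; the calls follow pyABC's documented API,
# details assumed):
#
#     abc = ABCSMC(models, parameter_priors, distance_function)
#     abc.new("sqlite:///abc.db", observed_data)
#     history = abc.run(minimum_epsilon=0.1, max_nr_populations=10)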
Exemple #47
0
def permute(arr):
    # `permutation` is a 1-D index array captured from the enclosing scope
    assert arr.ndim == 1, "Only one dimensional arrays are supported"
    assert arr.shape == permutation.shape, "Array shapes don't match"
    return array([arr[i] for i in permutation])
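
# Usage sketch (added): with `permutation` captured from the enclosing
# scope, e.g.
#
#     permutation = array([2, 0, 1])
#     permute(array([10, 20, 30]))   # -> array([30, 10, 20])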
Exemple #48
0
import scipy as sp
from scipy import interpolate

def estimate(pv, m=None, verbose=False, lowmem=False, pi0=None):
    """
    Estimates q-values from p-values
    Args
    =====
    m: number of tests. If not specified m = pv.size
    verbose: print verbose messages? (default False)
    lowmem: use memory-efficient in-place algorithm
    pi0: if None, it's estimated as suggested in Storey and Tibshirani, 2003.
         For most GWAS this is not necessary, since pi0 is extremely likely to be
         1
    """
    assert (pv.min() >= 0
            and pv.max() <= 1), "p-values should be between 0 and 1"

    original_shape = pv.shape
    pv = pv.ravel()  # returns a flattened view where possible, avoiding the copy flatten() makes

    if m is None:
        m = float(len(pv))
    else:
        # the user has supplied an m
        m *= 1.0

    # if the number of hypotheses is small, just set pi0 to 1
    if len(pv) < 100 and pi0 is None:
        pi0 = 1.0
    elif pi0 is not None:
        pi0 = pi0
    else:
        # evaluate pi0 for different lambdas
        pi0 = []
        lam = sp.arange(0, 0.90, 0.01)
        counts = sp.array([(pv > i).sum() for i in sp.arange(0, 0.9, 0.01)])
        for l in range(len(lam)):
            pi0.append(counts[l] / (m * (1 - lam[l])))

        pi0 = sp.array(pi0)

        # fit natural cubic spline
        tck = interpolate.splrep(lam, pi0, k=3)
        pi0 = interpolate.splev(lam[-1], tck)
        if verbose:
            print("qvalues pi0=%.3f, estimated proportion of null features " %
                  pi0)

        if pi0 > 1:
            if verbose:
                print(
                    "got pi0 > 1 (%.3f) while estimating qvalues, setting it to 1"
                    % pi0)
            pi0 = 1.0

    assert (pi0 >= 0 and pi0 <= 1), "pi0 is not between 0 and 1: %f" % pi0

    if lowmem:
        # low memory version, only uses 1 pv and 1 qv matrices
        qv = sp.zeros((len(pv), ))
        last_pv = pv.argmax()
        qv[last_pv] = (pi0 * pv[last_pv] * m) / float(m)
        pv[last_pv] = -sp.inf
        prev_qv = last_pv
        for i in range(int(len(pv)) - 2, -1, -1):
            cur_max = pv.argmax()
            qv_i = (pi0 * m * pv[cur_max] / float(i + 1))
            pv[cur_max] = -sp.inf
            qv_i1 = prev_qv
            qv[cur_max] = min(qv_i, qv_i1)
            prev_qv = qv[cur_max]

    else:
        p_ordered = sp.argsort(pv)
        pv = pv[p_ordered]
        qv = pi0 * m / len(pv) * pv
        qv[-1] = min(qv[-1], 1.0)

        for i in range(len(pv) - 2, -1, -1):
            qv[i] = min(pi0 * m * pv[i] / (i + 1.0), qv[i + 1])

        # reorder qvalues
        qv_temp = qv.copy()
        qv = sp.zeros_like(qv)
        qv[p_ordered] = qv_temp

    # reshape qvalues
    qv = qv.reshape(original_shape)

    return qv
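
# A hedged usage sketch (added): with uniform p-values, pi0 is estimated
# near 1 and the q-values come out close to 1 after the step-up cumulative
# minimum.
import numpy as np

pvals = np.random.rand(10000)
qvals = estimate(pvals, verbose=True)
print("largest q-value: %.3f" % qvals.max())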
Exemple #49
0
""" Définition des paramètres:
    f : edo à intégrer
    X0: conditions initiales
    h : pas de calcul
"""
def RK4(f,X0,t,h):
    n = len(t)
    X = zeros((len(t),) + shape(X0))  # one row of state per time step
    X[0] = X0
    for i in range(n-1):
        # classic fourth-order Runge-Kutta stages
        k1 = h*f(t[i], X[i])
        k2 = h*f(t[i] + h/2.0, X[i] + k1/2.0)
        k3 = h*f(t[i] + h/2.0, X[i] + k2/2.0)
        k4 = h*f(t[i] + h, X[i] + k3)
        X[i+1] = X[i] + (k1 + 2.0*k2 + 2.0*k3 + k4)/6.0
    return X
    
# integration settings
dt = 0.1                # time step
t = arange(0.0,10.0,dt) # time vector for the integration

# integrate the simple-pendulum ODE with the RK4 method
# (Pendule, theta0 and dtheta0 are defined elsewhere in the script)
X0 = array([theta0, dtheta0])
y = RK4(Pendule,X0,t,dt)

# plot the integrated solution
figure()

plot(t,y[:,1])
show()
Exemple #50
0
def coordinate_genotypes_ss_w_ld_ref(genotype_file=None,
                                     reference_genotype_file=None,
                                     hdf5_file=None,
                                     genetic_map_dir=None,
                                     check_mafs=False,
                                     min_maf=0.01,
                                     skip_coordination=False, 
                                     debug=False):
    print('Coordinating things with genotype file: %s \nref. genot. file: %s' % (genotype_file, reference_genotype_file))
    
    from plinkio import plinkfile
    plinkf = plinkfile.PlinkFile(genotype_file)

    # Loads only the individuals... (I think?)
    plinkf_dict = plinkfiles.get_phenotypes(plinkf)

    # Figure out chromosomes and positions.
    if debug:
        print('Parsing validation bim file')
    loci = plinkf.get_loci()
    plinkf.close()
    gf_chromosomes = [l.chromosome for l in loci]

    chromosomes = sp.unique(gf_chromosomes)
    chromosomes.sort()

    chr_dict = plinkfiles.get_chrom_dict(loci, chromosomes)

    if debug:
        print('Parsing LD reference bim file')
    plinkf_ref = plinkfile.PlinkFile(reference_genotype_file)
    loci_ref = plinkf_ref.get_loci()
    plinkf_ref.close()

    chr_dict_ref = plinkfiles.get_chrom_dict(loci_ref, chromosomes)

    # Open HDF5 file and prepare out data
    assert 'iids' not in hdf5_file, 'Something is wrong with the HDF5 file: individual IDs are already present.'
    if plinkf_dict['has_phenotype']:
        hdf5_file.create_dataset('y', data=plinkf_dict['phenotypes'])

    hdf5_file.create_dataset('fids', data=sp.array(plinkf_dict['fids'], dtype=util.fids_dtype))
    hdf5_file.create_dataset('iids', data=sp.array(plinkf_dict['iids'], dtype=util.iids_dtype))
    ssf = hdf5_file['sum_stats']
    cord_data_g = hdf5_file.create_group('cord_data')

    maf_adj_risk_scores = sp.zeros(plinkf_dict['num_individs'])
    num_common_snps = 0
    # corr_list = []

    tot_g_ss_nt_concord_count = 0
    tot_rg_ss_nt_concord_count = 0
    tot_g_rg_nt_concord_count = 0
    tot_num_non_matching_nts = 0

    # Now iterate over chromosomes
    for chrom in chromosomes:
        ok_indices = {'g': [], 'rg': [], 'ss': []}

        chr_str = 'chrom_%d' % chrom
        print('Working on chromosome: %s' % chr_str)

        chrom_d = chr_dict[chr_str]
        chrom_d_ref = chr_dict_ref[chr_str]
        try:
            ssg = ssf['chrom_%d' % chrom]
        except Exception as err_str:
            print(err_str)
            print('Did not find chromosome in SS dataset.')
            print('Continuing.')
            continue

        g_sids = chrom_d['sids']
        rg_sids = chrom_d_ref['sids']
        ss_sids = (ssg['sids'][...]).astype(util.sids_u_dtype)
        if debug:
            print('Found %d SNPs in validation data, %d SNPs in LD reference data, and %d SNPs in summary statistics.' % (len(g_sids), len(rg_sids), len(ss_sids)))
        common_sids = sp.intersect1d(ss_sids, g_sids)
        common_sids = sp.intersect1d(common_sids, rg_sids)
        if debug:
            print('Found %d SNPs on chrom %d that were common across all datasets' % (len(common_sids), chrom))

        ss_snp_map = []
        g_snp_map = []
        rg_snp_map = []

        ss_sid_dict = {}
        for i, sid in enumerate(ss_sids):
            ss_sid_dict[sid] = i

        g_sid_dict = {}
        for i, sid in enumerate(g_sids):
            g_sid_dict[sid] = i

        rg_sid_dict = {}
        for i, sid in enumerate(rg_sids):
            rg_sid_dict[sid] = i

        for sid in common_sids:
            g_snp_map.append(g_sid_dict[sid])

        # order by positions
        g_positions = sp.array(chrom_d['positions'])[g_snp_map]
        order = sp.argsort(g_positions)
        # order = order.tolist()
        g_snp_map = sp.array(g_snp_map)[order]
        g_snp_map = g_snp_map.tolist()
        common_sids = sp.array(common_sids)[order]

        # Get the other two maps
        for sid in common_sids:
            rg_snp_map.append(rg_sid_dict[sid])

        for sid in common_sids:
            ss_snp_map.append(ss_sid_dict[sid])

        g_nts = sp.array(chrom_d['nts'])
        rg_nts = sp.array(chrom_d_ref['nts'])
        rg_nts_ok = sp.array(rg_nts)[rg_snp_map]
        ss_nts = (ssg['nts'][...]).astype(util.nts_u_dtype)
        betas = ssg['betas'][...]
        log_odds = ssg['log_odds'][...]

        if 'freqs' in ssg:
            ss_freqs = ssg['freqs'][...]

        g_ss_nt_concord_count = sp.sum(
            g_nts[g_snp_map] == ss_nts[ss_snp_map]) / 2.0
        rg_ss_nt_concord_count = sp.sum(rg_nts_ok == ss_nts[ss_snp_map]) / 2.0
        g_rg_nt_concord_count = sp.sum(g_nts[g_snp_map] == rg_nts_ok) / 2.0
        if debug:
            print('Nucleotide concordance counts out of %d genotypes: g-rg: %d, g-ss: %d, rg-ss: %d' % (len(g_snp_map), g_rg_nt_concord_count, g_ss_nt_concord_count, rg_ss_nt_concord_count))
        tot_g_ss_nt_concord_count += g_ss_nt_concord_count
        tot_rg_ss_nt_concord_count += rg_ss_nt_concord_count
        tot_g_rg_nt_concord_count += g_rg_nt_concord_count

        num_non_matching_nts = 0
        num_ambig_nts = 0

        # Identifying which SNPs have nucleotides that are ok..
        ok_nts = []
        for g_i, rg_i, ss_i in zip(g_snp_map, rg_snp_map, ss_snp_map):

            # Sanity check: is the SNP ID the same in all three datasets?
            assert g_sids[g_i] == rg_sids[rg_i] == ss_sids[ss_i], 'Some issues with coordinating the genotypes.'

            g_nt = g_nts[g_i]
            if not skip_coordination:

                rg_nt = rg_nts[rg_i]
                ss_nt = ss_nts[ss_i]

                # Is the nucleotide ambiguous?
                g_nt = [g_nts[g_i][0], g_nts[g_i][1]]
                if tuple(g_nt) in util.ambig_nts:
                    num_ambig_nts += 1
                    tot_num_non_matching_nts += 1
                    continue

                # First, check whether the nucleotides are valid
                if (not g_nt[0] in util.valid_nts) or (not g_nt[1] in util.valid_nts):
                    num_non_matching_nts += 1
                    tot_num_non_matching_nts += 1
                    continue

                os_g_nt = sp.array(
                    [util.opp_strand_dict[g_nt[0]], util.opp_strand_dict[g_nt[1]]])

                flip_nts = False
                if not ((sp.all(g_nt == ss_nt) or sp.all(os_g_nt == ss_nt)) and (sp.all(g_nt == rg_nt) or sp.all(os_g_nt == rg_nt))):
                    if sp.all(g_nt == rg_nt) or sp.all(os_g_nt == rg_nt):
                        flip_nts = (g_nt[1] == ss_nt[0] and g_nt[0] == ss_nt[1]) or (
                            os_g_nt[1] == ss_nt[0] and os_g_nt[0] == ss_nt[1])
                        # Try flipping the SS nt
                        if flip_nts:
                            betas[ss_i] = -betas[ss_i]
                            log_odds[ss_i] = -log_odds[ss_i]
                            if 'freqs' in ssg:
                                ss_freqs[ss_i] = 1 - ss_freqs[ss_i]
                        else:
                            if debug:
                                print("Nucleotides don't match after all?: g_sid=%s, ss_sid=%s, g_i=%d, ss_i=%d, g_nt=%s, ss_nt=%s" % \
                                      (g_sids[g_i], ss_sids[ss_i], g_i,
                                       ss_i, str(g_nt), str(ss_nt)))
                            num_non_matching_nts += 1
                            tot_num_non_matching_nts += 1
                            continue

                    else:
                        # nucleotides do not match on either strand
                        num_non_matching_nts += 1
                        tot_num_non_matching_nts += 1
                        continue

            # everything seems ok.
            ok_indices['g'].append(g_i)
            ok_indices['rg'].append(rg_i)
            ok_indices['ss'].append(ss_i)

            ok_nts.append(g_nt)

        if debug:
            print('%d SNPs had ambiguous nucleotides.' % num_ambig_nts)
            print('%d SNPs were excluded due to nucleotide issues.' % num_non_matching_nts)
            print('%d SNPs were retained on chromosome %d.' % (len(ok_indices['g']), chrom))

        # Resorting by position
        positions = sp.array(chrom_d['positions'])[ok_indices['g']]

        # Now parse SNPs ..
        snp_indices = sp.array(chrom_d['snp_indices'])
        # Pinpoint where the SNPs are in the file.
        snp_indices = snp_indices[ok_indices['g']]
        raw_snps, freqs = plinkfiles.parse_plink_snps(
            genotype_file, snp_indices)

        snp_indices_ref = sp.array(chrom_d_ref['snp_indices'])
        # Pinpoint where the SNPs are in the file.
        snp_indices_ref = snp_indices_ref[ok_indices['rg']]
        raw_ref_snps, freqs_ref = plinkfiles.parse_plink_snps(
            reference_genotype_file, snp_indices_ref)

        snp_stds_ref = sp.sqrt(2 * freqs_ref * (1 - freqs_ref))
        snp_means_ref = freqs_ref * 2

        snp_stds = sp.sqrt(2 * freqs * (1 - freqs))
        snp_means = freqs * 2

        betas = betas[ok_indices['ss']]  
        log_odds = log_odds[ok_indices['ss']]  

        ps = ssg['ps'][...][ok_indices['ss']]
        nts = sp.array(ok_nts)  # [order]
        sids = (ssg['sids'][...]).astype(util.sids_u_dtype)
        sids = sids[ok_indices['ss']]


        # Check SNP frequencies..
        if check_mafs and 'freqs' in ssg:
            ss_freqs = ss_freqs[ok_indices['ss']]
            freq_discrepancy_snp = sp.absolute(ss_freqs - (1 - freqs)) > 0.15 #Array of np.bool values
            if sp.any(freq_discrepancy_snp):
                print('Warning: %d SNPs were filtered due to high allele frequency discrepancy between summary statistics and validation sample' % sp.sum(freq_discrepancy_snp))

                # Filter freq_discrepancy_snps
                ok_freq_snps = sp.logical_not(freq_discrepancy_snp)
                raw_snps = raw_snps[ok_freq_snps]
                snp_stds = snp_stds[ok_freq_snps]
                snp_means = snp_means[ok_freq_snps]
                raw_ref_snps = raw_ref_snps[ok_freq_snps]
                snp_stds_ref = snp_stds_ref[ok_freq_snps]
                snp_means_ref = snp_means_ref[ok_freq_snps]
                freqs = freqs[ok_freq_snps]
                freqs_ref = freqs_ref[ok_freq_snps]
                ps = ps[ok_freq_snps]
                positions = positions[ok_freq_snps]
                nts = nts[ok_freq_snps]
                sids = sids[ok_freq_snps]
                betas = betas[ok_freq_snps]
                log_odds = log_odds[ok_freq_snps]

        # Filter minor allele frequency SNPs.
        maf_filter = (freqs > min_maf) * (freqs < (1 - min_maf))
        maf_filter_sum = sp.sum(maf_filter)
        n_snps = len(maf_filter)
        assert maf_filter_sum <= n_snps, "Problems when filtering SNPs with low minor allele frequencies"
        if sp.sum(maf_filter) < n_snps:
            raw_snps = raw_snps[maf_filter]
            snp_stds = snp_stds[maf_filter]
            snp_means = snp_means[maf_filter]
            raw_ref_snps = raw_ref_snps[maf_filter]
            snp_stds_ref = snp_stds_ref[maf_filter]
            snp_means_ref = snp_means_ref[maf_filter]
            freqs = freqs[maf_filter]
            freqs_ref = freqs_ref[maf_filter]
            ps = ps[maf_filter]
            positions = positions[maf_filter]
            nts = nts[maf_filter]
            sids = sids[maf_filter]
            betas = betas[maf_filter]
            log_odds = log_odds[maf_filter]

        maf_adj_prs = sp.dot(log_odds, raw_snps)
        if debug and plinkf_dict['has_phenotype']:
            maf_adj_corr = sp.corrcoef(
                plinkf_dict['phenotypes'], maf_adj_prs)[0, 1]
            print('Log odds, per genotype PRS correlation w phenotypes for chromosome %d was %0.4f' % (chrom, maf_adj_corr))

        genetic_map = []
        if genetic_map_dir is not None:
            with gzip.open(genetic_map_dir + 'chr%d.interpolated_genetic_map.gz' % chrom) as f:
                for line in f:
                    l = line.split()
                    # NB: the SNP-ID filtering below is disabled in this
                    # version (sid_set is not defined in this function)
#                     if l[0] in sid_set:
#                         genetic_map.append(l[0])
        else:
            genetic_map = None

        coord_data_dict = {'chrom': 'chrom_%d' % chrom, 
                           'raw_snps_ref': raw_ref_snps, 
                           'snp_stds_ref': snp_stds_ref, 
                           'snp_means_ref': snp_means_ref, 
                           'freqs_ref': freqs_ref,
                           'ps': ps,
                           'positions': positions,
                           'nts': nts,
                           'sids': sids,
                           'genetic_map': genetic_map,
                           'betas': betas,
                           'log_odds': log_odds,
                           'log_odds_prs': maf_adj_prs,
                           'raw_snps_val':raw_snps,
                           'snp_stds_val':snp_stds,
                           'snp_means_val':snp_means,
                           'freqs_val':freqs}
          
        write_coord_data(cord_data_g, coord_data_dict)

        # risk_scores += prs
        maf_adj_risk_scores += maf_adj_prs
        num_common_snps += len(betas)

    # Now calculate the prediction r^2
    if debug and plinkf_dict['has_phenotype']:
        maf_adj_corr = sp.corrcoef(
            plinkf_dict['phenotypes'], maf_adj_risk_scores)[0, 1]
        print('Log odds, per PRS correlation for the whole genome was %0.4f (r^2=%0.4f)' % (maf_adj_corr, maf_adj_corr ** 2))
    print('Overall nucleotide concordance counts: g_rg: %d, g_ss: %d, rg_ss: %d' % (tot_g_rg_nt_concord_count, tot_g_ss_nt_concord_count, tot_rg_ss_nt_concord_count))
    print('There were %d SNPs in common' % num_common_snps)
    print('In all, %d SNPs were excluded due to nucleotide issues.' % tot_num_non_matching_nts)
    print('Done!')
Exemple #51
0
    def __init__(self,
                 challenge_func,
                 ns=10,
                 npop1=20,
                 pr=0.3,
                 beta=0.85,
                 npop2=20,
                 w=0.7,
                 c1=1.5,
                 c2=1.5):
        # Population sizes
        seed()
        self.ns = ns
        self.npop1 = npop1
        self.npop2 = npop2
        # DE parameters
        self.beta = beta
        self.pr = pr
        # PSO parameters
        self.c1 = c1
        self.c2 = c2
        self.w = w
        # Function representing the challenge problem
        self.fc = challenge_func
        # Solutions of the challenge problem
        #self.pso = pso(fitness_func = challenge_func,npop = npop2,w = w,c1 = c1,c2 = c2)
        self.ans1 = scipy.zeros(self.npop1)
        self.ans2 = scipy.zeros(self.npop2)
        # Populations
        self.pop1 = []
        self.pop2 = []
        # Generate pop1 and pop2 and solve the challenge problem
        for i in scipy.arange(self.npop1):
            self.ans1[i], aux = self.resolve_desafio(self.gera_individuo())
            self.pop1.append(aux.copy())

        for i in scipy.arange(self.npop2):
            self.ans2[i], aux = self.resolve_desafio(self.gera_individuo())
            self.pop2.append(aux.copy())

        self.pop1 = scipy.array(self.pop1)
        self.pop2 = scipy.array(self.pop2)

        self.hall_of_fame1 = []
        for i in scipy.arange(15):
            self.hall_of_fame1.insert(
                0,
                scipy.hstack((self.ans1.min(), self.pop1[self.ans1.argmin()])))

        self.hall_of_fame2 = []
        for i in scipy.arange(15):
            #self.hall_of_fame2.insert(0,scipy.hstack((self.pso.fit[0],self.pso.pop[0])))
            self.hall_of_fame2.insert(
                0,
                scipy.hstack((self.ans2.min(), self.pop2[self.ans2.argmin()])))

        # Fitness functions of the populations
        self.fit1 = scipy.zeros(self.npop1)
        self.fit2 = scipy.zeros(self.npop2)

        for i in scipy.arange(self.npop2):
            self.fit2[i] = self.avalia_aptidao2(self.ans2[i])

        for i in scipy.arange(self.npop1):
            self.fit1[i] = self.avalia_aptidao1(self.ans1[i])

        # initialize the PSO velocities
        self.v = scipy.zeros(self.pop2.shape)
        # store the best fitness of each PSO particle
        self.bfp = scipy.copy(self.pop2)
        self.bfp_fitness = scipy.copy(self.fit2)
        self.bfp_ans = scipy.copy(self.ans2)
        # store the global best PSO fitness
        self.bfg = self.pop2[self.bfp_fitness.argmax()].copy()
        self.bfg_fitness = self.bfp_fitness.max().copy()
        self.bfg_ans = self.bfp_ans[self.bfp_fitness.argmax()].copy()
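# --- Illustrative sketch (not from the original source): the constructor
# above relies on helper methods defined elsewhere in the class
# (gera_individuo, resolve_desafio, avalia_aptidao1, avalia_aptidao2).
# Rough, hypothetical versions matching the call sites, with candidate
# dimension ns and resolve_desafio returning an (answer, candidate) pair:
import scipy

def gera_individuo(self):
    # draw a random candidate solution of dimension self.ns
    return scipy.rand(self.ns)

def resolve_desafio(self, x):
    # evaluate the challenge function; return its answer and the candidate
    return self.fc(x), x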
Exemple #52
0
def Pendule(theta,t):
    return array([theta[1], -omega0**2*sin(theta[0])])
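# --- Usage sketch (not from the original source): Pendule is the right-hand
# side of the pendulum ODE theta'' = -omega0**2 * sin(theta), written for an
# ODE integrator.  omega0 is a module-level global assumed by the function:
from scipy import array, linspace, pi, sin
from scipy.integrate import odeint

omega0 = 2 * pi                                     # assumed natural frequency
t = linspace(0, 10, 500)                            # time grid
theta = odeint(Pendule, array([pi / 4, 0.0]), t)    # columns: angle, angular velocity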
Exemple #53
0
def lattice_spheres(shape: List[int],
                    radius: int,
                    offset: int = 0,
                    lattice: str = 'sc'):
    r"""
    Generates a cubic packing of spheres in a specified lattice arrangement

    Parameters
    ----------
    shape : list
        The size of the image to generate in [Nx, Ny, Nz] where N is the
        number of voxels in each direction.  For a 2D image, use [Nx, Ny].

    radius : scalar
        The radius of spheres (circles) in the packing

    offset : scalar
        The amount of offset (+ or -) to add between sphere centers.

    lattice : string
        Specifies the type of lattice to create.  Options are:

        'sc' - Simple Cubic (default)

        'fcc' - Face Centered Cubic

        'bcc' - Body Centered Cubic

        For 2D images, 'sc' gives a square lattice and both 'fcc' and 'bcc'
        give a triangular lattice.

    Returns
    -------
    image : ND-array
        A boolean array with ``True`` values denoting the pore space
    """
    print(78 * '―')
    print('lattice_spheres: Generating ' + lattice + ' lattice')
    r = radius
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        shape = sp.full((3, ), int(shape))
    im = sp.zeros(shape, dtype=bool)
    im = im.squeeze()

    # Parse lattice type
    lattice = lattice.lower()
    if im.ndim == 2:
        if lattice in ['sc']:
            lattice = 'sq'
        if lattice in ['bcc', 'fcc']:
            lattice = 'tri'

    if lattice in ['sq', 'square']:
        spacing = 2 * r
        s = int(spacing / 2) + sp.array(offset)
        coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s]
        im[coords[0], coords[1]] = 1
    elif lattice in ['tri', 'triangular']:
        spacing = 2 * sp.floor(sp.sqrt(2 * (r**2))).astype(int)
        s = int(spacing / 2) + offset
        coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s]
        im[coords[0], coords[1]] = 1
        coords = sp.mgrid[s + r:im.shape[0] - r:2 * s,
                          s + r:im.shape[1] - r:2 * s]
        im[coords[0], coords[1]] = 1
    elif lattice in ['sc', 'simple cubic', 'cubic']:
        spacing = 2 * r
        s = int(spacing / 2) + sp.array(offset)
        coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s,
                          r:im.shape[2] - r:2 * s]
        im[coords[0], coords[1], coords[2]] = 1
    elif lattice in ['bcc', 'body centered cubic']:
        spacing = 2 * sp.floor(sp.sqrt(4 / 3 * (r**2))).astype(int)
        s = int(spacing / 2) + offset
        coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s,
                          r:im.shape[2] - r:2 * s]
        im[coords[0], coords[1], coords[2]] = 1
        coords = sp.mgrid[s + r:im.shape[0] - r:2 * s,
                          s + r:im.shape[1] - r:2 * s,
                          s + r:im.shape[2] - r:2 * s]
        im[coords[0], coords[1], coords[2]] = 1
    elif lattice in ['fcc', 'face centered cubic']:
        spacing = 2 * sp.floor(sp.sqrt(2 * (r**2))).astype(int)
        s = int(spacing / 2) + offset
        coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s,
                          r:im.shape[2] - r:2 * s]
        im[coords[0], coords[1], coords[2]] = 1
        coords = sp.mgrid[r:im.shape[0] - r:2 * s, s + r:im.shape[1] - r:2 * s,
                          s + r:im.shape[2] - r:2 * s]
        im[coords[0], coords[1], coords[2]] = 1
        coords = sp.mgrid[s + r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s,
                          s + r:im.shape[2] - r:2 * s]
        im[coords[0], coords[1], coords[2]] = 1
        coords = sp.mgrid[s + r:im.shape[0] - r:2 * s,
                          s + r:im.shape[1] - r:2 * s, r:im.shape[2] - r:2 * s]
        im[coords[0], coords[1], coords[2]] = 1
    im = ~(spim.distance_transform_edt(~im) < r)
    return im
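# --- Usage sketch (not from the original source): the definition above
# assumes scipy as sp, scipy.ndimage as spim and typing.List are imported
# earlier in the module:
import scipy as sp
import scipy.ndimage as spim
from typing import List

# 2D square packing of circles of radius 10 in a 200x200 image
im = lattice_spheres(shape=[200, 200], radius=10, lattice='sc')
print(im.shape, im.dtype)   # (200, 200) bool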
Exemple #54
0
def CCOMB(vectors):
    return (sp.array(VECTSUM(vectors)) / float(len(vectors))).tolist()  
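# --- Illustrative sketch (not from the original source): CCOMB returns the
# centroid (equal-weight convex combination) of a list of vectors.  VECTSUM
# is not shown; a plausible element-wise sum matching the call site:
import scipy as sp

def VECTSUM(vectors):
    return sp.sum(sp.array(vectors), axis=0).tolist()

print(CCOMB([[0, 0], [1, 1], [2, 2]]))   # -> [1.0, 1.0]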
Exemple #55
0
 def getObservation(self):
     self.obs = array([self.state % 2, (self.state // 2) % 2, 1])  # two low bits of state, plus a bias term
     return self.obs
Exemple #56
0
def noise(shape: List[int],
          porosity=None,
          octaves: int = 3,
          frequency: int = 32,
          mode: str = 'simplex'):
    r"""
    Generate a field of spatially correlated random noise using the Perlin
    noise algorithm, or the updated Simplex noise algorithm.

    Parameters
    ----------
    shape : array_like
        The size of the image to generate in [Nx, Ny, Nz] where N is the
        number of voxels.

    porosity : float
        If specified, this will threshold the image to the specified value
        prior to returning.  If no value is given (the default), then the
        scalar noise field is returned.

    octaves : int
        Controls the *texture* of the noise, with higher octaves giving more
        complex features over larger length scales.

    frequency : array_like
        Controls the relative sizes of the features, with higher frequencies
        giving larger features.  A scalar value will apply the same frequency
        in all directions, giving an isotropic field; a vector value will
        apply the specified values along each axis to create anisotropy.

    mode : string
        Which noise algorithm to use, either ``'simplex'`` (default) or
        ``'perlin'``.

    Returns
    -------
    image : ND-array
        If porosity is given, then a boolean array with ``True`` values
        denoting the pore space is returned.  If not, then a normally
        distributed and spatially correlated random noise field is returned.

    Notes
    -----
    This method depends on a package called 'noise', which must be
    compiled.  It is included in the Anaconda distribution, or a
    platform-specific binary can be downloaded.

    See Also
    --------
    porespy.tools.norm_to_uniform

    """
    try:
        import noise
    except ModuleNotFoundError:
        raise Exception("The noise package must be installed")
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        Lx, Ly, Lz = sp.full((3, ), int(shape))
    elif len(shape) == 2:
        Lx, Ly = shape
        Lz = 1
    elif len(shape) == 3:
        Lx, Ly, Lz = shape
    if mode == 'simplex':
        f = noise.snoise3
    else:
        f = noise.pnoise3
    frequency = sp.atleast_1d(frequency)
    if frequency.size == 1:
        freq = sp.full(shape=(3, ), fill_value=frequency[0])
    elif frequency.size == 2:
        freq = sp.concatenate((frequency, [1]))
    else:
        freq = sp.array(frequency)
    im = sp.zeros(shape=[Lx, Ly, Lz], dtype=float)
    for x in range(Lx):
        for y in range(Ly):
            for z in range(Lz):
                im[x, y, z] = f(x=x / freq[0],
                                y=y / freq[1],
                                z=z / freq[2],
                                octaves=octaves)
    im = im.squeeze()
    if porosity:
        im = norm_to_uniform(im, scale=[0, 1])
        im = im < porosity
    return im
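# --- Usage sketch (not from the original source): requires the third-party
# 'noise' package and porespy's norm_to_uniform helper, as noted above.
# A 2D spatially correlated field thresholded to 50% porosity:
im = noise(shape=[128, 128], porosity=0.5, octaves=3, frequency=16)
print(im.mean())   # roughly 0.5 by construction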
Exemple #57
0
def detect_drop_off_cliff(timeseries, metric_name, metric_expiration_time,
                          metric_min_average, metric_min_average_seconds,
                          metric_trigger):
    """
    A timeseries is anomalous if the average of the last 10 datapoints is
    <trigger> times greater than the last data point AND if it has not
    experienced frequent cliff drops in the last 10 datapoints.  If the
    timeseries has experienced 2 or more datapoints of equal or lesser value
    in the last 10 or EXPIRATION_TIME datapoints, or if its average is below
    MIN_AVERAGE (when set), the algorithm deems the datapoint NOT anomalous
    but normal.
    This algorithm is best suited to timeseries in which most datapoints are
    > 100 (e.g. high rate).  The arbitrary <trigger> values become noisier
    with lower-value datapoints, but the algorithm still matches drops off
    cliffs.
    """

    if len(timeseries) < 30:
        return False

    try:
        int_end_timestamp = int(timeseries[-1][0])
        # Determine resolution of the data set
        int_second_last_end_timestamp = int(timeseries[-2][0])
        resolution = int_end_timestamp - int_second_last_end_timestamp
        ten_data_point_seconds = resolution * 10
        ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds

        ten_datapoint_array = scipy.array([
            x[1] for x in timeseries
            if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago
        ])
        ten_datapoint_array_len = len(ten_datapoint_array)
    except:
        return None

    if ten_datapoint_array_len > 3:

        ten_datapoint_min_value = np.amin(ten_datapoint_array)

        # DO NOT handle if negative integers are in the range, where is the
        # bottom of the cliff if a range goes negative?  Testing with a noisy
        # sine wave timeseries that had a drop off cliff introduced to the
        # postive data side, proved that this algorithm does work on timeseries
        # with data values in the negative range
        if ten_datapoint_min_value < 0:
            return False

        # autocorrect if there are 0s in the data, like graphite expects
        # 1 datapoint every 10 seconds, but the timeseries only has 1 every 60 seconds

        ten_datapoint_max_value = np.amax(ten_datapoint_array)

        # The algorithm should have already fired in 10 datapoints if the
        # timeseries dropped off a cliff, these are all zero
        if ten_datapoint_max_value == 0:
            return False

        # If the lowest is equal to the highest, no drop off cliff
        if ten_datapoint_min_value == ten_datapoint_max_value:
            return False

#        if ten_datapoint_max_value < 10:
#            return False

        ten_datapoint_array_sum = np.sum(ten_datapoint_array)
        ten_datapoint_value = int(ten_datapoint_array[-1])
        ten_datapoint_average = ten_datapoint_array_sum / ten_datapoint_array_len

        # if a metric goes up and down a lot and falls off a cliff frequently
        # it is normal, not anomalous
        try:
            # np.where returns a tuple of index arrays; count the matches
            number_of_similar_datapoints = len(
                np.where(ten_datapoint_array <= ten_datapoint_min_value)[0])
        except:
            return None

        # Detect once only - to make this useful and not noisy the first one
        # would have already fired and detected the drop
        if number_of_similar_datapoints > 2:
            return False

        # evaluate against 20 datapoints as well, reduces chatter on peaky ones
        # tested with 60 as well and 20 is sufficient to filter noise
        try:
            twenty_data_point_seconds = resolution * 20
            twenty_datapoints_ago = int_end_timestamp - twenty_data_point_seconds
            twenty_datapoint_array = scipy.array([
                x[1] for x in timeseries
                if x[0] <= int_end_timestamp and x[0] > twenty_datapoints_ago
            ])
            number_of_similar_datapoints_in_twenty = len(
                np.where(twenty_datapoint_array <= ten_datapoint_min_value)[0])
            if number_of_similar_datapoints_in_twenty > 2:
                return False
        except:
            return None

        # Check if there is a similar data point in EXPIRATION_TIME
        # Disabled as redis alert cache will filter on this


#        if metric_expiration_time > twenty_data_point_seconds:
#            expiration_time_data_point_seconds = metric_expiration_time
#            expiration_time_datapoints_ago = int_end_timestamp - metric_expiration_time
#            expiration_time_datapoint_array = scipy.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > expiration_time_datapoints_ago])
#            number_of_similar_datapoints_in_expiration_time = len(np.where(expiration_time_datapoint_array <= ten_datapoint_min_value))
#            if number_of_similar_datapoints_in_expiration_time > 2:
#                return False

        if metric_min_average > 0 and metric_min_average_seconds > 0:
            try:
                min_average = metric_min_average
                min_average_seconds = metric_min_average_seconds
                min_average_data_point_seconds = resolution * min_average_seconds
                #            min_average_datapoints_ago = int_end_timestamp - (resolution * min_average_seconds)
                min_average_datapoints_ago = int_end_timestamp - min_average_seconds
                min_average_array = scipy.array([
                    x[1] for x in timeseries if x[0] <= int_end_timestamp
                    and x[0] > min_average_datapoints_ago
                ])
                min_average_array_average = np.sum(min_average_array) / len(
                    min_average_array)
                if min_average_array_average < min_average:
                    return False
            except:
                return None

        if ten_datapoint_max_value < 101:
            trigger = 15
        if ten_datapoint_max_value < 20:
            trigger = ten_datapoint_average / 2
        if ten_datapoint_max_value > 100:
            trigger = 100
        if ten_datapoint_value == 0:
            # Cannot divide by 0, so set to 0.1 to prevent error
            ten_datapoint_value = 0.1
        if ten_datapoint_value == 1:
            trigger = 1
        if ten_datapoint_value == 1 and ten_datapoint_max_value < 10:
            trigger = 0.1
        if ten_datapoint_value == 0.1 and ten_datapoint_average < 1 and ten_datapoint_array_sum < 7:
            trigger = 7

        ten_datapoint_result = ten_datapoint_average / ten_datapoint_value
        if int(ten_datapoint_result) > trigger:
            if ENABLE_BOUNDARY_DEBUG:
                logger.info(
                    'detect_drop_off_cliff - %s, ten_datapoint_value = %s, ten_datapoint_array_sum = %s, ten_datapoint_average = %s, trigger = %s, ten_datapoint_result = %s'
                    %
                    (str(int_end_timestamp), str(ten_datapoint_value),
                     str(ten_datapoint_array_sum), str(ten_datapoint_average),
                     str(trigger), str(ten_datapoint_result)))
            return True

    return False
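# --- Usage sketch (not from the original source): a synthetic series at 60s
# resolution that sits near 100 and then falls off a cliff on the last
# datapoint.  Assumes the module globals (scipy, np, logger,
# ENABLE_BOUNDARY_DEBUG) are configured as in the surrounding module:
import time

now = int(time.time())
series = [(now - (60 - i) * 60, 100.0) for i in range(60)] + [(now, 1.0)]
print(detect_drop_off_cliff(series, 'test.metric',
                            metric_expiration_time=1800,
                            metric_min_average=0,
                            metric_min_average_seconds=0,
                            metric_trigger=0))   # -> True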
Exemple #58
0
    def initZ(self,
              pmean,
              pvar,
              qmean,
              qvar,
              qE=None,
              qE2=None,
              covariates=None,
              scale_covariates=None):
        """Method to initialise the latent variables

        PARAMETERS
        ----------
        pmean: mean of the prior distribution P
        pvar: variance of the prior distribution P
        qmean: initial mean of the Q distribution; either a scalar, an nd
            array of shape (nsamples, nfactors), or one of "random",
            "orthogonal" or "pca"
        qvar: initial variance of the Q distribution
        qE: initial first moment of the Q distribution
        qE2: initial second moment of the Q distribution
        covariates: nd array
            matrix of covariates with dimensions (nsamples, ncovariates)
        scale_covariates: index or boolean array selecting which covariates
            to center and scale to match the prior N(0,1)
        """

        # Initialise mean of the Q distribution
        if qmean is not None:
            if isinstance(qmean, str):
                if qmean == "random":  # Random initialisation of latent variables
                    qmean = stats.norm.rvs(loc=0,
                                           scale=1,
                                           size=(self.N, self.K))

                elif qmean == "orthogonal":  # Latent variables are initialised randomly but ensuring orthogonality
                    pca = sklearn.decomposition.PCA(n_components=self.K,
                                                    copy=True,
                                                    whiten=True)
                    pca.fit(
                        stats.norm.rvs(loc=0, scale=1, size=(self.N, 9999)).T)
                    qmean = pca.components_.T

                elif qmean == "pca":  # Latent variables are initialised from PCA in the concatenated matrix
                    pca = sklearn.decomposition.PCA(n_components=self.K,
                                                    copy=True,
                                                    whiten=True)
                    pca.fit(s.concatenate(self.data, axis=0).T)
                    qmean = pca.components_.T

            elif isinstance(qmean, s.ndarray):
                assert qmean.shape == (self.N, self.K)

            elif isinstance(qmean, (int, float)):
                qmean = s.ones((self.N, self.K)) * qmean

            else:
                print("Wrong initialisation for Z")
                exit()

        # Add covariates
        if covariates is not None:
            assert scale_covariates is not None, "If you use covariates also define data_opts['scale_covariates']"

            # Select indices for covariates
            idx_covariates = s.array(range(covariates.shape[1]))

            # Center and scale the covariates to match the prior distribution N(0,1)
            # to-do: this needs to be improved to take the particular mean and var into account
            # covariates[scale_covariates] = (covariates - covariates.mean(axis=0)) / covariates.std(axis=0)
            scale_covariates = s.array(scale_covariates)
            covariates[:, scale_covariates] = (
                covariates[:, scale_covariates] -
                s.nanmean(covariates[:, scale_covariates], axis=0)) / s.nanstd(
                    covariates[:, scale_covariates], axis=0)

            # Set to zero the missing values in the covariates
            covariates[s.isnan(covariates)] = 0.
            qmean[:, idx_covariates] = covariates
        else:
            idx_covariates = None

        # Initialise the node
        # self.Z = Constant_Node(dim=(self.N,self.K), value=qmean)
        self.Z = Z_Node(dim=(self.N, self.K),
                        pmean=s.ones((self.N, self.K)) * pmean,
                        pvar=s.ones((self.K, )) * pvar,
                        qmean=s.ones((self.N, self.K)) * qmean,
                        qvar=s.ones((self.N, self.K)) * qvar,
                        qE=qE,
                        qE2=qE2,
                        idx_covariates=idx_covariates)
        self.nodes["Z"] = self.Z
def sharpe(R, w):
    var = portfolio_var(R, w)
    mean_return = mean(R, axis=0)
    ret = sp.array(mean_return)
    return (sp.dot(w, ret) - rf) / sqrt(var)
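# --- Illustrative sketch (not from the original source): sharpe relies on
# module-level mean, sqrt, rf and a portfolio_var helper that are not shown.
# Plausible versions, with rf as an assumed risk-free rate:
import scipy as sp
from scipy import mean, sqrt

rf = 0.01   # assumed risk-free rate

def portfolio_var(R, w):
    # w' * cov(R) * w, using the sample covariance of the return columns
    return sp.dot(sp.dot(w, sp.cov(R.T)), w)

R = sp.array([[0.02, 0.01], [0.01, 0.03], [0.04, -0.01]])   # rows: periods, cols: assets
w = sp.array([0.5, 0.5])
print(sharpe(R, w))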
def autoaggregate_ts(timeseries, autoaggregate_value):
    """
    This is a utility function used to autoaggregate a timeseries.  If a
    timeseries data set has 6 datapoints per minute but only one data value
    per minute is expected, autoaggregate will sum the datapoints in each
    window of autoaggregate_value seconds.
    """
    if ENABLE_BOUNDARY_DEBUG:
        logger.info('debug :: autoaggregate_ts at %s seconds' %
                    str(autoaggregate_value))

    aggregated_timeseries = []

    if len(timeseries) < 60:
        if ENABLE_BOUNDARY_DEBUG:
            logger.info(
                'debug :: autoaggregate_ts - timeseries less than 60 datapoints, TooShort'
            )
        raise TooShort()

    int_end_timestamp = int(timeseries[-1][0])
    last_hour = int_end_timestamp - 3600
    last_timestamp = int_end_timestamp
    next_timestamp = last_timestamp - int(autoaggregate_value)
    start_timestamp = last_hour

    if ENABLE_BOUNDARY_DEBUG:
        logger.info('debug :: autoaggregate_ts - aggregating from %s to %s' %
                    (str(start_timestamp), str(int_end_timestamp)))

    valid_timestamps = False
    try:
        valid_timeseries = int_end_timestamp - start_timestamp
        if valid_timeseries == 3600:
            valid_timestamps = True
    except Exception as e:
        logger.error('Algorithm error: %s' % traceback.format_exc())
        logger.error('error: %s' % e)
        aggregated_timeseries = []
        return aggregated_timeseries

    if valid_timestamps:
        try:
            # Check sane variables otherwise we can just hang here in a while loop
            while int(next_timestamp) > int(start_timestamp):
                value = np.sum(
                    scipy.array([
                        int(x[1]) for x in timeseries
                        if x[0] <= last_timestamp and x[0] > next_timestamp
                    ]))
                aggregated_timeseries += ((last_timestamp, value), )
                last_timestamp = next_timestamp
                next_timestamp = last_timestamp - autoaggregate_value
            aggregated_timeseries.reverse()
            return aggregated_timeseries
        except Exception as e:
            logger.error('Algorithm error: %s' % traceback.format_exc())
            logger.error('error: %s' % e)
            aggregated_timeseries = []
            return aggregated_timeseries
    else:
        logger.error(
            'could not aggregate - timestamps not valid for aggregation')
        aggregated_timeseries = []
        return aggregated_timeseries
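# --- Usage sketch (not from the original source): aggregate an hour of
# 10-second-resolution data into 60-second buckets.  Assumes the module
# globals (scipy, np, logger, ENABLE_BOUNDARY_DEBUG) and the TooShort
# exception from the surrounding module:
import time

now = int(time.time())
series = [(now - 3600 + i * 10, 1.0) for i in range(361)]
agg = autoaggregate_ts(series, 60)
print(len(agg), agg[-1])   # ~59 buckets; each bucket sums six datapoints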