Example #1
def scenario_inplace_padded_C2R(batch,tic,toc):

  n = array([2*BENG_CHANNELS_],int32)
  inembed = array([16*(BENG_CHANNELS//16+1)],int32)
  onembed = array([2*inembed[0]],int32)
  plan = cufft.cufftPlanMany(1, n.ctypes.data, inembed.ctypes.data, 1, inembed[0],
                             onembed.ctypes.data, 1, onembed[0],
                             cufft.CUFFT_C2R, batch)

  data_shape = (batch,inembed[0])
  cpu_data = standard_normal(data_shape) + 1j * standard_normal(data_shape)
  cpu_data = cpu_data.astype(complex64)
  gpu_data  = cuda.mem_alloc(8*batch*inembed[0])		# complex64
  cuda.memcpy_htod(gpu_data,cpu_data)

  tic.record()
  cufft.cufftExecC2R(plan,int(gpu_data),int(gpu_data))
  toc.record()
  toc.synchronize()

  cpu_result = np.empty(batch*onembed[0],dtype=np.float32)
  cuda.memcpy_dtoh(cpu_result,gpu_data)
  cpu_result = cpu_result.reshape((batch,onembed[0]))[:,:2*BENG_CHANNELS_]/(2*BENG_CHANNELS_)
  result = irfft(cpu_data[:,:BENG_CHANNELS],axis=-1)
  print('Batched in-place scenario')
  print('test passed:', np.allclose(cpu_result, result))
  print('GPU time:', tic.time_till(toc), ' ms = ', tic.time_till(toc)/(batch*0.5*13.128e-3), ' x real (both SB)')
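
For reference, a minimal CPU-only sketch of the transform this snippet validates, assuming (as the slicing above implies) that BENG_CHANNELS = BENG_CHANNELS_ + 1; the sizes are hypothetical stand-ins:

import numpy as np

batch, BENG_CHANNELS_ = 4, 16384
BENG_CHANNELS = BENG_CHANNELS_ + 1
# One half-spectrum per batch element, as handed to the C2R plan.
spectrum = (np.random.standard_normal((batch, BENG_CHANNELS))
            + 1j * np.random.standard_normal((batch, BENG_CHANNELS))).astype(np.complex64)
# Inverse real FFT: BENG_CHANNELS complex bins -> 2*BENG_CHANNELS_ real samples.
time_series = np.fft.irfft(spectrum, n=2 * BENG_CHANNELS_, axis=-1)
print(time_series.shape)  # (4, 32768)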
Example #2
    def step(self, f):
        atoms = self.atoms
        p = self.atoms.get_momenta()

        random1 = standard_normal(size=(len(atoms), 3))
        random2 = standard_normal(size=(len(atoms), 3))
        
        rrnd = self.get("sdpos") * random1
        prnd = (self.get("sdmom") * self.get("pmcor") * random1 +
                self.get("sdmom") * self.get("cnst") * random2)

        if self.fixcm:
            rrnd = rrnd - np.sum(rrnd, 0) / len(atoms)
            prnd = prnd - np.sum(prnd, 0) / len(atoms)
            factor = np.sqrt(self.natoms / (self.natoms - 1.0)) 
            rrnd *= factor
            prnd *= factor

        atoms.set_positions(atoms.get_positions() +
                            self.get("c1") * p +
                            self.get("c2") * f + rrnd)
        p *= self.get("act0")
        p += self.get("c3") * f + prnd
        atoms.set_momenta(p)
                      
        f = atoms.get_forces()
        atoms.set_momenta(atoms.get_momenta() + self.get("c4") * f)
        return f
Example #3
def main():
    from matplotlib.pyplot import figure, plot, close
    from numpy.random import standard_normal, choice
    from numpy.linalg import qr, norm
    from numpy import dot, sqrt, real, imag
    import CAMP_C
    #from myOmp import omp_naive as omp
    N=2000
    M=900
    K=100
    sigma_n=0.001
    A=standard_normal((N,N))+1j*standard_normal((N,N))
    (Q,R)=qr(A)
    i=choice(N,M,False)  
    A=Q[i,:]

    x=(standard_normal((N,1))+1j*standard_normal((N,1)))/sqrt(2)
    j=choice(N,N-K,False)
    x[j,:]=0
    
    y=dot(A,x)+sigma_n*standard_normal((M,1))
    xhat=CAMP_C.CAMP(A,y,1,True)
    print(norm(x-xhat)/N)
    close('all')
    plot(real(x))
    plot(real(xhat))
    figure()
    plot(imag(x))
    plot(imag(xhat))
Example #4
    def step(self, f):
        atoms = self.atoms
        p = self.atoms.get_momenta()

        random1 = standard_normal(size=(len(atoms), 3))
        random2 = standard_normal(size=(len(atoms), 3))

        if self.communicator is not None:
            self.communicator.broadcast(random1, 0)
            self.communicator.broadcast(random2, 0)
        
        rrnd = self.sdpos * random1
        prnd = (self.sdmom * self.pmcor * random1 +
                self.sdmom * self.cnst * random2)

        if self.fixcm:
            rrnd = rrnd - np.sum(rrnd, 0) / len(atoms)
            prnd = prnd - np.sum(prnd, 0) / len(atoms)
            rrnd *= np.sqrt(self.natoms / (self.natoms - 1.0))
            prnd *= np.sqrt(self.natoms / (self.natoms - 1.0))

        atoms.set_positions(atoms.get_positions() +
                            self.c1 * p +
                            self.c2 * f + rrnd)
        p *= self.act0
        p += self.c3 * f + prnd
        atoms.set_momenta(p)
                      
        f = atoms.get_forces()
        atoms.set_momenta(atoms.get_momenta() + self.c4 * f)
        return f
Example #5
def gen_sn(M, I, anti_paths=True, no_match=True):
    """
    Function to generate random numbers for simulation

    Parameters
    ==========
    M : int
        number of time intervals for discretization
    I : int
        number of paths to be simulated
    anti_paths : Boolean
        use of antithetic variables
    no_match : Boolean
        use of moment matching
    """
    import numpy as np
    import numpy.random as npr

    if anti_paths is True:
        sn = npr.standard_normal((M+1, I//2))
        sn = np.concatenate((sn, -sn), axis=1)
    else:
        sn = npr.standard_normal((M+1, I))
    if no_match is True:
        sn = (sn - sn.mean())/sn.std()
    return sn
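
A minimal usage sketch (M and I here are hypothetical): with antithetic paths enabled and moment matching off, the concatenation of sn and -sn has mean zero by construction.

sn = gen_sn(M=50, I=10000, anti_paths=True, no_match=False)
print(sn.shape)   # (51, 10000)
print(sn.mean())  # effectively zero, by the antithetic pairing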
Example #6
def gen_sn(M, I, anti_paths=False, mo_match=False):
	''' Function to generate random numbers for simulation with variance reduction

	from Y. Hilpisch, Python for Finance

	Parameters
	==========
	M : int
	    number of time intervals for discretization
	I : int
	    number of paths to be simulated
	anti_paths : Boolean
	    use of antithetic variates
	mo_match : Boolean
	    use of moment matching
	'''

	if anti_paths is True:
		sn = npr.standard_normal((M + 1, I // 2))
		sn = np.concatenate((sn, -sn), axis=1)
	else:
		sn = npr.standard_normal((M + 1, I))
	if mo_match is True:
		sn = (sn - sn.mean()) / sn.std()
	return sn
Example #7
def gen_sn2(D, I, anti_paths=True, mo_match=True):
	''' Function to generate random numbers for simulation.

    from Y. Hilpisch, Python for Finance
    Modified by L. Wang for handling high dimensional data generation.

	Parameters
	==========
	D : int
	    number of dimensions
	I : int
	    number of paths to be simulated
	anti_paths : Boolean
	    use of antithetic variates
	mo_match : Boolean
	    use of moment matching
	'''
	import numpy as np
	import numpy.random as npr
	if anti_paths is True:
		sn = npr.standard_normal((D, I // 2))
		sn = np.concatenate((sn, -sn), axis=1)
	else:
		sn = npr.standard_normal((D, I))
	if mo_match is True:
		sn = (sn - sn.mean()) / sn.std()
	return sn
Example #8
def extrapolate(im, vx, vy, sx, sy, output_times, num_trials=100):
	"""velocities vx, vy per frame
		standard deviations sx, sy
		output_times are times after the input frame time
	"""
	num_frames = len(output_times)
	prob = np.zeros((num_frames,)+ im.shape)
	for i in range(num_frames):
		
		spread_x = sx * output_times[i]
		spread_y = sy * output_times[i]
		for j in range(num_trials):
			ti = output_times[i]
			off_x = ti*(vx + sx*standard_normal())
			off_y = ti*(vy + sy*standard_normal())

			off_x = int(round(off_x))
			off_y = int(round(off_y))

			if abs(off_x) < 0.5*im.shape[1] and abs(off_y) < 0.5*im.shape[0]:
				shifted = shiftim(im, off_x, off_y)
			else:
				shifted = np.zeros(im.shape) 

			
			prob[i] += shifted


	return prob*(1.0/num_trials)
Example #9
	def regr_data_prep(self,kk,N_i=1):
		''' Regression data preparation via nested simulations
		'''
		import customML as cm

		# --- Computation budget allocation ---
		N_o = int(kk/N_i)

		# --- portfolio price @ t = \tau via Nested simulations---
		t0 = time.time()
		ran1 = npr.standard_normal((N_o,1))
		S1 = np.zeros((N_o,1))
		S1[:] = self.S0
		S1[:] = S1[:] * np.exp((self.mu - 0.5*self.sigma*self.sigma)*self.tau + \
								self.sigma * np.sqrt(self.tau) * ran1[:])

		ran2 = npr.standard_normal((N_o,N_i))
		S2 = np.zeros((N_o,N_i))
		S2[:,:] = np.dot(S1[:],np.ones((1,N_i))) * np.exp((self.rfr - 0.5*self.sigma*self.sigma)*(self.T-self.tau) \
						+ self.sigma * np.sqrt(self.T-self.tau) * ran2[:,:])

		prob0 = (1.-np.exp(-2.*(np.log(np.dot(S1[:],np.ones((1,N_i)))/(self.H[0]*np.ones((N_o,N_i))))\
			                   *np.log(S2[:,:]/(self.H[0]*np.ones((N_o,N_i))))/(self.sigma**2)/(self.T-self.tau))))\
		                       *(np.dot(S1[:],np.ones((1,N_i))) >= self.H[0]*np.ones((N_o,N_i))).astype(float)\
		                       *(S2[:,:] >= self.H[0]*np.ones((N_o,N_i))).astype(float)
		prob1 = (1.-np.exp(-2.*(np.log(np.dot(S1[:],np.ones((1,N_i)))/(self.H[1]*np.ones((N_o,N_i))))\
			                   *np.log(S2[:,:]/(self.H[1]*np.ones((N_o,N_i))))/(self.sigma**2)/(self.T-self.tau))))\
		                       *(np.dot(S1[:],np.ones((1,N_i))) >= self.H[1]*np.ones((N_o,N_i))).astype(float)\
		                       *(S2[:,:] >= self.H[1]*np.ones((N_o,N_i))).astype(float)
		prob2 = (1.-np.exp(-2.*(np.log(np.dot(S1[:],np.ones((1,N_i)))/(self.H[2]*np.ones((N_o,N_i))))\
			                   *np.log(S2[:,:]/(self.H[2]*np.ones((N_o,N_i))))/(self.sigma**2)/(self.T-self.tau))))\
		                       *(np.dot(S1[:],np.ones((1,N_i))) >= self.H[2]*np.ones((N_o,N_i))).astype(float)\
		                       *(S2[:,:] >= self.H[2]*np.ones((N_o,N_i))).astype(float)


		Vtau0 = np.dot((np.maximum(self.K[0]-S2[:,:],0)*prob0), np.ones((N_i,1))) / \
						float(N_i) * np.exp(-self.rfr*(self.T-self.tau))
		Vtau1 = np.dot((np.maximum(self.K[1]-S2[:,:],0)*prob1), np.ones((N_i,1))) / \
						float(N_i) * np.exp(-self.rfr*(self.T-self.tau))
		Vtau2 = np.dot((np.maximum(self.K[2]-S2[:,:],0)*prob2), np.ones((N_i,1))) / \
						float(N_i) * np.exp(-self.rfr*(self.T-self.tau))


		ValueTau = Vtau0*self.pos[0] + Vtau1*self.pos[1] + Vtau2*self.pos[2]

		t_ns = time.time() - t0

		# prediction samples
		#ran3 = norm(loc=0, scale=1).ppf(lhs(D, samples=I_pred))
		stratified_gaussian  = np.array([(i-0.5)/self.I_pred for i in range(1,self.I_pred+1)])
		ran3 = norm.ppf(stratified_gaussian[:,np.newaxis])
		S_pred = np.zeros((self.I_pred,1))
		S_pred[:] = self.S0
		S_pred[:] = S_pred[:] * np.exp((self.mu - 0.5*self.sigma*self.sigma)*self.tau +\
										self.sigma*np.sqrt(self.tau) * ran3[:])

		self.X = S1
		self.X_pred = S_pred
		self.y = ValueTau
Example #10
	def ns(self,kk,beta=1.):
		''' Standard nested simulations
		'''
		# --- Computation budget allocation ---
		N_o = int(np.ceil(kk**(2./3.)*beta))
		#print "Outer loop: %d" % N_o
		N_i = int(np.ceil(kk**(1./3.)/beta))
		#print "Inner loop: %d" % N_i
		#print "Total: %d" % (N_o*N_i)

		# --- portfolio loss distribution @ t = \tau via analytical formulas---
		t0 = time.time()
		ran1 = npr.standard_normal((N_o,1))
		S1 = np.zeros((N_o,1))
		S1[:] = self.S0
		S1[:] = S1[:] * np.exp((self.mu - 0.5*self.sigma*self.sigma)*self.tau + self.sigma *\
								np.sqrt(self.tau) * ran1[:])

		ran2 = npr.standard_normal((N_o,N_i))
		S2 = np.zeros((N_o,N_i))
		S2[:,:] = np.dot(S1[:],np.ones((1,N_i))) * np.exp((self.rfr - 0.5*self.sigma*self.sigma)*(self.T-self.tau) \
						+ self.sigma * np.sqrt(self.T-self.tau) * ran2[:,:])

		prob0 = (1.-np.exp(-2.*(np.log(np.dot(S1[:],np.ones((1,N_i)))/(self.H[0]*np.ones((N_o,N_i))))\
			                   *np.log(S2[:,:]/(self.H[0]*np.ones((N_o,N_i))))/(self.sigma**2)/(self.T-self.tau))))\
		                       *(np.dot(S1[:],np.ones((1,N_i))) >= self.H[0]*np.ones((N_o,N_i))).astype(float)\
		                       *(S2[:,:] >= self.H[0]*np.ones((N_o,N_i))).astype(float)
		prob1 = (1.-np.exp(-2.*(np.log(np.dot(S1[:],np.ones((1,N_i)))/(self.H[1]*np.ones((N_o,N_i))))\
			                   *np.log(S2[:,:]/(self.H[1]*np.ones((N_o,N_i))))/(self.sigma**2)/(self.T-self.tau))))\
		                       *(np.dot(S1[:],np.ones((1,N_i))) >= self.H[1]*np.ones((N_o,N_i))).astype(float)\
		                       *(S2[:,:] >= self.H[1]*np.ones((N_o,N_i))).astype(float)
		prob2 = (1.-np.exp(-2.*(np.log(np.dot(S1[:],np.ones((1,N_i)))/(self.H[2]*np.ones((N_o,N_i))))\
			                   *np.log(S2[:,:]/(self.H[2]*np.ones((N_o,N_i))))/(self.sigma**2)/(self.T-self.tau))))\
		                       *(np.dot(S1[:],np.ones((1,N_i))) >= self.H[2]*np.ones((N_o,N_i))).astype(float)\
		                       *(S2[:,:] >= self.H[2]*np.ones((N_o,N_i))).astype(float)


		Vtau0 = np.dot((np.maximum(self.K[0]-S2[:,:],0)*prob0), np.ones((N_i,1))) / \
				float(N_i) * np.exp(-self.rfr*(self.T-self.tau))
		Vtau1 = np.dot((np.maximum(self.K[1]-S2[:,:],0)*prob1), np.ones((N_i,1))) / \
				float(N_i) * np.exp(-self.rfr*(self.T-self.tau))
		Vtau2 = np.dot((np.maximum(self.K[2]-S2[:,:],0)*prob2), np.ones((N_i,1))) / \
				float(N_i) * np.exp(-self.rfr*(self.T-self.tau))


		ValueTau = Vtau0*self.pos[0] + Vtau1*self.pos[1] + Vtau2*self.pos[2]

		y = self.Value0 - ValueTau

		t_ns = time.time()-t0
		#print "%es spent in re-valuation" % t_ns

		L_ns = np.sort(y)
		var = scs.scoreatpercentile(L_ns, self.perc*100.)
		el = L_ns - self.c
		eel = np.mean(el.clip(0))
		#print "EEL estimated: %e" % eel

		return eel
Example #11
 def __new__(cls, *args):
     if len(args) == 1:
         layers, = args
         W = [rnd.standard_normal((i, j))
              for i, j in zip(layers[:-1], layers[1:])]
         C = rnd.standard_normal(len(layers) - 1)
     else:
         W, C = args
     return super().__new__(cls, W, C)
Example #12
    def __init__(self,timeSeries=None,
                 lenSeries=2**18,
                 numChannels=1,
                 fMin=400,fMax=800,
                 sampTime=None,
                 noiseRMS=0.1):
        """ Initializes the AmplitudeTimeSeries instance. 
        If an array is not passed, then a random white-noise dataset is generated.
        Inputs: 
        lenSeries -- Number of time data points (usually a power of 2); 2^38 gives about 65 seconds
        of 400 MHz sampled data
        The time binning is decided by the bandwidth
        fMin -- lowest frequency (MHz)
        fMax -- highest frequency (MHz)
        noiseRMS -- RMS value of noise (TBD)
        noiseAlpha -- spectral slope (default is white noise) (TBD)
        ONLY GENERATES WHITE NOISE RIGHT NOW!
        """
        self.shape = (np.uint(numChannels),np.uint(lenSeries))
        self.fMax = fMax
        self.fMin = fMin        
        
        if sampTime is None:
            self.sampTime = np.uint(numChannels)*1E-6/(fMax-fMin)
        else:
            self.sampTime = sampTime

        if timeSeries is None:
            # then use the rest of the data to generate a random timeseries
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ did not get new data, generating white noise data"

            self.timeSeries = np.complex64(noiseRMS*(np.float16(random.standard_normal(self.shape))
                                                     +np.float16(random.standard_normal(self.shape))*1j)/np.sqrt(2))
            
        else:
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ got new data, making sure it is reasonable."

            if len(timeSeries.shape) == 1:
                self.shape = (1,timeSeries.shape[0])
                
            else:
                self.shape = timeSeries.shape

            self.timeSeries = np.reshape(np.complex64(timeSeries),self.shape)
            
            self.fMin = fMin
            self.fMax = fMax

            if sampTime is None:
                self.sampTime = numChannels*1E-6/(fMax-fMin)
            else:
                self.sampTime = sampTime

        return None
Example #13
def RNG(M, I):
    if antiPaths == True:
        randDummy = standard_normal((3, M + 1, I // 2))
        rand = concatenate((randDummy, -randDummy), 2)
    else:
        rand = standard_normal((3, M + 1, I))
    if moMatch == True:
        rand = rand / std(rand)
        rand = rand - mean(rand)
    return rand
Example #14
 def sample(self, n=None):
     """Return a multivariate normal sample."""
     if n is None:
         snsamps = random.standard_normal(self.ndim)
         return _mvnt.mvnsamp(self.mu, self.L, snsamps)
     samps = zeros((n, self.ndim), float)
     for i in range(n):
         snsamps = random.standard_normal(self.ndim)
         samps[i] = _mvnt.mvnsamp(self.mu, self.L, snsamps)
     return samps
Example #15
def dataSimulation(coefs, errorCoef, intercept, size):
	inputs = []
	error = errorCoef*rd.standard_normal(size)
	y = error + intercept
	for i in range(len(coefs)):
		 inputs = inputs + [rd.standard_normal(size)]		 
		 y = y+coefs[i]*inputs[i]
	y = sign(y)
	inputs = list(zip(*inputs))
	return([y,inputs])
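
A minimal usage sketch with hypothetical coefficients; rd (numpy.random) and sign (numpy) are the module-level imports the function above relies on.

import numpy.random as rd
from numpy import sign

labels, features = dataSimulation(coefs=[1.5, -2.0], errorCoef=0.1,
                                  intercept=0.5, size=100)
print(labels[:5])     # +/-1 class labels
print(len(features))  # 100 rows of feature tuples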
Example #16
    def propagate(self, msg):

        """
        Propagates a message through the channel.

        Parameters
        ----------
        msg : 1D ndarray
                Message to propagate.

        Returns
        -------
        channel_output : 2D ndarray
                         Message after application of the fading and addition of noise.
                         channel_output[i] is the i-th received symbol of size nb_rx.

        Raises
        ------
        TypeError
                        If the input message is complex but the channel is real.

        AssertionError
                        If the noise standard deviation noise_std has not been set yet.
        """

        if isinstance(msg[0], complex) and not self.isComplex:
            raise TypeError('Trying to propagate a complex message in a real channel.')
        (nb_vect, mod) = divmod(len(msg), self.nb_tx)

        # Add padding if required
        if mod:
            msg = hstack((msg, zeros(self.nb_tx - mod)))
            nb_vect += 1

        # Reshape msg as vectors sent on each antennas
        msg = msg.reshape(nb_vect, -1)

        # Generate noises
        self.generate_noises((nb_vect, self.nb_rx))

        # Generate uncorrelated channel
        dims = (nb_vect, self.nb_rx, self.nb_tx)
        if self.isComplex:
            self.channel_gains = (standard_normal(dims) + 1j * standard_normal(dims)) * sqrt(0.5)
        else:
            self.channel_gains = standard_normal(dims)

        # Add correlation and mean
        einsum('ij,ajk,lk->ail', sqrtm(self.fading_param[2]), self.channel_gains, sqrtm(self.fading_param[1]),
               out=self.channel_gains, optimize='greedy')
        self.channel_gains += self.fading_param[0]

        # Generate outputs
        self.unnoisy_output = einsum('ijk,ik->ij', self.channel_gains, msg)
        return self.unnoisy_output + self.noises
Example #17
def RNG(I):
    """Generate I random numbers following specified AP and MM parameters"""
    if AP == True:
        ran = standard_normal(I//2)
        ran = concatenate((ran, -ran))
    else:
        ran = standard_normal(I)
    if MM == True:
        ran = ran-mean(ran)
        ran = ran/std(ran)
    return ran
Example #18
 def _generate_path(self, dt):
     s = zeros(self._num_steps + 1)
     v = zeros(self._num_steps + 1)
     s[0] = self._s0            
     v[0] = self._v0
     dW1 = standard_normal(self._num_steps)
     dW2 = self._rho * dW1 + (1 - self._rho**2)**(0.5) * standard_normal(self._num_steps)
     for j in range(0, self._num_steps):
         s[j + 1] = s[j] * exp((self._r - 0.5 * v[j]) * dt + (v[j] * dt)**(0.5) * dW1[j])
         v[j + 1] = max(v[j] + (self._kappa * (self._theta - v[j]) * dt) + self._lamda * (v[j] * dt)**(0.5) * dW2[j], 0)
     return s            
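
A self-contained sketch of the same Euler scheme, using hypothetical parameter values in place of the class attributes the method reads (self._s0, self._v0, self._rho, and so on):

import numpy as np
from numpy.random import standard_normal

def heston_path(s0=100.0, v0=0.04, r=0.05, kappa=2.0, theta=0.04,
                lamda=0.3, rho=-0.7, num_steps=252, dt=1.0 / 252):
    s = np.zeros(num_steps + 1)
    v = np.zeros(num_steps + 1)
    s[0], v[0] = s0, v0
    # Two correlated Brownian increments with correlation rho.
    dW1 = standard_normal(num_steps)
    dW2 = rho * dW1 + np.sqrt(1 - rho**2) * standard_normal(num_steps)
    for j in range(num_steps):
        s[j + 1] = s[j] * np.exp((r - 0.5 * v[j]) * dt + np.sqrt(v[j] * dt) * dW1[j])
        # Variance is floored at zero, as in the snippet above.
        v[j + 1] = max(v[j] + kappa * (theta - v[j]) * dt
                       + lamda * np.sqrt(v[j] * dt) * dW2[j], 0.0)
    return s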
Example #19
 def sample_q(self, n=None):
     """Return a multivariate normal sample and the value of its
     associated quadratic form."""
     if n is None:
         snsamps = random.standard_normal(self.ndim)
         return _mvnt.mvnsampq(self.mu, self.L, snsamps)
     samps = zeros((n, self.ndim), float)
     qvals = zeros(n, float)
     for i in range(n):
         snsamps = random.standard_normal(self.ndim)
         samps[i], qvals[i] = _mvnt.mvnsampq(self.mu, self.L, snsamps)
     return samps, qvals
Example #20
 def sample(self, n=None):
     """Return a multivariate t sample."""
     if n is None:
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         return _mvnt.mvtsamp(self.mu, self.L, self.nu, snsamps, gamsamp)
     samps = zeros((n, self.ndim), float)
     for i in range(n):
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         samps[i] = _mvnt.mvtsamp(self.mu, self.L, self.nu, snsamps, gamsamp)
     return samps
Example #21
	def ns(self,kk,beta=1.):
		''' Standard nested simulations
		'''
		# --- Computation budget allocation ---
		N_o = int(np.ceil(kk**(2./3.)*beta))
		N_i = int(np.ceil(kk**(1./3.)/beta))
		print "Outer loop: %d" % N_o
		print "Inner loop: %d" % N_i
		print "Total: %d" % (N_o*N_i)

		# --- True values from 2**15 LHS ---
		var_true = 246.42939112403042
		eel_true = 0.45553892616275565

		# --- portfolio loss distribution @ t = \tau via analytical formulas---
		t0 = time.time()
		ran1 = npr.standard_normal((N_o,self.D))
		S1 = np.zeros((N_o,self.D))
		S1[:,:] = self.S0
		S1[:,:] = S1[:,:] * np.exp((self.mu - 0.5*self.sigma*self.sigma)*self.tau + self.sigma *\
								np.sqrt(self.tau) * ran1[:,:])

		ValueTau = np.zeros((N_o,1))
		for dim in range(self.D):
			ran2 = npr.standard_normal((N_o,N_i))
			S2 = np.zeros((N_o,N_i))
			S2[:,:] = np.dot(S1[:,dim,np.newaxis],np.ones((1,N_i))) * np.exp((self.rfr - 0.5*self.sigma**2)*(self.T-self.tau) + self.sigma \
				* np.sqrt(self.T-self.tau) * ran2[:,:])

			prob = (1.-np.exp(-2.*(np.log(np.dot(S1[:,dim,np.newaxis],np.ones((1,N_i)))/self.H)\
			                   *np.log(S2[:,:]/self.H)/(self.sigma**2)/(self.T-self.tau))))\
		                       *(np.dot(S1[:,dim,np.newaxis],np.ones((1,N_i))) >= self.H*np.ones((N_o,N_i))).astype(float)\
		                       *(S2[:,:] >= self.H*np.ones((N_o,N_i))).astype(float)

			C2do = np.dot((np.maximum(S2[:,:]-self.K,0)*prob), np.ones((N_i,1))) / \
				float(N_i) * np.exp(-self.rfr*(self.T-self.tau))

			P2 = np.dot((np.maximum(self.K-S2[:,:],0)),np.ones((N_i,1))) / \
				float(N_i) * np.exp(-self.rfr*(self.T-self.tau))

			ValueTau[:] += -10. * C2do - 5. * P2
		t_ns = time.time() - t0
		print "%.2fs eclipsed" % t_ns

		L_ns = np.sort(self.Value0 - ValueTau)
		var = scs.scoreatpercentile(L_ns, self.perc*100.)
		eel = np.mean(np.maximum(L_ns-var,0))

		print "VaR estimated: %e (true: %e)" % (var, var_true)
		print "EEL estimated: %e (true: %e)" % (eel, eel_true)

		return (eel, t_ns)
Example #22
def simulate_j(s0, r, q, sigma, t, t_steps, n_paths):
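    # NOTE: lamb, mu and delta (jump intensity, mean jump size and jump
    # volatility of this Merton-style jump diffusion) are assumed to be
    # defined at module scope in the original source.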
    np.random.seed(123456)
    dt = t / t_steps
    rj = lamb * (np.exp(mu + 0.5*delta**2) - 1)
    s = np.zeros((t_steps+1, n_paths))
    s[0] = s0
    sn1 = npr.standard_normal((t_steps+1, n_paths))
    sn2 = npr.standard_normal((t_steps+1, n_paths))
    poi = npr.poisson(lamb*dt, (t_steps+1, n_paths))
    for t in range(1, t_steps+1, 1):
        s[t] = s[t-1] * (np.exp((r - rj - 0.5*sigma**2) * dt + sigma * np.sqrt(dt) * sn1[t])
                         + (np.exp(mu + delta*sn2[t]) - 1) * poi[t])
    return s
Example #23
def accumulate(im, vx, vy, sx, sy, num_frames, num_trials=10, threshold=25):
	"""velocities mx, my per frame
		standard deviations sx, sy
	"""
	imt = np.where(im > threshold, im/threshold, 0)
	accum = np.zeros((num_frames,)+ im.shape)
	for i in range(num_trials):
		vxi = vx + sx*standard_normal()
		vyi = vy + sy*standard_normal()
		offsets = [[vxi*i, vyi*i] for i in range(num_frames)]
		accum += shift_series(imt, offsets)

	return np.clip(accum*(1.0/num_trials), 0.0, 1.0)
Example #24
 def sample_q(self, n=None):
     """Return a multivariate t sample and the value of its
     associated quadratic form."""
     if n is None:
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         return _mvnt.mvtsampq(self.mu, self.L, self.nu, snsamps, gamsamp)
     samps = zeros((n, self.ndim), float)
     qvals = zeros(n, float)
     for i in range(n):
         snsamps = random.standard_normal(self.ndim)
         gamsamp = random.gamma(self.hnu)
         samps[i], qvals[i] = _mvnt.mvtsampq(self.mu, self.L, self.nu, snsamps, gamsamp)
     return samps, qvals
Example #25
    def step(self, f):
        atoms = self.atoms
        natoms = len(atoms)

        # This velocity as well as xi, eta and a few other variables are stored
        # as attributes, so Asap can do its magic when atoms migrate between
        # processors.
        self.v = atoms.get_velocities()

        self.xi = standard_normal(size=(natoms, 3))
        self.eta = standard_normal(size=(natoms, 3))

        if self.communicator is not None:
            self.communicator.broadcast(self.xi, 0)
            self.communicator.broadcast(self.eta, 0)

        # First halfstep in the velocity.
        self.v += (self.c1 * f / self.masses - self.c2 * self.v +
                   self.c3 * self.xi - self.c4 * self.eta)

        # Full step in positions
        x = atoms.get_positions()
        if self.fixcm:
            old_cm = atoms.get_center_of_mass()
        # Step: x^n -> x^(n+1) - this applies constraints if any.
        atoms.set_positions(x + self.dt * self.v + self.c5 * self.eta)
        if self.fixcm:
            new_cm = atoms.get_center_of_mass()
            d = old_cm - new_cm
            # atoms.translate(d)  # Does not respect constraints
            atoms.set_positions(atoms.get_positions() + d)

        # recalc velocities after RATTLE constraints are applied
        self.v = (self.atoms.get_positions() - x -
                  self.c5 * self.eta) / self.dt
        f = atoms.get_forces(md=True)

        # Update the velocities
        self.v += (self.c1 * f / self.masses - self.c2 * self.v +
                   self.c3 * self.xi - self.c4 * self.eta)

        if self.fixcm:  # subtract center of mass vel
            v_cm = self._get_com_velocity()
            self.v -= v_cm

        # Second part of RATTLE taken care of here
        atoms.set_momenta(self.v * self.masses)

        return f
Example #26
 def test_contrast3(self):
     P = np.dot(self.X, np.linalg.pinv(self.X))
     resid = np.identity(40) - P
     noise = np.dot(resid,R.standard_normal((40,5)))
     term = np.column_stack((noise, self.X[:,2]))
     c = Contrast(term, self.X)
     assert_equal(c.contrast_matrix.shape, (10,))
Example #27
 def test_extendedpinv(self):
     X = standard_normal((40, 10))
     np_inv = np.linalg.pinv(X)
     np_sing_vals = np.linalg.svd(X, 0, 0)
     sm_inv, sing_vals = pinv_extended(X)
     assert_almost_equal(np_inv, sm_inv)
     assert_almost_equal(np_sing_vals, sing_vals)
Example #28
def truncated_normal(shape=None, mu=0., sigma=1., x_min=None, x_max=None):
    """
    Generates random variates from a lower- and upper-bounded normal distribution

    @param shape: shape of the random sample
    @param mu:    location parameter 
    @param sigma: width of the distribution (sigma >= 0.)
    @param x_min: lower bound of variate
    @param x_max: upper bound of variate    
    @return: random variates of the bounded normal distribution
    """
    from scipy.special import erf, erfinv
    from numpy.random import standard_normal
    from numpy import inf, sqrt

    if x_min is None and x_max is None:
        return standard_normal(shape) * sigma + mu
    elif x_min is None:
        x_min = -inf
    elif x_max is None:
        x_max = inf
        
    x_min = max(-1e300, x_min)
    x_max = min(+1e300, x_max)
    var = sigma ** 2 + 1e-300
    sigma = sqrt(2 * var)
    
    a = erf((x_min - mu) / sigma)
    b = erf((x_max - mu) / sigma)

    return probability_transform(shape, erfinv, a, b) * sigma + mu
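
The helper probability_transform is not shown in this snippet; a minimal sketch of what that inverse-CDF step presumably does (an assumption, not the original implementation):

import numpy as np

def probability_transform(shape, inv_cdf, a, b):
    # Draw uniformly between the CDF values a and b, then map back
    # through the inverse CDF (here erfinv, as passed by the caller).
    u = a + (b - a) * np.random.random_sample(shape)
    return inv_cdf(u)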
Example #29
def brownian(size):
    h = 2**size
    j_max = 1

    # Generate timesteps
    #t = np.array(range(h + 1))
    t = np.linspace(0, 0.5, h + 1)
    # Generate length random numbers (Z_1, Z_{len}) ~ N(0,1)
    Z = nr.standard_normal(size=h + 1)
    print(Z)
    # Initialize output vector
    #w = [0]*(h + 1)
    w = np.zeros(h+1)
    w[h] = math.sqrt(t[h]) * Z[h]

    for k in range(1, size + 1):
        i_min = round(h/2)
        i = i_min

        l = 0
        r = h

        for j in range(j_max):
            a = ((t[r] - t[i]) * w[l] + (t[i] - t[l]) * w[r]) / (t[r]-t[l]);
            b = math.sqrt((t[i] - t[l]) * (t[r] - t[i]) / (t[r] - t[l]))
            w[i] = a + b*Z[i]
            i = i + h
            l = l + h
            r = r + h
        j_max *= 2
        h = i_min
    return (w[1:-1], Z)
Example #30
def randomGamma(gam, num):
    """
    generates random warping functions

    :param gam: numpy ndarray of N x M of M warping functions
    :param num: number of random functions

    :return: rgam: random warping functions

    """
    mu, gam_mu, psi, vec = SqrtMean(gam)
    K = cov(vec)

    U, s, V = svd(K)
    n = 5
    TT = vec.shape[0] + 1
    vm = vec.mean(axis=1)

    rgam = zeros((TT, num))
    for k in range(0, num):
        a = rn.standard_normal(n)
        v = zeros(vm.size)
        for i in range(0, n):
            v = v + a[i] * sqrt(s[i]) * U[:, i]

        vn = norm(v) / sqrt(TT)
        psi = cos(vn) * mu + sin(vn) * v / vn
        tmp = zeros(TT)
        tmp[1:TT] = cumsum(psi * psi) / TT
        rgam[:, k] = (tmp - tmp[0]) / (tmp[-1] - tmp[0])

    return rgam
Example #31
def generate_data(missing,
                  datatype,
                  const=False,
                  ntk=(971, 7, 5),
                  other_effects=0,
                  rng=None):
    if rng is None:
        np.random.seed(12345)
    else:
        np.random.set_state(rng.get_state())

    n, t, k = ntk
    k += const
    x = standard_normal((k, t, n))
    beta = np.arange(1, k + 1)[:, None, None] / k
    y = (x * beta).sum(0) + standard_normal((t, n)) + 2 * standard_normal(
        (1, n))
    w = np.random.chisquare(5, (t, n)) / 5
    c = None
    if other_effects == 1:
        cats = ['Industries']
    else:
        cats = ['cat.' + str(i) for i in range(other_effects)]
    if other_effects:
        c = np.random.randint(0, 4, (other_effects, t, n))

    vcats = ['varcat.' + str(i) for i in range(2)]
    vc2 = np.ones((2, t, 1)) @ np.random.randint(0, n // 2, (2, 1, n))
    vc1 = vc2[[0]]

    if const:
        x[0] = 1.0

    if missing > 0:
        locs = np.random.choice(n * t, int(n * t * missing))
        y.flat[locs] = np.nan
        locs = np.random.choice(n * t * k, int(n * t * k * missing))
        x.flat[locs] = np.nan

    if datatype in ('pandas', 'xarray'):
        entities = ['firm' + str(i) for i in range(n)]
        time = date_range('1-1-1900', periods=t, freq='A-DEC')
        var_names = ['x' + str(i) for i in range(k)]
        # y = DataFrame(y, index=time, columns=entities)
        y = panel_to_frame(y[None],
                           items=['y'],
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        w = panel_to_frame(w[None],
                           items=['w'],
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        w = w.reindex(y.index)
        x = panel_to_frame(x,
                           items=var_names,
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        x = x.reindex(y.index)
        c = panel_to_frame(c,
                           items=cats,
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        c = c.reindex(y.index)
        vc1 = panel_to_frame(vc1,
                             items=vcats[:1],
                             major_axis=time,
                             minor_axis=entities,
                             swap=True)
        vc1 = vc1.reindex(y.index)
        vc2 = panel_to_frame(vc2,
                             items=vcats,
                             major_axis=time,
                             minor_axis=entities,
                             swap=True)
        vc2 = vc2.reindex(y.index)

    if datatype == 'xarray':
        # TODO: This is broken now, need to transform multiindex to xarray 3d
        import xarray as xr
        x = xr.DataArray(PanelData(x).values3d,
                         coords={
                             'entities': entities,
                             'time': time,
                             'vars': var_names
                         },
                         dims=['vars', 'time', 'entities'])
        y = xr.DataArray(PanelData(y).values3d,
                         coords={
                             'entities': entities,
                             'time': time,
                             'vars': ['y']
                         },
                         dims=['vars', 'time', 'entities'])
        w = xr.DataArray(PanelData(w).values3d,
                         coords={
                             'entities': entities,
                             'time': time,
                             'vars': ['w']
                         },
                         dims=['vars', 'time', 'entities'])
        if c.shape[1] > 0:
            c = xr.DataArray(PanelData(c).values3d,
                             coords={
                                 'entities': entities,
                                 'time': time,
                                 'vars': c.columns
                             },
                             dims=['vars', 'time', 'entities'])
        vc1 = xr.DataArray(PanelData(vc1).values3d,
                           coords={
                               'entities': entities,
                               'time': time,
                               'vars': vc1.columns
                           },
                           dims=['vars', 'time', 'entities'])
        vc2 = xr.DataArray(PanelData(vc2).values3d,
                           coords={
                               'entities': entities,
                               'time': time,
                               'vars': vc2.columns
                           },
                           dims=['vars', 'time', 'entities'])

    if rng is not None:
        rng.set_state(np.random.get_state())

    return AttrDict(y=y, x=x, w=w, c=c, vc1=vc1, vc2=vc2)
Example #32
 def setup_class(cls):
     np.random.seed(54321)
     cls.X = standard_normal((40, 10))
Example #33
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt

x = npr.standard_normal(5000)
print({"{:15.10f}".format(x.mean())})
print({"{:15.10f}".format(x.std())})

# {'  -0.0006020942'}  it's not quite zero
# {'   0.9997493059'}  it's not quite 1

plt.hist(x, bins=50)
# plt.show()

# We need to normalise to get mean 0 and std 1

y = np.concatenate([x, -x])
print({"{:15.10f}".format(y.mean())})

plt.hist(y, bins=50)
# plt.show()

# now to normalise
z = (y - y.mean()) / y.std()
print({"{:15.10f}".format(z.mean())})
print({"{:15.10f}".format(z.std())})
plt.hist(z, bins=50)
plt.show()

Example #34
##############################################
    markcols.append(i+1)

#Create column for duopolists
duopcols = []
for i in range(markets):
	duopcols.append(1)
	duopcols.append(2)
	
#Create matrix so random entries can be used
#mXk matrix 
matrix = [[0 for k in range(4)] for m in range(markets*2)]
 
#Initialize with N(0,1) draws
for i in range(markets*2):
	for j in range(4):
		matrix[i][j] = rand.standard_normal()

#Turn vectors and matrix into DataFrame for further manipulation down below
#Final table to be exported is exog_matrix
markcols_df = pd.DataFrame(markcols)
duopcols_df = pd.DataFrame(duopcols)
cols_df = pd.concat([markcols_df, duopcols_df], axis=1)
matrix_df = pd.DataFrame(matrix)
exog_matrix = pd.concat([cols_df, matrix_df], axis=1)
exog_matrix.columns = ['mkt','firm','x', 'w', 'derr', 'cerr']
	
#Export the exogenous data to a csv file
#exog_matrix.to_csv('exog_data.csv', sep=',', index=False)

#Functions: Below is a series of functions that will be used later in the program
#The first four relate to computing market shares
Example #35
 def __init__(self):
     np.random.seed(54321)
     self.X = standard_normal((40,10))
Example #36
example = 2  # 1,2 or 3

import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt

from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM  #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM

standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs, ))

f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1 / 4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2 / 4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2  # 0.1

y += z
d = np.array([x1, x2]).T

if example == 1:
    print "normal"
Example #37
n = 10000  # MC simulation trials
T = 24.  # total time
m = 100  # subintervals
dt = T / m  # difference in time each subinterval

r = np.zeros(shape=(n, m), dtype=float)  # matrix to hold short rate paths

fig1 = figure(1)
fig1.clf()
ax1a = fig1.add_subplot(121)

for j in np.arange(0, n):  # number of MC simulations
    r[j, 0] = r0
    for i in np.arange(1, m):  #trials per simulation
        r[j, i] = r[j, i - 1] + k * (
            theta - r[j, i - 1]) * dt + beta * sqrt(dt) * standard_normal()
        #print j,i, r[j,i]
    ax1a.plot(np.arange(0, T, dt), r[j])

## plot paths
t = np.arange(0, T, dt)
rT_expected = theta + (r0 - theta) * pow(np.e, -k * t)
rT_stdev = sqrt(pow(beta, 2) / (2 * k) * (1 - pow(np.e, -2 * k * t)))
print('expected', rT_expected, 'std', rT_stdev)

plot(np.arange(0, T, dt), rT_expected, '-or')
plot(np.arange(0, T, dt), rT_expected + 2 * rT_stdev, '-ob')
plot(np.arange(0, T, dt), rT_expected - 2 * rT_stdev, '-ob')

print(shape(t), shape(r))
Example #38
    def value_at_risk(self):
        #Value-at-Risk
        lamb = 0.75
        mu = -0.6
        delta = 0.25
        M = 50
        
        
        S0 = 100
        r = 0.05
        sigma = 0.25
        T = 30 / 365.
        I = 10000
        ST = S0 * np.exp((r - 0.5 * sigma ** 2) * T
            + sigma * np.sqrt(T) * npr.standard_normal(I))
        
        R_gbm = np.sort(ST - S0)
        plt.hist(R_gbm, bins=50)
        plt.xlabel("absolute return")
        plt.ylabel("frequency")
        plt.grid(True)
        
        percs = [0.01, 0.1, 1., 2.5, 5.0, 10.0]
        var = scs.scoreatpercentile(R_gbm, percs)
        print("%16s %16s" % ("Confidence Level", "Value-at-Risk"))
        print(33 * "-")
        for pair in zip(percs, var):
            print("%16.2f %16.3f" % (100 - pair[0], -pair[1]))

        dt = 30. / 365 / M
        rj = lamb * (np.exp(mu + 0.5 * delta ** 2) - 1)
        S = np.zeros((M + 1, I))
        S[0] = S0
        sn1 = npr.standard_normal((M + 1, I))
        sn2 = npr.standard_normal((M + 1, I))
        poi = npr.poisson(lamb * dt, (M + 1, I))
        for t in range(1, M + 1, 1):
            S[t] = S[t - 1] * (np.exp((r - rj - 0.5 * sigma ** 2) * dt
                                      + sigma * np.sqrt(dt) * sn1[t])
                               + (np.exp(mu + delta * sn2[t]) - 1) * poi[t])
            S[t] = np.maximum(S[t], 0)
            
        R_jd = np.sort(S[-1] - S0)

        plt.hist(R_jd, bins=50)
        plt.xlabel("absolute return")
        plt.ylabel("frequency")
        plt.grid(True)
                
        percs = [0.01, 0.1, 1., 2.5, 5.0, 10.0]
        var = scs.scoreatpercentile(R_jd, percs)
        print("%16s %16s" % ("Confidence Level", "Value-at-Risk"))
        print(33 * "-")
        for pair in zip(percs, var):
            print("%16.2f %16.3f" % (100 - pair[0], -pair[1]))
        
        percs = list(np.arange(0.0, 10.1, 0.1))
        gbm_var = scs.scoreatpercentile(R_gbm, percs)
        jd_var = scs.scoreatpercentile(R_jd, percs)
        
        plt.plot(percs, gbm_var, "b", lw=1.5, label="GBM")
        plt.plot(percs, jd_var, "r", lw=1.5, label="JD")
        plt.legend(loc=4)
        plt.xlabel("100 - confidence level [%]")
        plt.ylabel("value-at-risk")
        plt.grid(True)
        plt.ylim(top=0.0)
        pass
Example #39
        dFdh = dFdh - np.dot(penalty_cov, h - penalty_mean)
        dFdhh = dFdhh - penalty_cov

        dh = npl.solve(dFdhh, dFdh)
        h -= dh
        C = np.sum([h[i] * Q[i] for i in range(Q.shape[0])], axis=0)

        df = (dFdh * dh).sum()
        if np.fabs(df) < 1.0e-01:
            break

        _iter += 1
        if _iter >= niter:
            break

    return C, h, -dFdhh


if __name__ == "__main__":

    import numpy.random as R

    X = R.standard_normal((500, 3))
    Q = np.array([
        np.identity(3),
        np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]),
        np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
    ], float)

    print(reml(np.dot(X.T, X), Q))
Example #40
 def setUp(self):
     rnd.seed(1234567)
     self.xd1 = rnd.standard_normal(128)
     self.xf1 = self.xd1.astype(np.float32)
     self.xz1 = rnd.standard_normal((128,2)).view(dtype=np.complex128).squeeze()
     self.xc1 = self.xz1.astype(np.complex64)
Example #41
    def test_rank(self):
        X = standard_normal((40, 10))
        self.assertEqual(tools.rank(X), 10)

        X[:, 0] = X[:, 1] + X[:, 2]
        self.assertEqual(tools.rank(X), 9)
Example #42
def build_random_sparse_mdp(n_states, n_actions, discount):
    P = sparsify(rnd.random((n_states, n_states, n_actions)))
    r = sparsify(rnd.standard_normal((n_states, n_actions)))
    d0 = rnd.random((n_states, 1))
    return MDP(n_states, n_actions, P / P.sum(axis=0, keepdims=True), r,
               discount, d0 / d0.sum(axis=0, keepdims=True))
Example #43
def apply_errormodels(maskgals,
                      mag_in,
                      b=None,
                      err_ratio=1.0,
                      fluxmode=False,
                      nonoise=False,
                      inlup=False):
    """
    Find magnitude and uncertainty.

    parameters
    ----------
    mag_in    :
    nonoise   : account for noise / no noise
    zp:       : Zero point magnitudes
    nsig:     :
    fluxmode  :
    lnscat    :
    b         : parameters for luptitude calculation
    inlup     :
    errtflux  :
    err_ratio : scaling factor

    returns
    -------
    mag
    mag_err

    """
    f1lim = 10.**((maskgals.limmag - maskgals.zp[0]) / (-2.5))
    fsky1 = (((f1lim**2.) * maskgals.exptime) / (maskgals.nsig[0]**2.) - f1lim)
    fsky1 = np.clip(fsky1, 0.001, None)

    if inlup:
        bnmgy = b * 1e9
        tflux = maskgals.exptime * 2.0 * bnmgy * \
            np.sinh(-np.log(b) - 0.4 * np.log(10.0) * mag_in)
    else:
        tflux = maskgals.exptime * 10.**((mag_in - maskgals.zp[0]) / (-2.5))

    noise = err_ratio * np.sqrt(fsky1 * maskgals.exptime + tflux)

    if nonoise:
        flux = tflux
    else:
        flux = tflux + noise * random.standard_normal(mag_in.size)

    if fluxmode:
        mag = flux / maskgals.exptime
        mag_err = noise / maskgals.exptime
    else:
        if b is not None:
            bnmgy = b * 1e9

            flux_new = flux / maskgals.exptime
            noise_new = noise / maskgals.exptime

            mag = 2.5 * np.log10(1.0 / b) - np.arcsinh(
                0.5 * flux_new / bnmgy) / (0.4 * np.log(10.0))
            mag_err = 2.5 * noise_new / \
                (2.0 * bnmgy * np.log(10.0) *
                 np.sqrt(1.0 + (0.5 * flux_new / bnmgy)**2.0))
        else:
            mag = maskgals.zp[0] - 2.5 * np.log10(flux / maskgals.exptime)
            mag_err = (2.5 / np.log(10.0)) * (noise / flux)

            bad, = np.where(np.isfinite(mag) == False)
            mag[bad] = 99.0
            mag_err[bad] = 99.0

    return mag, mag_err
Example #44
def hot_pixel_clipping(n_obs=7, n_hot=10, verbose=False):
    """Test that hot pixels are clipped.
    
    This creates a small number of very hot pixels in otherwise almost uniform
    random data, and then checks that they are correctly removed.
    """

    # These must match the actual numbers used in the cubing code.
    # @TODO: These should be passed to the cubing code to ensure consistency.
    drop_size = 1.6
    output_size = 0.5

    pixel_ratio = 3.14 * (
        (drop_size + 1.2 * output_size) / 2)**2 / (output_size**2)
    # This is the approximate number of output pixels each input pixel maps onto.
    # NOTE: The factor of 1.2*output_size is empirically determined, but seems
    # reasonable. The actual right answer will depend on the ratio
    # drop_size/output_size

    lambda_size = 20
    # Number of wavelength slices

    # First, we create a set of n_obs mock observations.
    ifu_list = []
    for i in range(n_obs):
        ifu = dummy_ifu()

        ifu.data = standard_normal(size=(np.shape(ifu.data)[0],
                                         lambda_size)) + 100
        # Random data with stddev = 1 and mean = 100

        ifu_list.append(ifu)

    # Run the null test on the data
    data_cube, var_cube, weight_cube, diagnostic_info = dithered_cube_from_rss(
        ifu_list, offsets=None)

    before = diagnostic_info['unmasked_pixels_before_sigma_clip']
    after = diagnostic_info['unmasked_pixels_after_sigma_clip']

    expected_rej = n_obs * np.asarray(ifu_list[0].data.shape).prod() * erfc(
        5.0 / np.sqrt(2)) * pixel_ratio

    if (before - after > expected_rej) or (before - after < 0):
        print("Failed test: hot_pixel_clipping 1")
        print(
            "    The sigma clipping removed {0} pixels, expected {1}.".format(
                before - after, expected_rej))
    elif verbose:
        print("Passed test: hot_pixel_clipping 1")
        print(
            "    The sigma clipping removed {0} pixels, expected {1}.".format(
                before - after, expected_rej))

    # Test 2:
    #
    #     Now we add in some hot pixels, and check that the expected number of
    #     pixels are masked from the output
    for i in range(n_obs):

        for noise_i in range(n_hot):
            ifu_list[i].data[randint(0,
                                     np.shape(ifu.data)[0] - 1),
                             randint(0,
                                     np.shape(ifu.data)[1] - 1)] = 10000.0

    data_cube, var_cube, weight_cube, diagnostic_info = dithered_cube_from_rss(
        ifu_list, offsets=None)

    before = diagnostic_info['unmasked_pixels_before_sigma_clip']
    after = diagnostic_info['unmasked_pixels_after_sigma_clip']

    n_masked_actual = before - after
    n_masked_expected = n_obs * n_hot * pixel_ratio + expected_rej

    if (np.abs(n_masked_actual - n_masked_expected) > 0.1 * n_masked_expected):
        print("Failed test: hot_pixel_clipping 2")
        print(
            "    Expected number of pixels clipped: {0}, actual number clipped: {1}"
            .format(n_masked_expected, before - after))
    elif verbose:
        print("Passed test: hot_pixel_clipping 2")
        print(
            "    Expected number of pixels clipped: {0}, actual number clipped: {1}"
            .format(n_masked_expected, before - after))
Example #45
from pylab import *
from numpy import *
from numpy import random
from scipy.ndimage import filters
from scipy.misc import imsave

from pcv.tools import rof
"""
This is the de-noising example using ROF in Section 1.5.
"""

# create synthetic image with noise
im = zeros((500, 500))
im[100:400, 100:400] = 128
im[200:300, 200:300] = 255
im = im + 30 * random.standard_normal((500, 500))

U, T = rof.denoise(im, im)
G = filters.gaussian_filter(im, 10)

# save the result
imsave('synth_original.pdf', im)
imsave('synth_rof.pdf', U)
imsave('synth_gaussian.pdf', G)

# plot
figure()
gray()

subplot(1, 3, 1)
imshow(im)
Example #46
print(a / b)

a = np.array([1, 2], dtype=float)

b = np.array([2, 2], dtype=float)

print(a / b)
print(a // b)

# uniform draw in [0, 1]

print(random.random_sample((3, 3)))  # or random.rand

# draw from a normal distribution

print(random.standard_normal((3, 3)))  # or random.randn

a = random.standard_normal(10000)

plt.hist(a, 40)

plt.show()

# display the draws as an image

C = random.standard_normal((32, 32))

print(C.shape)

plt.imshow(C, interpolation='nearest')
plt.colorbar()
Example #47
min_num_samples = 50  #

samples = np.empty(0, dtype=float)
sample_mean = np.nan
confidence_estimate = np.nan
required_num_samples = min_num_samples

# Berry-Esseen constants
C_0 = .4785
C_1 = 30.2338

cbe = lambda x: min(C_0, C_1 * (1 + abs(x))**-3)
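# The cbe bound above encodes the classical Berry-Esseen estimate in both its
# uniform and nonuniform forms:
#   |F_n(x) - Phi(x)| <= (beta / (sigma^3 * sqrt(n))) * min(C_0, C_1 / (1 + |x|)^3)
# The values C_0 = .4785 and C_1 = 30.2338 match published Berry-Esseen
# constants (stated here as an assumption about the author's source).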

for n in range(1, 1000000):

    samples = np.append(samples, rnd.standard_normal())

    if n < required_num_samples:
        continue

    # Compute moments
    sample_mean = np.mean(samples)
    unbiased_samples = samples - sample_mean
    sigma_moment = sqrt(np.sum(unbiased_samples**2) / n)
    beta_bar_moment = np.sum(np.abs(unbiased_samples)**3) / (n * sigma_moment**3)
    beta_hat_moment = np.sum(unbiased_samples**3) / (n * sigma_moment**3)
    kappa_moment = np.sum(unbiased_samples**4) / (n * sigma_moment**4) - 3

    # Estimate the confidence
    sample_sqrt = sqrt(n)
Example #48
from numpy.random import standard_normal, uniform, gamma
import matplotlib.pyplot as plt
from generate_histogram_plot import *

NUM_SAMPLES_PER_DISTRIBUTION = 5000000
NUM_BINS = 50

if __name__ == "__main__":
    # Generate samples for each distribution
    distributions = [
        uniform(-6, 6, NUM_SAMPLES_PER_DISTRIBUTION),
        standard_normal(NUM_SAMPLES_PER_DISTRIBUTION),
        gamma(5.0, size=NUM_SAMPLES_PER_DISTRIBUTION)
    ]
    # Place distributions in histogram objects
    histograms = [
        Histogram("Uniform Distribution", [distributions[0]]),
        Histogram("Gaussian Distribution", [distributions[1]]),
        Histogram("Gamma Distribution", [distributions[2]])
    ]
    # Plot histograms to files
    for hist in histograms:
        outputFilename = "%s.pdf" % hist.title
        hist.plotColumnToFile(outputFilename,
                              0,
                              plt,
                              title=False,
                              numBins=NUM_BINS)
Example #49
mu = 1  # percent drift
NumSimulation = 6
colors = ["b", "g", "r", "c", "m", "k"]

# Plot.
plt.figure(figsize=(12, 12))

Steps = int(round(T / tstep))  # steps in years
S = np.zeros([NumSimulation, Steps])
x = range(0, int(Steps))

for j in range(0, NumSimulation):
    S[j, 0] = Sinit
    for i in x[:-1]:
        S[j, i + 1] = (S[j, i] + S[j, i] * (mu - .5 * pow(sigma, 2)) * tstep
                       + sigma * S[j, i] * np.sqrt(tstep) * standard_normal())

    plt.plot(x, S[j], linewidth=2, color=colors[j])

plt.title(
    "%d Brownian motion simulations using %d Steps, \n$\sigma$=%.6f $\mu$=%.6f$S_O$=%.6f"
    % (int(NumSimulation), int(Steps), sigma, mu, Sinit),
    fontsize=18)
plt.xlabel("Steps", fontsize=16)
plt.grid(True)
plt.ylabel("Stock price", fontsize=16)
plt.ylim(0, 90)
plt.show()
Example #50
 def draw_nparray(self, shape=(1, )):
     """ Draw a numpy array of random samples, of a certain shape."""
     return np.minimum(
         np.maximum(self.mu + self.sigma * standard_normal(shape),
                    self.min), self.max)
Example #51
def generate_data(
    missing: bool,
    datatype: Literal["pandas", "xarray", "numpy"],
    const: bool = False,
    ntk: tuple[int, int, int] = (971, 7, 5),
    other_effects: int = 0,
    rng: RandomState | None = None,
    num_cats: int | list[int] = 4,
):
    if rng is None:
        np.random.seed(12345)
    else:
        np.random.set_state(rng.get_state())
    from linearmodels.typing import Float64Array

    n, t, k = ntk
    k += const
    x = standard_normal((k, t, n))
    beta = np.arange(1, k + 1)[:, None, None] / k
    y: Float64Array = np.empty((t, n), dtype=np.float64)
    y[:, :] = (x * beta).sum(0) + standard_normal(
        (t, n)) + 2 * standard_normal((1, n))
    w = np.random.chisquare(5, (t, n)) / 5
    c = np.empty((y.size, 0), dtype=int)
    if other_effects == 1:
        cats = ["Industries"]
    else:
        cats = ["cat." + str(i) for i in range(other_effects)]
    if other_effects:
        if isinstance(num_cats, int):
            num_cats = [num_cats] * other_effects
        oe = []
        for i in range(other_effects):
            nc = num_cats[i]
            oe.append(np.random.randint(0, nc, (1, t, n)))
        c = np.concatenate(oe, 0)

    vcats = ["varcat." + str(i) for i in range(2)]
    vc2 = np.ones((2, t, 1)) @ np.random.randint(0, n // 2, (2, 1, n))
    vc1 = vc2[[0]]

    if const:
        x[0] = 1.0

    if missing > 0:
        locs = np.random.choice(n * t, int(n * t * missing))
        y.flat[locs] = float(np.nan)
        locs = np.random.choice(n * t * k, int(n * t * k * missing))
        x.flat[locs] = float(np.nan)
    if rng is not None:
        rng.set_state(np.random.get_state())
    if datatype == "numpy":
        return AttrDict(y=y, x=x, w=w, c=c, vc1=vc1, vc2=vc2)

    entities = ["firm" + str(i) for i in range(n)]
    time = date_range("1-1-1900", periods=t, freq="A-DEC")
    var_names = ["x" + str(i) for i in range(k)]
    # y = DataFrame(y, index=time, columns=entities)
    y_df = panel_to_frame(y[None],
                          items=["y"],
                          major_axis=time,
                          minor_axis=entities,
                          swap=True)
    w_df = panel_to_frame(w[None],
                          items=["w"],
                          major_axis=time,
                          minor_axis=entities,
                          swap=True)
    w_df = w_df.reindex(y_df.index)
    x_df = panel_to_frame(x,
                          items=var_names,
                          major_axis=time,
                          minor_axis=entities,
                          swap=True)
    x_df = x_df.reindex(y_df.index)
    if c.shape[1]:
        c_df = panel_to_frame(c,
                              items=cats,
                              major_axis=time,
                              minor_axis=entities,
                              swap=True)
    else:
        c_df = DataFrame(index=y_df.index)
    c_df = c_df.reindex(y_df.index)
    vc1_df = panel_to_frame(vc1,
                            items=vcats[:1],
                            major_axis=time,
                            minor_axis=entities,
                            swap=True)
    vc1_df = vc1_df.reindex(y_df.index)
    vc2_df = panel_to_frame(vc2,
                            items=vcats,
                            major_axis=time,
                            minor_axis=entities,
                            swap=True)
    vc2_df = vc2_df.reindex(y_df.index)
    if datatype == "pandas":
        return AttrDict(y=y_df, x=x_df, w=w_df, c=c_df, vc1=vc1_df, vc2=vc2_df)

    assert datatype == "xarray"
    import xarray as xr
    from xarray.core.dtypes import NA

    x_xr = xr.DataArray(
        PanelData(x_df).values3d,
        coords={
            "entities": entities,
            "time": time,
            "vars": var_names
        },
        dims=["vars", "time", "entities"],
    )
    y_xr = xr.DataArray(
        PanelData(y_df).values3d,
        coords={
            "entities": entities,
            "time": time,
            "vars": ["y"]
        },
        dims=["vars", "time", "entities"],
    )
    w_xr = xr.DataArray(
        PanelData(w_df).values3d,
        coords={
            "entities": entities,
            "time": time,
            "vars": ["w"]
        },
        dims=["vars", "time", "entities"],
    )
    c_vals = PanelData(c_df).values3d if c.shape[1] else NA
    c_xr = xr.DataArray(
        c_vals,
        coords={
            "entities": entities,
            "time": time,
            "vars": c_df.columns
        },
        dims=["vars", "time", "entities"],
    )
    vc1_xr = xr.DataArray(
        PanelData(vc1_df).values3d,
        coords={
            "entities": entities,
            "time": time,
            "vars": vc1_df.columns
        },
        dims=["vars", "time", "entities"],
    )
    vc2_xr = xr.DataArray(
        PanelData(vc2_df).values3d,
        coords={
            "entities": entities,
            "time": time,
            "vars": vc2_df.columns
        },
        dims=["vars", "time", "entities"],
    )
    return AttrDict(y=y_xr, x=x_xr, w=w_xr, c=c_xr, vc1=vc1_xr, vc2=vc2_xr)
Example #52
def generate_data(
        missing,
        datatype,
        const=False,
        ntk=(971, 7, 5),
        other_effects=0,
        rng=None,
        num_cats=4,
):
    if rng is None:
        np.random.seed(12345)
    else:
        np.random.set_state(rng.get_state())

    n, t, k = ntk
    k += const
    x = standard_normal((k, t, n))
    beta = np.arange(1, k + 1)[:, None, None] / k
    y = (x * beta).sum(0) + standard_normal((t, n)) + 2 * standard_normal(
        (1, n))
    w = np.random.chisquare(5, (t, n)) / 5
    c = None
    if other_effects == 1:
        cats = ["Industries"]
    else:
        cats = ["cat." + str(i) for i in range(other_effects)]
    if other_effects:
        if not isinstance(num_cats, list):
            num_cats = [num_cats] * other_effects
        c = []
        for i in range(other_effects):
            nc = num_cats[i]
            c.append(np.random.randint(0, nc, (1, t, n)))
        c = np.concatenate(c, 0)

    vcats = ["varcat." + str(i) for i in range(2)]
    vc2 = np.ones((2, t, 1)) @ np.random.randint(0, n // 2, (2, 1, n))
    vc1 = vc2[[0]]

    if const:
        x[0] = 1.0

    if missing > 0:
        locs = np.random.choice(n * t, int(n * t * missing))
        y.flat[locs] = np.nan
        locs = np.random.choice(n * t * k, int(n * t * k * missing))
        x.flat[locs] = np.nan

    if datatype in ("pandas", "xarray"):
        entities = ["firm" + str(i) for i in range(n)]
        time = date_range("1-1-1900", periods=t, freq="A-DEC")
        var_names = ["x" + str(i) for i in range(k)]
        # y = DataFrame(y, index=time, columns=entities)
        y = panel_to_frame(y[None],
                           items=["y"],
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        w = panel_to_frame(w[None],
                           items=["w"],
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        w = w.reindex(y.index)
        x = panel_to_frame(x,
                           items=var_names,
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        x = x.reindex(y.index)
        c = panel_to_frame(c,
                           items=cats,
                           major_axis=time,
                           minor_axis=entities,
                           swap=True)
        c = c.reindex(y.index)
        vc1 = panel_to_frame(vc1,
                             items=vcats[:1],
                             major_axis=time,
                             minor_axis=entities,
                             swap=True)
        vc1 = vc1.reindex(y.index)
        vc2 = panel_to_frame(vc2,
                             items=vcats,
                             major_axis=time,
                             minor_axis=entities,
                             swap=True)
        vc2 = vc2.reindex(y.index)

    if datatype == "xarray":
        # TODO: This is broken now, need to transform multiindex to xarray 3d
        import xarray as xr

        x = xr.DataArray(
            PanelData(x).values3d,
            coords={
                "entities": entities,
                "time": time,
                "vars": var_names
            },
            dims=["vars", "time", "entities"],
        )
        y = xr.DataArray(
            PanelData(y).values3d,
            coords={
                "entities": entities,
                "time": time,
                "vars": ["y"]
            },
            dims=["vars", "time", "entities"],
        )
        w = xr.DataArray(
            PanelData(w).values3d,
            coords={
                "entities": entities,
                "time": time,
                "vars": ["w"]
            },
            dims=["vars", "time", "entities"],
        )
        if c.shape[1] > 0:
            c = xr.DataArray(
                PanelData(c).values3d,
                coords={
                    "entities": entities,
                    "time": time,
                    "vars": c.columns
                },
                dims=["vars", "time", "entities"],
            )
        vc1 = xr.DataArray(
            PanelData(vc1).values3d,
            coords={
                "entities": entities,
                "time": time,
                "vars": vc1.columns
            },
            dims=["vars", "time", "entities"],
        )
        vc2 = xr.DataArray(
            PanelData(vc2).values3d,
            coords={
                "entities": entities,
                "time": time,
                "vars": vc2.columns
            },
            dims=["vars", "time", "entities"],
        )

    if rng is not None:
        rng.set_state(np.random.get_state())

    return AttrDict(y=y, x=x, w=w, c=c, vc1=vc1, vc2=vc2)
Example #53
 def _simulator(self, nobs):
     return standard_normal(nobs)
Example #54
def standard_normal(size, params):
    try:
        return random.standard_normal(size, params['dtype'], params['out'])
    except ValueError as e:
        exit(e)
Example #55
 def setupClass(cls):
     R.seed(54321)
     cls.X = R.standard_normal((40, 10))
Example #56
ax1.grid(True)
ax2.hist(rn2, bins=25)
ax2.set_title('randint')
ax2.grid(True)
ax3.hist(rn3, bins=25)
ax3.set_title('sample')
ax3.set_ylabel('frequency')
ax3.grid(True)
ax4.hist(rn4, bins=25)
ax4.set_title('choice')
ax4.grid(True)

#Visualize random draws from distributions

sample_size = 500
rn1 = npr.standard_normal(sample_size)
rn2 = npr.normal(100, 20, sample_size)
rn3 = npr.chisquare(df=0.5, size=sample_size)
rn4 = npr.poisson(lam=1.0, size=sample_size)

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(7, 7))

ax1.hist(rn1, bins=25, stacked=True)
ax1.set_title('standard normal')
ax1.set_ylabel('frequency')
ax1.grid(True)
ax2.hist(rn2, bins=25)
ax2.set_title('normal(100,20)')
ax2.grid(True)
ax3.hist(rn3, bins=25)
ax3.set_title('chi square')
Example #57
from numpy import array
import keras
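# NOTE: numpy (np), numpy.random (random), Input (keras.layers) and
# train_test_split (sklearn.model_selection), used below, are assumed to be
# imported earlier in the original source; this snippet's imports are truncated.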

epochs = 20
batch_size = 50
num_samples = 50000
n = 200
m = 30

x = np.zeros((num_samples, n))
y = np.zeros((num_samples, m))
check = np.zeros(num_samples)
x_supp = np.zeros(x.shape)
k = 2  #sparsity of the signal

M = array(np.abs(random.standard_normal((m, n))), dtype='float32')

input_shape = y.shape[1:]
inputs = Input(shape=input_shape)

#Normalize the dictionary as implied by the standard procedure for Matching Pursuit Algorithms.
for ii in range(0, M.shape[1]):
    mind = M[:, ii]**2
    no = mind.sum()
    M[:, ii] = M[:, ii] / np.sqrt(no)
for ii in range(num_samples):
    p = random.permutation(n)
    x[ii, p[0:k]] = random.uniform(0.2, 1, (k, ))
    y[ii, :] = np.dot(M, x[ii, :])

x_train, x_test, y_train, y_test = train_test_split(x,
Example #58
 def setup_class(cls):
     np.random.seed(54321)
     cls.X = standard_normal((40, 10, 30))
     cls.h = scale.Huber(maxiter=1000, tol=1.0e-05)
Example #59
 def __init__(self):
     np.random.seed(54321)
     self.X = standard_normal((40,10,30))
     self.h = scale.Huber(maxiter=1000, tol=1.0e-05)
Example #60
import matplotlib.pyplot as plt
import numpy.random as rd

# x = rd.laplace(loc=100, scale=100,size=(100000,))
# x = rd.normal(loc=0, scale=1,size=(100000,))
x = rd.standard_normal(size=(100000, ))

figure = plt.figure(1, figsize=(8, 6))
ax = figure.add_axes([0.1, 0.1, 0.8, 0.8])

ax.hist(x=x, bins=1000)

plt.show()