Example #1
import numpy as np

def ubspecdat(h, s, f, df):
    """Calculate ubr and Tbr from measured spectra

    The input parameter *f* can be either a scalar or a vector 

    Parameters
    ----------
    h : float or array-like
        Water depth (m)
    s : array-like
        Spectral densities (m^2/Hz) normalized so that
            Hs = 4 * sqrt( sum(s * df) )
        *s* is either of length *nf* or of shape (*nt*, *nf*).
    f : array-like
        Row vector with central frequencies (Hz)
    df : float, array-like, or None
        Scalar or row vector with freq. bandwidths (Hz); if None, the
        bandwidths are inferred from *f*

    Returns
    -------
    (ubr, Tbr) :
        ubr = representative bottom orbital velocity (m/s)
        Tbr = representative bottom wave period (s)
        The alternative bottom period, Tbz, is also calculated (see text).
    """
    # Chris Sherwood, USGS
    # Last revised September 8, 2006
    # Recoded in Python by PL Wiberg, Oct 2014

    s = np.atleast_2d(s)  # accept spectra of shape (nf,) or (nt, nf)
    nt, nf = s.shape
    w = 2 * np.pi * f
    # Determine kh using the Soulsby (2006) method (qkhfs is defined elsewhere in the module)
    kh = qkhfs(w, h)
    w = np.tile(w,(nt,1))
    fm = np.tile(f,(nt,1))
    kh = np.tile(kh,(nt,1))
    h = h * np.ones((nt,nf))

    if df is not None:
        if np.size(df) == 1:
            df = df * np.ones((nt, nf))
        elif np.size(df) == nf:
            df = np.tile(df, (nt, 1))
    else:
        df = np.diff(f)
        df = np.hstack((df, df[-1]))
        df = np.tile(df, (nt, 1))
  
    Su = ((w ** 2) / (np.sinh(kh) ** 2)) * s
    # Sum over frequency (the last axis) so each time step keeps its own value.
    ubr = np.sqrt(2 * np.sum(Su * df, axis=-1))
    fr = np.sum(Su * fm * df, axis=-1) / np.sum(Su * df, axis=-1)
    Tbr = 1. / fr
    fz = np.sqrt(np.sum(Su * fm ** 2 * df, axis=-1) / np.sum(Su * df, axis=-1))
    Tbz = 1. / fz
    
    return ubr, Tbr
    
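A quick, hedged usage sketch for the example above. qkhfs is not shown in this snippet, so the stand-in below solves the dispersion relation w^2 = g*k*tanh(k*h) by Newton iteration in its place; the depth, frequency grid, and synthetic spectrum are made-up demo values.

import numpy as np

def qkhfs(w, h, g=9.81):
    # Stand-in for the Soulsby (2006) routine: solve w^2*h = g*kh*tanh(kh) for kh.
    kh = np.maximum(w * np.sqrt(h / g), w ** 2 * h / g)  # start from the shallow/deep-water limits
    for _ in range(50):
        f = g * kh * np.tanh(kh) - w ** 2 * h
        fp = g * (np.tanh(kh) + kh / np.cosh(kh) ** 2)
        kh = kh - f / fp
    return kh

f = np.linspace(0.05, 0.3, 26)              # frequency grid (Hz)
s = np.exp(-0.5 * ((f - 0.1) / 0.02) ** 2)  # synthetic swell peak at 0.1 Hz, nt = 1
ubr, Tbr = ubspecdat(10.0, s, f, None)      # 10 m depth; df inferred from f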
Example #3
    def stackOLA(self, input_ola, window_ola):
        # Slice the input into 50%-overlapping, windowed frames (overlap-add analysis).
        input_len = len(input_ola)
        window_len = len(window_ola)
        step_stack = window_len // 2
        count_stack = (input_len - window_len) // step_stack + 1
        ola_stack = np.zeros((count_stack, window_len))
        for i in range(count_stack):
            start = i * step_stack
            ola_stack[i] = window_ola * input_ola[start:start + window_len]

        return ola_stack
Example #4
def scaledown(x, minval, maxval):
    """
    Maps [minval, maxval] to [-1, 1], clipping so the points always lie inside
    [-1, 1]. Returns the scaled-down values.
    """
    # np.minimum/np.maximum are elementwise; np.min/np.max would collapse to a scalar.
    scaled = np.minimum(
        np.maximum(2 * (x - minval) / (maxval - minval) - 1,
                   -np.ones(len(x))),
        np.ones(len(x)),
    )
    return scaled
Example #5
def scaleup(x, minval, maxval):
    """
    Maps [-1, 1] to [minval, maxval], clipping so the points always lie inside
    the new range. Returns the scaled-up values.
    """
    scaled = np.minimum(
        np.maximum(
            0.5 * (x + 1) * (maxval - minval) + minval,
            minval * np.ones(len(x)),
        ),
        maxval * np.ones(len(x)),
    )
    return scaled
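A short round-trip sketch for the two scaling helpers above, using made-up demo values:

import numpy as np

x = np.array([0.0, 5.0, 12.0])
z = scaledown(x, 0.0, 10.0)     # -> [-1.0, 0.0, 1.0]; 12.0 is clipped to 1.0
x_back = scaleup(z, 0.0, 10.0)  # -> [0.0, 5.0, 10.0]; clipped points do not round-trip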
Example #6
    def to_signal(self):
        """
            Creates a hyperspy.Signal1D object with the same spectrum

            Returns
            ----------
                s : hyperspy.Signal1D
                    hyperspy object
        """
        nf = len(self.spec_x_array)

        spec_name = 'index'
        if self.spec_units in ['nm', 'um']:
            spec_name = 'Wavelength'
        elif self.spec_units == 'eV':
            spec_name = 'E'

        dict_f = {
            'name': spec_name,
            'units': self.spec_units,
            'scale': self.spec_x_array[1] - self.spec_x_array[0],
            'size': nf,
            'offset': self.spec_x_array[0]
        }

        s = Signal1D(np.squeeze(self.spec_im), axes=[dict_f])
        s.change_dtype('float64')
        return s
Example #7
def get_h_char_track(f, f_start, f_end, M, eta, M_chirp, Dl, constants):

    # constants = np.array([H0, Omega_m, L, f_star, Tobs, NC])
    f_star = constants[3]
    L = constants[2]
    NC = constants[5]
    Tobs = constants[4]

    A_arr = np.zeros(len(f))
    h_c_arr = np.zeros(len(f))

    arg_start = np.where(f <= f_start)[0][-1]
    if (f_end > 1.):  # i.e. off the graph
        arg_end = len(f) - 1
    else:
        arg_end = np.where(f >= f_end)[0][0]

    for i in range(arg_start, arg_end):
        A_arr[i] = get_A(f[i], M, eta, M_chirp, Dl)
        h_c_arr[i] = f[i] * A_arr[i] * np.sqrt(16. / 5.)

    SNR = 0.
    f, Sn = get_Sn(constants)
    for i in range(arg_start, arg_end):
        freq = 0.5 * (f[i] + f[i - 1])
        Sn_est = 0.5 * (1. / Sn[i] + 1. / Sn[i - 1])
        SNR += 16. / 5. * freq * get_A(
            freq, M, eta, M_chirp,
            Dl)**2 * Sn_est * (np.log(f[i]) - np.log(f[i - 1]))
    SNR = np.sqrt(SNR)

    return h_c_arr, SNR
Example #8
def trapezoidal(f, a, b, N):
    # Composite trapezoidal rule on [a, b] with N subintervals.
    h = (b - a) / N
    x = np.linspace(a, b, N + 1)
    y = (h / 2) * (f(x[:-1]) + f(x[1:]))
    return np.sum(y)
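A minimal sanity check for the rule above (the integrand and limits are arbitrary demo choices):

import numpy as np

approx = trapezoidal(np.sin, 0.0, np.pi, 1000)
print(approx)  # ~1.9999984, close to the exact value 2; the error shrinks as O(h^2)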
Example #9
def ssnj_yerror(x, y):
    x_sum = np.sum(x)  # B
    y_sum = np.sum(y)  # F
    y_ave = y_sum / len(y)
    x2_sum = np.sum(x * x)  # A
    delta = len(x) * x2_sum - x_sum * x_sum

    Y_ave = np.array([y_ave] * len(x))
    Y_var = y - Y_ave  # array
    σ_obs = Y_var * Y_var
    σ_y = np.sqrt(np.sum(σ_obs) / (len(x) - 2))

    σ_a = σ_y * np.sqrt(len(x) / delta)
    σ_b = σ_y * np.sqrt(x2_sum / delta)
    return [σ_a, σ_b]
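A hedged usage sketch for the error estimator above, on a made-up noisy line:

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 20)
y = 2.0 * x + 1.0 + rng.normal(0.0, 0.5, x.size)
σ_a, σ_b = ssnj_yerror(x, y)  # by the standard least-squares formulas, σ_a is the
                              # slope uncertainty and σ_b the intercept uncertainty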
Example #10
def get_cohen_d(treatment, control):
    # For a t-test, the effect size (Cohen's d) is the mean difference over the pooled SD.

    m_t = np.mean(treatment)
    m_c = np.mean(control)

    sd_t = np.std(treatment)
    sd_c = np.std(control)

    n_t = len(treatment)
    n_c = len(control)

    # Pooled standard deviation: the (n-1)-weighted variances are averaged *inside* the sqrt.
    sd_pooled = np.sqrt((
        ((n_t - 1) * np.power(sd_t, 2))
        +
        ((n_c - 1) * np.power(sd_c, 2))
    ) / (n_t + n_c - 2))

    d = (m_t - m_c) / sd_pooled
    return d
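A quick sketch exercising the fixed function on synthetic data (the distribution parameters are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
treatment = rng.normal(1.0, 2.0, 500)
control = rng.normal(0.0, 2.0, 500)
print(get_cohen_d(treatment, control))  # ≈ 0.5, a "medium" effect for these parameters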
Example #11
    def __init__(self, vN, tilt, p, nx1):
        self.nx = len(vN)
        self.t = np.round(
            np.append(np.linspace(-tilt, tilt, nx1 // 2),
                      np.linspace(tilt, -tilt, nx1 // 2))).astype(int)
        self.v = np.zeros_like(vN)
        for j in range(self.nx):
            self.v[j] = np.roll(vN[j], -int(p), axis=0)
            for i in range(nx1):
                self.v[j, i] = np.roll(self.v[j, i], self.t[i], axis=0)
Example #12
 def find_constant_for_all(self, max_iteration_number=40):
     # we need to find F(every E and T) - F(E_min, T_min) = F(every E and T)
     # at all steps and summ it for each point (E, T)
     E = np.arange(-100, 200, 0.25)
     T = np.arange(-30, 40, 0.05)
     X, Y = np.meshgrid(E, T)
     d1x, d2x = np.shape(X)
     d1y, d2y = np.shape(Y)
     X_ = X.reshape((d1x * d2x, 1))
     Y_ = Y.reshape((d1y * d2y, 1))
     t = 1  # start time
     # Z_ = self.vector_calc_fuctional(X_, Y_, t)
     z_min = np.array((0,))
     y_min = np.array((0,))
     x_min = np.array((0,))
     # np array for quadratic errors of each iteration
     error = np.zeros((len(E), len(T)))
     for i in range(0, max_iteration_number):
         pass
Example #13
  def integrate(self, loBE = 0., hiBE = 1., model = "linear", integralmethod = "simpson", args = "none"): 
    """
    Returns a float value that represents the area under specified peaks in XPS data. 

    The inputs are:
      loBE [eV]: Numerical value of the lower bound of the binding energy interval (abscissa) to be analyzed. Default: 0. eV. 
      hiBE [eV]: Numerical value of the upper bound of the binding energy interval (abscissa) to be analyzed. Default: 1. eV. 
      model: A string indicating the background type to be removed. Valid inputs are "linear", "shirley", "tougaard", "blended" or "none". Default: "linear". 
      integralmethod: A string indicating method of integration. Valid inputs are "simpson" and "trapezoid". Default: "simpson".  
      args: Other arguments affecting the integration method. Default: "none". 
      
      
    """    
    # First, determine the indices of the upper and lower bounds of the energy interval of interest.

    # Index of starting energy
    n1 = 0
    while self["BE"][n1] <= loBE:
      n1 = n1 + 1

    # Index of ending energy
    n2 = len(self["BE"]) - 1
    while self["BE"][n2] >= hiBE:
      n2 = n2 - 1

    # Second, produce a numpy array that represents the XPS background.
    # rm_background is called once over the interval; the original called it inside a
    # loop and discarded the result.
    if model == "none":
      backgroundvalues = numpy.zeros(n2 - n1)
    else:
      backgroundvalues = self.rm_background(loBE = self["BE"][n1], hiBE = self["BE"][n2], model = model)

    # Finally, integrate using scipy's integration functions to obtain a float value.
    if integralmethod == "simpson":
      return scipy.integrate.simps(self["C1"][n1:n2] - backgroundvalues, self["BE"][n1:n2])
    elif integralmethod == "trapezoid":
      return scipy.integrate.trapz(self["C1"][n1:n2] - backgroundvalues, self["BE"][n1:n2])
Example #14
def medianFilt(oi, kernel_size=None):
    """
    kernel_size is the half width
    """
    if isinstance(oi, list):
        return [medianFilt(o, kernel_size=kernel_size) for o in oi]

    if 'OI_FLUX' in oi.keys():
        # -- make sure the tellurics are handled properly
        if 'TELLURICS' in oi.keys():
            t = oi['TELLURICS']
        else:
            t = np.ones(len(oi['WL']))
        for k in oi['OI_FLUX'].keys():
            for i in range(len(oi['OI_FLUX'][k]['MJD'])):
                mask = ~oi['OI_FLUX'][k]['FLAG'][i, :]
                oi['OI_FLUX'][k]['FLUX'][i, mask] = scipy.signal.medfilt(
                    oi['OI_FLUX'][k]['FLUX'][i, mask] / t[mask],
                    kernel_size=kernel_size) * t[mask]
                oi['OI_FLUX'][k]['EFLUX'][i, mask] /= np.sqrt(kernel_size)
    if 'OI_VIS' in oi.keys():
        for k in oi['OI_VIS'].keys():
            for i in range(len(oi['OI_VIS'][k]['MJD'])):
                mask = ~oi['OI_VIS'][k]['FLAG'][i, :]
                oi['OI_VIS'][k]['|V|'][i, mask] = scipy.signal.medfilt(
                    oi['OI_VIS'][k]['|V|'][i, mask], kernel_size=kernel_size)
                oi['OI_VIS'][k]['E|V|'][i, mask] /= np.sqrt(kernel_size)

                oi['OI_VIS'][k]['PHI'][i, mask] = scipy.signal.medfilt(
                    oi['OI_VIS'][k]['PHI'][i, mask], kernel_size=kernel_size)
                oi['OI_VIS'][k]['EPHI'][i, mask] /= np.sqrt(kernel_size)

    if 'OI_VIS2' in oi.keys():
        for k in oi['OI_VIS2'].keys():
            for i in range(len(oi['OI_VIS2'][k]['MJD'])):
                mask = ~oi['OI_VIS2'][k]['FLAG'][i, :]
                oi['OI_VIS2'][k]['V2'][i, mask] = scipy.signal.medfilt(
                    oi['OI_VIS2'][k]['V2'][i, mask], kernel_size=kernel_size)
                oi['OI_VIS2'][k]['EV2'][i, mask] /= np.sqrt(kernel_size)

    if 'OI_T3' in oi.keys():
        for k in oi['OI_T3'].keys():
            for i in range(len(oi['OI_T3'][k]['MJD'])):
                mask = ~oi['OI_T3'][k]['FLAG'][i, :]
                oi['OI_T3'][k]['T3PHI'][i, mask] = scipy.signal.medfilt(
                    oi['OI_T3'][k]['T3PHI'][i, mask], kernel_size=kernel_size)
                oi['OI_T3'][k]['ET3PHI'][i, mask] /= np.sqrt(kernel_size)

                oi['OI_T3'][k]['T3AMP'][i, mask] = scipy.signal.medfilt(
                    oi['OI_T3'][k]['T3AMP'][i, mask], kernel_size=kernel_size)
                oi['OI_T3'][k]['ET3AMP'][i, mask] /= np.sqrt(kernel_size)
    return oi
Example #15
        def minimize(self, coefficients=None, degree=None, initialize=False):
            if initialize:
                if coefficients is not None:
                    degree = len(coefficients)
                else:
                    # try a random set of coefficients
                    if degree is None:
                        degree = len(self.target)
                    coeff = np.zeros((degree, 6), dtype=np.float64)
                    coeff[0, 0] = kc  # kc is assumed to be defined in the enclosing scope
                    coeff[:, 3] = 1.0
                    return coeff
            # actual optimisation
            self.earliercoefficients = np.array(self.coefficients)

            def error(*args):
                pass
Example #16
def arm_selection(network,arms,alg=discounted_thompson,gamma = 1, field=[],first_iter=False):
    """
    Performs one round when the agents select an arm based on a specified algorithm and assign the allocation
    sequence and array of rewards to the corresponding agent.
    
    Inputs:
    network - network of social interactions
    field - Gaussian random field
    alg - which allocation algorithm will be used (default: discounted_thompson)
    """
    global K
    
    if alg == deterministic_ucl or alg == block_ucl:
        arms = field_to_arms(field)
        for i in range(len(network)):
            priors = network.node[i]['prior mean']
            prior_var = network.node[i]['prior variance']
            variance = network.node[i]['variance']
            n_arms = len(arms)
            n_visits = network.node[i]['n']
            avg_arms = network.node[i]['m_bar']
            time_horizon = 1
            a,b,n,m_bar = alg(priors,prior_var,variance,n_arms,time_horizon,arms,n_visits, avg_arms,random_rewards=False)
            network.node[i]['alloc_seq'] = np.append(network.node[i]['alloc_seq'],field_to_arms(a,math.sqrt(n_arms)))
            network.node[i]['rewards'] = np.append(network.node[i]['rewards'],b)
            network.node[i]['prior mean'] = np.mean(network.node[i]['rewards']) #update priors to have them ready for next iteration 
            network.node[i]['n'] = network.node[i]['n'] + n
            network.node[i]['m_bar'] = network.node[i]['m_bar'] + m_bar
    

    elif alg == discounted_thompson or alg == discounted_thompson_general:
        for i in range(len(network)):
            alpha_0 = beta_0 = 1
            S = network.node[i]['S']
            F = network.node[i]['F']
            T = 1
            alloc_seq,reward_seq,S,F = alg(arms,gamma,alpha_0,beta_0,K, T, S,F, first_iter)
            network.node[i]['alloc_seq'] = np.append(network.node[i]['alloc_seq'],alloc_seq)
            network.node[i]['rewards'] = np.append(network.node[i]['rewards'],reward_seq)
            network.node[i]['S'] = S
            network.node[i]['F'] = F
    else:
        print ('ERROR, a mistake in this function')
Example #17
 def prime(z):
     alpha = 0.2
     # Implementing a MAP classifier along with softmax.
     # f, x and y are assumed to be defined in the enclosing scope.
     if not f:
         # Compute softmax values for each set of scores in x.
         e_x = np.exp(x - np.max(x))
         scores = e_x / e_x.sum()
         # Class priors estimated from the label frequencies in y.
         q = np.zeros(3)
         q[0] = len(x[y == 0]) / len(x)
         q[1] = len(x[y == 1]) / len(x)
         q[2] = len(x[y == 2]) / len(x)
         for l in range(2):
             z = np.argmax(scores[l] * q[l])
     # Leaky-ReLU-style thresholding of z.
     z[z < 0] = alpha
     z[z > 0] = 1
     return z
Example #18
import math
import numpy as np
from spgl1 import spgl1

def basisPursuit_Lorenz96(Vapproximate, D, optEquation, optPolynomial, opts, sigma):
    n = Vapproximate.shape[1]
    Vtest = Vapproximate[:, optEquation - 1]  # optEquation between 1 and n inclusive
    soln = np.zeros((D.shape[1], 1))

    # normalize dictionary columns and solve the optimization
    norms = np.sqrt(np.sum(np.square(D), axis=0))
    Dnormalized = np.divide(D, np.tile(norms, (D.shape[0], 1)))
    c, r, g, d = spgl1(Dnormalized, Vtest, 0, sigma, verbosity=opts.get('verbosity'),
                       iter_lim=opts.get('iterations'))
    c = np.divide(c, norms)  # undo the column normalization

    # rescale back to the monomial basis
    cRecoverIndex = np.argwhere(c)
    if optPolynomial == 'legendre':
        for i in range(0, len(cRecoverIndex)):
            indTmp = cRecoverIndex[i]
            if indTmp == 0:
                soln[indTmp] = c[indTmp]
            elif indTmp <= n:
                soln[indTmp] = c[indTmp] * math.sqrt(3)
            else:
                soln[indTmp] = c[indTmp] * 3
    else:
        soln = c
    return soln
Example #19
  def rm_background(self, loBE = 0., hiBE = 1., model = "linear", offset = 0., blend = [], tParam = []):	
    """
    Return a numpy array corresponding to the background electron count.
    
    XPS data typically consists of the sum of the signal from an XPS event and
    some background count from scattered electrons. rm_background calculates
    the background electron count over a specified interval and returns an
    array with this data. The interval is defined by the loBE and hiBE input
    arguments, the default being the lower bound and upper bound of the
    object's binding energy data, respectively. The returned array has
    elements corresponding to the elements in self["BE"] in the specified
    energy interval by default. 
    
    Input arguments as well as their units and default values are given as
    follows:
      loBE [eV]: Numerical value of the lower bound of the binding energy
      interval to be analyzed. Default: lower bound of the object's binding
      energy.
      hiBE [eV]: Numerical value of the upper bound of the binding energy
      interval to be analyzed. Default: upper bound of the object's binding
      energy.
      model: A string indicating the name of background removal algorithm to
      use. Valid inputs are "linear", "shirley", "blended" for blended Shirley 
      type background, or "tougaard".
      offset [eV]: A float value of desired energy offset for calculation
      of background. Default: 0. eV. 
      blend: A float value between 0 and 1 for blending linear and Shirley
      backgrounds. See notes for blend-type background. Default: [].
      tParam: A float value for use in the "tougaard" removal algorithm.
      Default: [].
      
    """
    
    background_values = list()
    
#Index of starting energy 
    n1 = 0
    while self["BE"][n1] <= loBE:
      n1 = n1 + 1
      
#Index of ending energy
    n2 = len(self["BE"]) - 1
    while self["BE"][n2] >= hiBE:
      n2 = n2 - 1

#Containers for the left/right integrated areas used by the Shirley-type models
    A1 = numpy.zeros(n2)
    A2 = numpy.zeros(n2)

#    energyInterval = (self["BE"] > loBE) & (self["BE"] < hiBE) 
             

#     For materials with a relatively small step in the background over the 
#     energy range covered by the peaks, the background in this case may be 
#     approximated by a linear type background:
#     
#     L(E) = (I1 * (E2 - E) + I2 * (E - E1)) / (E2 - E1)
#     
#     where E1 and E2 are two distinct energies and I1 and I2 are the two 
#     associated intensity values.
    
    if model == "linear":
      for i in range(n1, n2):
        BG = (self["C1"][n1] * (self["BE"][n2] - self["BE"][i]) + self["C1"][n2] * (self["BE"][i] - self["BE"][n1])) / (self["BE"][n2] - self["BE"][n1])
        background_values.append(BG)
      return numpy.array(background_values)
    
    
#     The Shirley algorithm is an iterative determination of the background.
#     The formula for computing the Shirley background is:
#     
#     S(E) = I2 + (I1 - I2) * A2(E) / (A1(E) + A2(E))
#     
#     where the integrated areas A1(E) and A2(E) represent the area under
#     the spectrum to the left and right of the energy value E. 
#
#     See the original paper at DOI: 10.1103/PhysRevB.5.4709.

    elif model == "shirley":
      
      for i in range(n1, n2):
        A1[i] = StaibDat.integrate(loBE = self["BE"][0], hiBE = self["BE"][i], model = "shirley", integralmethod = "simpson")
        A2[i] = StaibDat.integrate(loBE = self["BE"][i], hiBE = self["BE"][-1], model = "shirley", integralmethod = "simpson")
        BG = self["C1"][n2] + (self["C1"][n1] - self["C1"][n2]) * A1[i] / (A1[i] + A2[i])
        background_values().append(BG)
      return numpy.array(background_values)
    
#     If you find that area intensity ratios of certain peaks are violated
#     using pure Shirley or linear backgrounds, use a blended type in order
#     to satisfy whatever ratio is dictated by physics (e.g. degeneracy of
#     doublet peaks, etc.) to make a "less wrong" background. 
#     
#     The blended type background is calculated from a blend of linear and 
#     an offset Shirley backgrounds:
#     
#     OS(E:u,v) = S(E - v) * (1 - u) + u * L(E)
#     
#     The parameters u and v represent a linear blend between the Shirley
#     background (u = 0) and a linear background (u = 1), where the Shirley
#     curve is offset by an energy of v eV. 

          
    elif model == "blended":
    
      if offset != 0.:
        n3 = 0
    	while self["BE"][n3] <= self["BE"][n1] + offset:
      	  n3 = n3 + 1      
      
      if n3 < n1:
        for i in range(n3, n2):
          linBG = (self["C1"][n1] * (self["BE"][n2] - self["BE"][i]) + self["C1"][n2] * (self["BE"][i] - self["BE"][n1])) / (self["BE"][n2] - self["BE"][n1])
          A1[i] = StaibDat.integrate(loBE = self["BE"][0], hiBE = self["BE"][i], model = "shirley", integralmethod = "simpson")
          A2[i] = StaibDat.integrate(loBE = self["BE"][i], hiBE = self["BE"][-1], model = "shirley", integralmethod = "simpson")
          ShrBG = self["C1"][n2] + (self["C1"][n3] - self["C1"][n2]) * A1[i] / (A1[i] + A2[i])
          BG = blend * linBG + ShrBG * (1 - blend)
          background_values().append(BG)
        return numpy.array(background_values)
      
      elif n3 >= n1:
        for i in range(n1, n2):
          linBG = (self["C1"][n1] * (self["BE"][n2] - self["BE"][i]) + self["C1"][n2] * (self["BE"][i] - self["BE"][n1])) / (self["BE"][n2] - self["BE"][n1])
          A1[i] = StaibDat.integrate(loBE = self["BE"][0], hiBE = self["BE"][i], model = "shirley", integralmethod = "Simpson")
          A2[i] = StaibDat.integrate(loBE = self["BE"][i], hiBE = self["BE"][-1], model = "shirley", integralmethod = "Simpson")
          shrBG = self["C1"][n2] + (self["C1"][n1] - self["C1"][n2]) * A1[i] / (A1[i] + A2[i])
          BG = blend * linBG + shrBG * (1 - blend)
          background_values().append(BG)
        return numpy.array(background_values)

#     The Tougaard background is based on an energy loss cross section F(x) 
#     representing the probability that an electron at energy offset x undergoes
#     a loss event and therefore appears as a contribution to the background. 
#     
#     See: DOI:10.1016/S0039-6028(98)00852-8
    	
    elif model == "tougaard":
      for i in range(n1, n2):
        for j in range(n1, n2):
          BG =  scipy.integrate.trapz(B * self["C1"][i] * self["C1"][j - i] / (1634 + (self["C1"][j - i])^2)^2)
          background_values().append(BG) 

    else: 
      raise InputError 
Example #20
import numpy as np

# 'number' and 'message' are assumed to have been defined in an earlier cell.
counter = 0
for _ in range(number):
    print(message)

#%%
message = int(input("Enter the total numbers you want to be averaged:"))

A = []
for counter in range(message):
    A.append(counter + 1)
print(A)

total = int(np.sum(A))
length = len(A)
average = int(total / length)

print("Average: %s" % average)

#%%

message = int(input("Enter the total numbers you want to be averaged:"))

total = 0
for i in range(message):
    total += i + 1

average = int(total / message)
print("Average: %s" % average)
Example #21
 def __len__(self):
     return len(self.trace)
Example #22
import numpy as np
import matplotlib.pyplot as plt

data = np.loadtxt("random_walks.txt")
y = data[:, 0]
x = np.linspace(0, len(data), len(data))  # bin edges; the original passed 1, yielding a single point
plt.hist(y, x, (0, 100))
plt.savefig("rand.png")  # save before show(), which closes the figure
plt.show()
Example #23
def vfit_grad( X, Y, V, V_err, nmin=7):
    """
    Function to fit a single gradient to a velocity field.
    It assumes solid body rotation, and it uses the velocity uncertainty.

    X : Off-Set. With appropriate units (e.g. deg)
    Y : Off-Set. With appropriate units (e.g. deg)
    V : Radial velocity. With appropriate units (e.g. km/s)
    V_err : Uncertainty in radial velocity. With appropriate units (e.g. km/s)

    param :
    nmin : Minimum number of pixels required to carry out the fit. Default is 7, 
    which is appropriate for single dish data that is Nyquist sampled

    OUTPUTS:
    Vc:       Mean centroid velocity in km/s
    Vc_err:   Uncertainty of the mean centroid velocity in km/s
    Grad:     Velocity gradient in km/s/pc
    Grad_Err: Uncertainty associated to the velocity gradient (km/s/pc)
    PosAng:   Position angle of the fitted gradient, in degrees
    PAErr:    Uncertainty of the position angle (degrees)
    ChiSq:    Chi^2
    """

    # Xold = copy(X)
    # Yold = copy(Y)
    npts = X.size

    if npts < nmin:
        results = result_container()
        results.Grad = np.nan
        results.Grad_err = np.nan
        results.GradPA = np.nan
        results.GradPA_err = np.nan
        results.Vc = np.nan
        results.Vc_err = np.nan
        return results #
    wt = 1/(V_err**2)
# Obtain total weight, and average (x,y,v) to create new variables (dx,dy,dv)
# which provide a lower uncertainty in the fit.
#
    sumWt  = np.sum(wt)
    x_mean=np.sum(X*wt)/sumWt
    y_mean=np.sum(Y*wt)/sumWt
    v_mean=np.sum(V*wt)/sumWt
    dx = (X-x_mean)#[mask]  # remove mean value from inputs 
    dy = (Y-y_mean)#[mask]  # to reduce fit uncertainties
    dv = (V-v_mean)#[mask]  #
    M = [[np.sum(wt),   np.sum(dx*wt),    np.sum(dy*wt)], 
        [np.sum(dx*wt), np.sum(dx**2*wt), np.sum(dx*dy*wt)], 
        [np.sum(dy*wt), np.sum(dx*dy*wt), np.sum(dy**2*wt)]]
    #print M
    from scipy import linalg
    try:
        covar = linalg.inv(M)
    except linalg.LinAlgError:
        import sys
        sys.exit('Singular matrix: no solution returned')
    coeffs = np.dot(covar,[[np.sum(dv*wt)], [np.sum(dx*dv*wt)],[np.sum(dy*dv*wt)]])
    #
    errx= np.sqrt(covar[1, 1])
    erry= np.sqrt(covar[2, 2])
    #
    gx = coeffs[1][0]
    gy = coeffs[2][0]
    #
    vc = coeffs[0]+v_mean
    vp = coeffs[0]+coeffs[1]*dx+coeffs[2]*dy
    grad     = np.sqrt(coeffs[1]**2+coeffs[2]**2)
    posang   = np.arctan2(gy, -gx)*180/np.pi
    #print -gx,gy,posang
    red_chisq = np.sum( (dv-vp)**2*wt)/(len(dv)-3.)

    vc_err   = 0.
    grad_err = np.sqrt((gx*errx)**2+(gy*erry)**2)/grad
    #print 'grad_err1', grad_err
    grad_err = np.sqrt((gx*errx)**2+(gy*erry)**2+2*gx*gy*covar[2,1])/grad
    #print 'grad_err2', grad_err
    paerr    = 180/np.pi*np.sqrt((gx/(gx**2+gy**2))**2*erry**2+
                         (gy/(gx**2+gy**2))**2*errx**2)
    #print 'paerr1',paerr
    paerr    = 180/np.pi*np.sqrt((gx/(gx**2+gy**2))**2*erry**2+
                         (gy/(gx**2+gy**2))**2*errx**2-2*gx*gy/(gx**2+gy**2)**2*covar[2,1])
    #print 'paerr2',paerr
    chisq    = red_chisq
    vp += v_mean
    # Restore X and Y
    # X=copy(Xold)
    # Y=copy(Yold)
    results = result_container()
    results.Grad = grad
    results.Grad_err = grad_err
    results.GradPA = posang
    results.GradPA_err = paerr
    results.Vc = vc
    results.Vc_err = vc_err
    return results #
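A hedged end-to-end sketch of the fitter above on a synthetic solid-body field. result_container is not shown in this snippet, so a trivial attribute-holder stands in for it; the field coefficients and noise level are made-up demo values.

import numpy as np

class result_container:  # minimal stand-in: the fitter only attaches attributes to it
    pass

rng = np.random.default_rng(1)
X, Y = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
X, Y = X.ravel(), Y.ravel()
V = 2.0 + 1.5 * X - 0.5 * Y + rng.normal(0.0, 1e-3, X.size)  # v = v0 + gx*x + gy*y
res = vfit_grad(X, Y, V, np.full(X.size, 1e-3))
print(res.Grad, res.GradPA)  # gradient magnitude ~ sqrt(1.5**2 + 0.5**2) ~ 1.58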
Example #24
def fit_gaussian1D_mask(signal, position, mask_width, center_type="COM"):
    """
    Fit a 2D gaussian to a masked image based on
    the location of the mask, size of the mask and
    the type of the mask
    
    Parameters
    ----------
    signal:      ndarray
                 The 1D signal that will be fitted with the Gaussian
    position:    float
                 x center of the mask
    mask_width:  float
                 The size of the mask.
    center_type: str
                 Center location for the first pass of the Gaussian.
                 Default is `COM`, while the other options are `minima`
                 or `maxima`.
    
    Returns
    -------
    popt: tuple
          Refined X position, Standard deviation, Amplitude
    
    Notes
    -----
    This code uses the `scipy.optimize.curve_fit` module to fit a 1D
    Gaussian peak to masked data. `mask_x` refers to the initial 
    starting position. Also, this can take in `minima` as a string 
    for initializing Gaussian peaks.
    
    See also
    --------
    fit_gaussian2D_mask
    initialize_gauss1D
    gaussian_1D_function
    
    :Authors:
    Debangshu Mukherjee <*****@*****.**>
    """
    xV = np.arange(len(signal))
    sub = np.abs(xV - position) < mask_width
    x_pos = np.asarray(xV[sub], dtype=float)
    masked_signal = np.asarray(signal[sub], dtype=float)
    mi_min = np.amin(masked_signal)
    mi_max = np.amax(masked_signal)
    if center_type == "minima":
        calc_signal = (masked_signal - mi_max) / (mi_min - mi_max)
        initial_guess = initialize_gauss1D(x_pos, calc_signal, "maxima")
    else:
        calc_signal = (masked_signal - mi_min) / (mi_max - mi_min)
        initial_guess = initialize_gauss1D(x_pos, calc_signal, center_type)
    # The amplitude is the last element of the initial guess.
    lower_bound = ((initial_guess[0] - mask_width), 0,
                   ((-2.5) * initial_guess[-1]))
    upper_bound = (
        (initial_guess[0] + mask_width),
        (2.5 * mask_width),
        (2.5 * initial_guess[-1]),
    )
    popt, _ = spo.curve_fit(
        gaussian_1D_function,
        x_pos,
        calc_signal,
        initial_guess,
        bounds=(lower_bound, upper_bound),
        ftol=0.01,
        xtol=0.01,
    )
    if center_type == "minima":
        popt[-1] = (popt[-1] * (mi_min - mi_max)) + mi_max
    popt[-1] = (popt[-1] * (mi_max - mi_min)) + mi_min
    return popt
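The helpers this example assumes (gaussian_1D_function, initialize_gauss1D, and the spo alias for scipy.optimize) are not shown, so the sketch below supplies minimal stand-ins with guessed signatures and runs one fit on a synthetic peak:

import numpy as np
import scipy.optimize as spo

def gaussian_1D_function(x, x0, sigma, amp):
    # Plain 1D Gaussian; assumed parameter order (position, width, amplitude).
    return amp * np.exp(-((x - x0) ** 2) / (2 * sigma ** 2))

def initialize_gauss1D(x, y, center_type="maxima"):
    # Crude first guess: peak location (or centroid), spread of x, peak height.
    x0 = x[np.argmax(y)] if center_type == "maxima" else np.sum(x * y) / np.sum(y)
    return (x0, np.std(x), np.amax(y))

signal = gaussian_1D_function(np.arange(100, dtype=float), 42.0, 3.0, 1.0)
print(fit_gaussian1D_mask(signal, position=40, mask_width=15))  # ~ (42.0, 3.0, 1.0)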
Example #25
    def step(self, a, LoggedSignals, params):
        #s = self.state
        nSteps = 8200
        Action = a  # MATLAB port: the action argument was called "Action" below
        Constants = params.Constants
        Settings = params.Settings
        T70 = 0

        oilUptakeDelay = Constants.OIL_UPTAKE_DELAY/Settings.DT
        V = 80; Amax = 1.21; Amin = 0.79

        state1 = LoggedSignals.State
        T = LoggedSignals.T
        mask8 = (T % 480) > 475  # for RT: execute the Ag/ammonia action only every 8 h
        if params.RT == 0:
            mask8 = 1

        S = LoggedSignals.S
        if (params.control == 'valid') and LoggedSignals.Start > 0:
            LoggedSignals.Start = 0; T = 0; params.RT = 0  # temp
        if T > LoggedSignals.Start:
            A = LoggedSignals.A1[T]
            SOil = LoggedSignals.SOil
            DO = LoggedSignals.DO1[T]
            X = LoggedSignals.X1[T]
        else:
            # start
            A=LoggedSignals.A
            LoggedSignals.isDexLow=0
            LoggedSignals.soyBeanFeedMat = np.zeros((len(params.oil_feed_time), nSteps))
            SOil=0
            DO=LoggedSignals.DO
            X=LoggedSignals.X/Constants.prop_biomss
            #LoggedSignals.S=State(6);
        t_se = 480
        delA = params.delAmat(Action)
        if not mask8:
            delA=0
        if params.RT==1:
            t_se=60
        if T==0:
            t_se=24*60-1
        LoggedSignals.F_s[1440] =0
        LoggedSignals.Ag[1440] =285
        for i in range(1,t_se):
            T=T+1 #;% if T>1; LoggedSignals.F_s[T] =LoggedSignals.F_s(T-1) ;end
            if T<24*60:
                LoggedSignals.fs_min[T]=0; LoggedSignals.fs_max[T]=0
                LoggedSignals.ag_min[T]=Settings.agL(1); LoggedSignals.ag_max[T]=Settings.agH(1);
                if T==1:
                    LoggedSignals.Ag[T] =285
                LoggedSignals.F_s[T] =0
                A=params.meanA24[T];
            elif T > 24*60 - 1 and T < 70*60:
                A=A+delA
                temA=A
                A=temA-delA if temA>Amax else A #   isApple = True if fruit == 'Apple' else False
                A=temA-delA if temA<Amin else A
                delA=0
                lowS = np.where((LoggedSignals.S < 2) & (LoggedSignals.S > 0))[0]  # MATLAB find(..., 1, 'first')
                if lowS.size == 0:  # LoggedSignals.S[T] > 5
                    if T == 24*60:
                        LoggedSignals.F_s[T] = 100; LoggedSignals.Ag[T] = 271
                        A = 1.19
                    LoggedSignals.fs_min[T] = Settings.fsL(2); LoggedSignals.fs_max[T] = Settings.fsH(2)
                    LoggedSignals.ag_min[T] = Settings.agL(2); LoggedSignals.ag_max[T] = Settings.agH(2)
                else:
                    if T == lowS[0]:
                        LoggedSignals.T2 = T
                        LoggedSignals.F_s[T] = 140
                    LoggedSignals.fs_min[T] = params.Settings.fsL(3); LoggedSignals.fs_max[T] = params.Settings.fsH(3)
                    LoggedSignals.ag_min[T] = Settings.agL(2); LoggedSignals.ag_max[T] = Settings.agH(2)

            elif T>70*60-1:
                A=A+delA
                temA=A
                A=temA-delA if temA>Amax else A #   isApple = True if fruit == 'Apple' else False
                A=temA-delA if temA<Amin else A
                delA=0
                LoggedSignals.fs_min[T]=params.Settings.fsL(4); LoggedSignals.fs_max[T]=params.Settings.fsH(4);
                LoggedSignals.ag_min[T]=params.Settings.agL(3); LoggedSignals.ag_max[T]=params.Settings.agH(3);
                if T==70*60:
                    LoggedSignals.F_s[T] =110;            LoggedSignals.Ag[T] = LoggedSignals.Ag(T-1);


            delFs=params.fsM(Action);
            if i>1 or (LoggedSignals.F_s[T] +params.fsM(Action))>LoggedSignals.fs_max[T] or (LoggedSignals.F_s[T] +params.fsM(Action))<LoggedSignals.fs_min[T]:
                delFs=0;
            LoggedSignals.F_s[T+1] =LoggedSignals.F_s[T] +delFs#;% FS solution
            LoggedSignals.F_s[T] =Constants.DEXTROSE_FEEDING_CONCENTRATION*LoggedSignals.F_s[T];
            if T<30*60:# % late feeding
                  LoggedSignals.F_s[T]=0;
            delAg=params.agM(Action);
            if i == 1 and hasattr(LoggedSignals, 'T2') and T >= LoggedSignals.T2 and T <= (LoggedSignals.T2 + 24*60):
               delAg =np.min(params.agM)

            if not mask8 or i > 1 or (LoggedSignals.Ag[T] + delAg) > LoggedSignals.ag_max[T] or (LoggedSignals.Ag[T] + delAg) < LoggedSignals.ag_min[T]:
                delAg=0;

            # for T>70 , Ag rate is 16h (for t<70 it equal to 8h)
                 #    T70=[T70; T];if T70(end)-T70(end-1)<1.2*t_se; delAg=0; end

            LoggedSignals.Ag[T+1] =LoggedSignals.Ag[T] +delAg;
            # LoggedSignals.Ag[T] =max(LoggedSignals.Ag[T] ,LoggedSignals.ag_max[T]);
            if T==LoggedSignals.Start+1: # %T==1
                #%LoggedSignals.Ag=zeros(1,9000);
                LoggedSignals.rnd1 = 1 + int(np.floor(np.random.rand() * len(params.impData1)))
                LoggedSignals.AgOld=280#;%params.impData1{1,LoggedSignals.rnd1}.agitation[T];
                LoggedSignals.S[T]=S; LoggedSignals.P1[T]=0;#%params.impData1{1,LoggedSignals.rnd1}.sConcen(1);



            if i==1:
                   # %  LoggedSignals.Ag[T]=LoggedSignals.AgOld+params.agM( Action);
                LoggedSignals.AgOld=LoggedSignals.Ag[T];

            rnd1=LoggedSignals.rnd1;
        #% type
            """
              if strcmp( params.control,'FsAg'):
                i=i
             elif  params.control=='valid':
                LoggedSignals.F_s[T] =params.impData1{1,rnd1}.F_s[T];% LoggedSignals.Ag[T]= LoggedSignals.AgOld; %LoggedSignals.Ag[T]=params.agM( Action);
             LoggedSignals.Ag[T]=params.impData1{1,rnd1}.agitation[T];
             A=params.impData1{1,rnd1}.A[T];
             Constants.airFlowVVM=params.impData1{1,rnd1}.airFlowVVM[T];
             if T==1% daynamics verfication
                LoggedSignals.S[T]=  params.impData1{1,1}.sConcen(1);
                X=0.15;
              % State(2)=params.impData1{1,1}.biomass(1);
               DO=0.007;%=params.impData1{1,1}.DOg2l(1);
                A=params.impData1{1,1}.A(1);
    
            """

        #% dymamics
            idx=1;
            relRows= T>2 and np.transpose(LoggedSignals.isDexLow)==0 and np.transpose(LoggedSignals.S[T])<5;
            if relRows:
                    LoggedSignals.isDexLow=1;
                    relFeedingIdx=T+np.arange(oilUptakeDelay,T)+oilUptakeDelay+params.oil_feed_time(idx);#%period of soybean oil feeding
     #               norm_feeding_bell=self.new_bell(relFeedingIdx,length(relFeedingIdx)/0.5,...
    #%                 0.1,t+oilUptakeDelay+(params.oil_feed_time(idx)/2));% normalized sigmoid value for feeding
                    c1=T+oilUptakeDelay+params.oil_feed_time(idx)/2
                    sig=params.oil_feed_time(idx)/4
                    norm_feeding_bell = np.exp(-((relFeedingIdx - c1)/sig)**2)
                    sum_norm_feeding_bell = np.sum(norm_feeding_bell)
                    feeding_bell=norm_feeding_bell*params.InitialCond.SoybeanOil/sum_norm_feeding_bell
                    sel = relFeedingIdx <= nSteps
                    LoggedSignals.soyBeanFeedMat[idx, relFeedingIdx[sel]] = feeding_bell[sel]*params.oil_utility(idx)
                    LoggedSignals.soyBeanFeedMat[idx, relFeedingIdx[sel]] = 0
                    relDepressionIdx=np.arange(T,T+oilUptakeDelay)
                    recoveringIdx=T+np.arange(oilUptakeDelay,nSteps)
    #%                 tau=T+oilUptakeDelay;
                    tau=c1;
                    LoggedSignals.C[idx, relDepressionIdx] = params.oil_utility(idx)
                    LoggedSignals.C[idx, recoveringIdx] = 1 - (1 - params.oil_utility(idx))*np.exp(-((recoveringIdx - (T + oilUptakeDelay))
                                                                                           /(tau - (T + oilUptakeDelay))))

#%                 DO[idx,t-1]=min(DO(idx,t-1)+0.15*Constants.DO_MAX,Constants.DO_MAX);

 #                if 0#%strcmp( params.test1,'DO10')
#                                                    DO=0.2*Constants.DO_MAX;

            rel_mo=params.m_o[1]*LoggedSignals.C[T];
            rel_mx=params.m_x(1)*LoggedSignals.C[T];
            div = np.sqrt(params.K_I(1)*params.K_ps2(1))/(params.K_I(1)+(np.sqrt(params.K_I(1)*params.K_ps2(1)))+
                                                          (np.sqrt(params.K_I(1)*params.K_ps2(1)))**2/params.K_ps2(1))
            norm_mich_ment = (LoggedSignals.S[T]/(params.K_I(1)+LoggedSignals.S[T]+
                (LoggedSignals.S[T]**2/params.K_ps2(1))))/div
            norm_mich_ment_oil = (SOil/(params.K_I(1)+SOil+
                        (SOil**2/params.K_ps2(1))))/div
            mu=LoggedSignals.C[T]*params.mu_x(1)*(LoggedSignals.S[T]/(params.K_x(1)+LoggedSignals.S[T]))*(DO/(params.K_ox(1)+DO))*(A/(params.K_xa(1)+A))
            alpha1=4e-4; ag05=290;
            mu_ag = 1/(1 + np.exp(alpha1*ag05*(LoggedSignals.Ag[T] - ag05)))
            mu_pp=LoggedSignals.C[T]*params.mu_p(1)*(A/(params.K_p.ravel()+A))*(DO/(DO+params.K_op(1)))*norm_mich_ment;
            mu_pp=mu_pp*mu_ag;
            mu_pp_oil=LoggedSignals.C[T]*params.mu_p(1)*(A/(params.K_p(1)+A))*(DO/(DO+params.K_op(1)))*norm_mich_ment_oil #;%Dextrose part of Michaelis Menten
            mu_pp_oil=mu_pp_oil*mu_ag;
            m_xOil=rel_mx*SOil/(LoggedSignals.S[T]+SOil);
            m_xDex=rel_mx*LoggedSignals.S[T]/(LoggedSignals.S[T]+SOil);
            minusSOil=Settings.DT*X*(mu_pp_oil/params.Y_ps(1)+m_xOil);
            plusSOil=SOil+ LoggedSignals.soyBeanFeedMat[T];
            isOverDraft=minusSOil>plusSOil;
            coefOil = np.where(isOverDraft, plusSOil/minusSOil, 1.0)  # MATLAB logical indexing
            SOil = SOil + LoggedSignals.soyBeanFeedMat[T] - Settings.DT*X*coefOil*(mu_pp_oil/params.Y_ps(1) + m_xOil)



            plusDO = DO + Settings.DT*(params.K_la(1)*
                ((LoggedSignals.Ag[T]/Constants.AGI_REF_VAL)**2*(Constants.airFlowVVM/Constants.AF_REF_VAL))*(Constants.DO_MAX - DO))
            minusDO=Settings.DT*(X*
                ((mu/params.Y_xo(1))+rel_mo+(mu_pp+mu_pp_oil)/params.Y_po(1)));
            DOCoef=(plusDO-(Constants.DO_MAX/25))/minusDO;
            plusS=LoggedSignals.S[T]+Settings.DT*(LoggedSignals.F_s[T]/V);
            minusS=Settings.DT*(X*
                ((mu/params.Y_xs(1))+m_xDex+
                mu_pp/params.Y_ps(1)));
            SCoef=(plusS-0.1)/minusS;
            reductionCoef = np.minimum(DOCoef, SCoef)
            naturalS=LoggedSignals.S[T]+Settings.DT*(-X*((mu/params.Y_xs(1))+m_xDex+mu_pp/params.Y_ps(1))+LoggedSignals.F_s[T]/V);
            coefS=LoggedSignals.S[T]+Settings.DT*(-X*reductionCoef*
                ((mu/params.Y_xs(1))+m_xDex+
                mu_pp/params.Y_ps(1))+LoggedSignals.F_s[T]/V);

            naturalDO = DO + Settings.DT*(X*
                (-(mu/params.Y_xo(1)) - rel_mo - (mu_pp + mu_pp_oil)/params.Y_po(1)) + params.K_la(1)*
                ((LoggedSignals.Ag[T]/Constants.AGI_REF_VAL)**2*
                (Constants.airFlowVVM/Constants.AF_REF_VAL))*(Constants.DO_MAX - DO))
            coefDO = DO + Settings.DT*(X*reductionCoef*
                (-(mu/params.Y_xo(1)) - rel_mo - (mu_pp + mu_pp_oil)/params.Y_po(1)) + params.K_la(1)*
                ((LoggedSignals.Ag[T]/Constants.AGI_REF_VAL)**2*
                (Constants.airFlowVVM/Constants.AF_REF_VAL))*(Constants.DO_MAX - DO))
            isCoef = (coefS > naturalS) | (coefDO > naturalDO)
            LoggedSignals.S[T+1]=naturalS;
            #%LoggedSignals.S[T](find(isCoef))=coefS(find(isCoef));
            if isCoef:
                LoggedSignals.S[T+1]=coefS;

            DO = np.where(isCoef, coefDO, naturalDO)
            DO = np.minimum(Constants.DO_MAX, DO)
            Coef = np.where(isCoef, reductionCoef, 1.0)

            #%A=1;%;params.impData1{1,rnd1}.A;
            X=X+Settings.DT*X*(Coef*mu-params.K_d(1)*A);
            LoggedSignals.P1[T+1] = np.maximum(0, LoggedSignals.P1[T] + Settings.DT*
                ((Coef*mu_pp + coefOil*mu_pp_oil)*X - params.K(1)*LoggedSignals.P1[T]))  #%in [g/l]
            LoggedSignals.A1[T]=A; LoggedSignals.DO1[T]=DO; LoggedSignals.X1[T]=X;  LoggedSignals.A[T]=A;
        for t in range(0, t_end-(dt1+1), Settings['DT']):  #  start from t=1[minutes]
            currModelState[pref['Data variables']] =\
                validationData[pref['Data variables']].iloc[t]
            #  If featuresDist is a modeled parameter, we should not update it from data!!
            currModelState[pref['featuresDist']] = validationData[pref['featuresDist']].iloc[t]

            for var in pref['Variables']:
                delVariableName = var + '_del'
            if t % dt1:
                modeledVars[var].iloc[t+1] =modeledVars[var].iloc[t]

            else:
                varT=modeledVars[var].to_numpy()[t]
                bestParams = results[var]['bestParams']
                relTestData = currModelState[bestParams['features']]#.to_numpy()
                allRelTestDataInit = pd.concat([currModelState[bestParams['features']],currModelState[bestParams['featuresDist']]]
                                , axis=1).dropna()

                allRelTestData = allRelTestDataInit.T.drop_duplicates().T  # Remove duplications from "allRelTrainDataInit" dataframe

                testDistVar = currModelState[bestParams['featuresDist']].to_numpy()
                test_dist={}; train_dist={}; distSumSqr = 0
                for varForDist in bestParams['featuresDist']:
                    test_dist[varForDist] = np.repeat(testDistVar.T,
                                                      dataDict[var]['trainDistVar'].size, axis=0)
                    train_dist[varForDist] = np.repeat(dataDict[var]['trainDistVar'],
                                                       len(testDistVar),
                                                       axis=1)

                    distVarSqr = (test_dist[varForDist] - train_dist[varForDist]) ** 2
                    distSumSqr+=distVarSqr
                npoints=int(np.ceil(bestParams['frac']*dataDict[var]['trainDistVar'][:,0].size))
                dist=np.sqrt(distSumSqr)
                w=np.argsort(dist,axis=0)[:npoints]
                deltaVar, x ,bias_re,bias_vel= loess_nd_test_point_mat\
                            (pref,pd.DataFrame(relTestData).T,bestParams['features'],dataDict[var]['relTrainData'], dataDict[var]['trainDistVar'],
                             dataDict[var]['trainResults'],dist,w,scale_params[var],var,varT, frac=frac1)
                for jj in range(1,1*len(pref['Combinations']),1): # in case of bias go to the next config
                    ii=results[var]['sortedCombinations'][jj]
                    if bias_vel<0.7 or bias_vel>1.3:
                        break
                    if len(pref['Combinations'][ii]['features']) <2:
                        continue
                    allRelTrainDataInitB = pd.concat([modelingDataCombined[pref['Combinations'][ii]['features']],
                                         modelingDataCombined[pref['Combinations'][ii]['featuresDist']],
                                         modelingDataCombined[delVariableName]], axis=1).dropna()

                    allModelingData = allRelTrainDataInitB.T.drop_duplicates().T  # Remove duplications from "allRelTrainDataInit" dataframe

                    #dataDict[var]['relTrainData'] = allModelingData[pref['Combinations'][ii]['features']].to_numpy()  # relevant features for linear equation.
                    relTestData = currModelState[pref['Combinations'][ii]['features']]
                    deltaVar, x ,bias_re,bias_vel= loess_nd_test_point_mat\
                            (pref,pd.DataFrame(relTestData).T,pref['Combinations'][ii]['features'],
                             allModelingData[pref['Combinations'][ii]['features']].to_numpy(), dataDict[var]['trainDistVar'],
                             dataDict[var]['trainResults'],dist,w,scale_params[var],var,varT, frac=frac1)
                    demo=2
                modeledVars[var].iloc[t+1] = \
                        modeledVars[var].iloc[t] + dt1*deltaVar/60
                modeledVars[var+'_biasVel'].iloc[t+1:t+1+dt1]=list(bias_re*np.ones([1,dt1]).T)
            currModelState[var] = modeledVars[var].iloc[t+1]

            if T>(nSteps-480):
                break

        # ODEINT IS TOO SLOW!
        # ns_continuous = integrate.odeint(self._dsdt, self.s_continuous, [0, self.dt])
        # self.s_continuous = ns_continuous[-1] # We only care about the state
        # at the ''final timestep'', self.dt

        sh = 0
        bellMax = 0.65
        lowDexVal = 0.1
        highDexVal = 4
        if LoggedSignals.S[T] > bellMax:
            DexState = (LoggedSignals.S[T] - bellMax)/(highDexVal - bellMax)
        else:
            DexState = -(LoggedSignals.S[T] - bellMax)/(lowDexVal - bellMax)

        LoggedSignals.T = T
        LoggedSignals.State[1] = (np.mean(LoggedSignals.DO1[np.arange(T+1-120-sh, T-sh)]) - 0.0005)/0.0005
        #% LoggedSignals.State(2)=Constants.prop_biomss*(mean( LoggedSignals.X1(T+1-120:T))-4)/3;%(LoggedSignals.DO1(T-sh)-LoggedSignals.DO1(T-240-sh))/1e-7;
        LoggedSignals.State[2] = 0.02*LoggedSignals.F_s[T+1] - 2.2
        LoggedSignals.State[3] = (LoggedSignals.A[T-sh] - 1.05)/0.3
        LoggedSignals.State[4] = ((T-sh) - 3500)/4000
        LoggedSignals.State[5] = (LoggedSignals.S[T-sh] - LoggedSignals.S[T-60-sh])/0.3  #%(T/60)*mean( LoggedSignals.F_s(1:T))/V;

        LoggedSignals.State[6] = DexState
        LoggedSignals.State[7] = ((LoggedSignals.Ag[T-sh] - 270)/(280 - 260))
        LoggedSignals.SOil = SOil
        #% state shrvul
        NextObs = LoggedSignals.State;
#% Reward upon time
        if T > nSteps - 480:
            IsDone = 1  #%NextObs(3)<-100.05; %abs(X) > EnvConstants.XThreshold || abs(Theta) > EnvConstants.ThetaThresholdRadians;
            Reward = 0
        else:
            IsDone = 1  # note: both branches currently set the same values
            Reward = 0
        self.state = NextObs  # the original assigned an undefined `ns` here; NextObs is the state just computed
        terminal = self._terminal()
        reward = -1. if not terminal else 0.
        return (NextObs, reward, IsDone, LoggedSignals, {})
Example #26
def ustarflag(date,
              data,
              flags,
              isday,
              outdir,
              min_thresh=0.1,
              nboot=100,
              ustar_v=2,
              plot=False):
    '''
    Friction velocity flagging for Eddy Covariance data. Assesses the threshold
    of friction velocity (ustar) below which a reduction in CO2 flux correlates
    with ustar and returns a flag here. Originally coded by Tino Rau.
    
    
    Definition
    ----------
    ustarflag(date, isday, data, flags, outdir, min_thresh=0.1, nboot=100,
              ustar_v=2, plot=False):
    
    
    Input
    ----- 
    date        np.array(N), julian date
    data        np.array(N,3), data array with CO2 flux (Fco2 [mumol/m2s),
                friction velocity (ustar [m/s]) and air temperature (T [degC])
    flags       np.array(N,3), dtype=int, quality flag of data, 0 where data is
                good
    isday       np.array(N), dtype=bool, True where it is day and False where
                it is night
    outdir      str, path of the output folder
    
                        
    Optional Input
    --------------
    min_thresh  float, minimum ustar threshold, recommendation: 0.1 for
                overstorey towers, 0.01 for understorey towers (default: 0.1)
    nboot       int, number of boot straps (default: 100)
    ustar_v     int, value which shall be returned when a value is below the 
                ustar threshold (default: 2)
    udef        int/float, missing value of data (default: -9999) NaN values are
                excluded from computations anyhow.
    plot        bool, if True data and spikes are plotted (default: False)
    
    
    Output
    ------
    flag_out    np.array(N), flag array where everything is 0 except where
                values fall below ustar threshold, there it is ustar_v
    
    
    Restrictions
    ------------
    - works ONLY for a data set of ONE FULL year
    - works ONLY for half hourly time steps
    
    
    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.

    Copyright (c) 2014 Arndt Piayda

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.


    History
    -------
    Written,  AP, Aug 2014
    '''

    ############################################################################
    # fixed parameters
    nperiods = 4  # number of seasons per year
    ntclass = 6  # number of temperature classes
    corrthresh = 0.3  # correlation coefficient
    nuclass = 20  # number of u* classes
    upercent = 95.  # percentage for average comparison
    udef = -9999  # undef of output

    ############################################################################
    # data and flags
    Fco2 = data[:, 0]
    ustar = data[:, 1]
    T = data[:, 2]
    flag_Fco2 = flags[:, 0]
    flag_ustar = flags[:, 1]
    flag_T = flags[:, 2]

    years, months, days, hours, mins, sc = dec2date(date, fulldate=True)
    nmons = 12 // nperiods
    yrmin = np.min(years)
    yrmax = np.max(years)
    nyears = yrmax - yrmin + 1  ######## works only for one full year of data
    if (nyears > 1) | (np.unique(months).size < 12):
        raise ValueError(
            'ustarflag: only one full year of data can be processed')
    flag = (flag_Fco2 == 0) & (~isday) & (flag_T == 0) & (flag_ustar == 0)

    ############################################################################
    # prepare bootstrapping
    threshs = np.zeros((nboot, nyears))
    seasonal_threshs = np.zeros((nperiods * nboot, nyears))

    ############################################################################
    # calculate thresholds
    for y in range(yrmin,
                   yrmax + 1):  ######## works only for one full year of data
        #print 'Year ', y
        periods = np.unique(months[np.where(years == y)])[0:-1:nmons]
        nperiods = len(np.unique(months[np.where(years == y)])) // nmons
        out_threshs = np.array([])
        nstepsyear = len(np.where(years == y)[0])
        #print nstepsyear

        ########################################################################
        # start bootstrapping
        for r in range(nboot):
            range_min = np.min(np.where(years == y)[0])
            range_max = np.max(np.where(years == y)[0])
            rand = np.random.randint(0, nstepsyear, size=nstepsyear)
            T_b = T[range_min + rand]
            flag_b = flag[range_min + rand]
            years_b = years[range_min + rand]
            months_b = months[range_min + rand]
            ustar_b = ustar[range_min + rand]
            Fco2_b = Fco2[range_min + rand]

            # flag the created bootstrap-arrays
            T_f = np.extract(flag_b, T_b)
            months_f = np.extract(flag_b, months_b)
            years_f = np.extract(flag_b, years_b)
            ustar_f = np.extract(flag_b, ustar_b)
            Fco2_f = np.extract(flag_b, Fco2_b)

            # container for thresholds of the seasons of a year
            period_threshs = np.array([])

            ####################################################################
            # loop over seasons
            for m in periods:
                period_flag = (months_f >= m) & (months_f < m + nmons)

                # flag data --> just night_time and unflagged data is left
                T_m = np.ma.array(T_f, mask=~period_flag, fill_value=udef)
                ustar_m = np.ma.array(ustar_f,
                                      mask=~period_flag,
                                      fill_value=udef)
                Fco2_m = np.ma.array(Fco2_f,
                                     mask=~period_flag,
                                     fill_value=udef)

                # sort the array according to the temperature
                ii = np.ma.argsort(T_m)
                T_m = T_m[ii]
                ustar_m = ustar_m[ii]
                Fco2_m = Fco2_m[ii]

                # separate into 6 temperature classes with
                # equal size according to quantiles
                class_width = np.ma.count(T_m) // ntclass
                ntlen = T_m.size

                # container for the thresholds of this period
                class_threshs = np.array([])

                ################################################################
                # loop for every temperature class
                for i in range(ntclass):
                    T_temp = T_m[i *
                                 class_width:np.min([(i + 1) *
                                                     class_width, ntlen - 1])]
                    u_temp = ustar_m[i *
                                     class_width:np.min([(i + 1) *
                                                         class_width, ntlen -
                                                         1])]
                    Fco2_temp = Fco2_m[i *
                                       class_width:np.min([(i + 1) *
                                                           class_width, ntlen -
                                                           1])]

                    # correlation between temperature and u* in this class
                    r_abs = np.abs(np.corrcoef(T_temp, u_temp)[0, 1])

                    # skip the class if temperature and u* are strongly
                    # correlated (the online gap-filling tool uses 0.3 as
                    # r-threshold; the original Papale paper uses 0.4)
                    if r_abs >= corrthresh:
                        continue

                    # sort the data of the temperature class according
                    # to ustar
                    ii = np.ma.argsort(u_temp)
                    T_temp = T_temp[ii]
                    u_temp = u_temp[ii]
                    Fco2_temp = Fco2_temp[ii]

                    # width of the equally sized (by count) u* classes
                    u_class_width = np.ma.count(T_temp) // nuclass
                    nulen = T_temp.size

                    ############################################################
                    # loop over all u* classes except the highest one: the
                    # threshold is reached when the class-average flux comes
                    # within `upercent` % of the average of all classes above
                    for u in range(nuclass - 1):
                        lo = u * u_class_width
                        hi = min((u + 1) * u_class_width, nulen - 1)
                        T_u = T_temp[lo:hi]
                        u_u = u_temp[lo:hi]
                        Fco2_u = Fco2_temp[lo:hi]

                        rest_Fco2 = Fco2_temp[hi:]
                        rest_avg = np.ma.mean(rest_Fco2)
                        avg = np.ma.mean(Fco2_u)

                        if np.abs(avg) >= np.abs(upercent / 100. * rest_avg):
                            class_threshs = np.append(class_threshs,
                                                      np.ma.mean(u_u))
                            break

                # the median over all temperature classes is taken as the
                # threshold of this period
                med_class = np.ma.median(class_threshs)
                if not np.isnan(med_class):
                    period_threshs = np.append(period_threshs, med_class)
                else:
                    period_threshs = np.append(period_threshs, udef)

            # thresholds of all periods
            out_threshs = np.append(out_threshs, period_threshs)
            # if any period produced a threshold take the maximum,
            # otherwise fall back to the 90th percentile of u*
            if period_threshs.size == 0:
                threshs[r, y - yrmin] = np.sort(ustar_f)[int(len(ustar_f) * 0.9)]
            else:
                threshs[r, y - yrmin] = np.max(period_threshs)

        seasonal_threshs[0:nperiods * nboot, y - yrmin] = out_threshs

    ############################################################################
    # set the flags

    # assign calculated threshold:
    # for each year separately,
    # for night and day-time data
    # take the median of the nboot bootstrapped thresholds for each year
    # "out_threshs" used for export and later plotting

    flag_out = np.zeros_like(flag_Fco2, dtype=int)

    for y in range(yrmin, yrmax + 1):
        if np.any(threshs[:, y - yrmin] > 10):
            print('Warning: implausibly high u_star thresholds for year', y,
                  threshs[:, y - yrmin])
        med = np.median(threshs[:, y - yrmin])
        if (med < min_thresh):
            med = min_thresh
        elif np.isnan(med):
            print('Warning: u_star threshold for year', y, 'is NaN')
            yr_idx = np.where(years == y)[0]
            ii = np.argsort(ustar[yr_idx])
            oo = np.where(flag_Fco2[yr_idx[ii]] == 0)[0]
            # index into the full array so the assignment is not lost
            # on a temporary copy
            flag_out[yr_idx[ii[oo][::int(len(oo) * 0.9)]]] += ustar_v
        else:
            # flag all records below the threshold (plus the record following
            # each of them) that are not already flagged
            ii = (years == y).flatten()
            yy = np.where((ustar[ii] < med) & (flag_Fco2[ii] == 0))
            uu = np.clip(np.unique(np.concatenate((yy[0], yy[0] + 1))), 0,
                         len(flag_Fco2[ii]) - 1)
            flag_out[np.where(years == y)[0][uu]] += ustar_v


            ####################################################################
            # plot
            if plot:
                import matplotlib.pyplot as plt
                import matplotlib.backends.backend_pdf as pdf
                fig1 = plt.figure(1)
                sub1 = fig1.add_subplot(111)
                sub1.plot(ustar[np.where(flag & (years == y).flatten())],
                          Fco2[np.where(flag & (years == y).flatten())], 'bo')
                sub1.axvline(x=med, linewidth=0.75, color='r')
                plt.ylabel('F CO2')
                plt.xlabel('u_star')
                plt.title('u_star thresh: %5.3f' % med)

                # write the PDF before calling show(), which blocks until
                # the interactive window is closed
                pp1 = pdf.PdfPages(outdir + '/ustar.pdf')
                fig1.savefig(pp1, format='pdf')
                pp1.close()
                plt.show()

    ############################################################################
    # save output
    np.savetxt('%s/u_star_thresh.csv' % outdir,
               threshs,
               fmt='%4.4f',
               delimiter=',')
    np.savetxt('%s/u_star_thresh_seasonal.csv' % outdir,
               seasonal_threshs,
               fmt='%4.4f',
               delimiter=',')

    return flag_out
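
############################################################################
# The heart of the routine above is the class-average criterion: within
# each temperature class the u* classes are scanned from low to high, and
# the threshold is the mean u* of the first class whose mean CO2 flux
# reaches `upercent` percent of the mean flux of all higher classes.
# Below is a minimal self-contained sketch of just that criterion on
# synthetic data; the name `find_ustar_threshold` and the synthetic flux
# model are illustrative, not part of the original routine.

def find_ustar_threshold(ustar, fco2, nuclass=20, upercent=99.0):
    ii = np.argsort(ustar)
    ustar, fco2 = ustar[ii], fco2[ii]
    width = len(ustar) // nuclass
    for u in range(nuclass - 1):
        lo, hi = u * width, (u + 1) * width
        avg = np.mean(fco2[lo:hi])      # mean flux of this u* class
        rest_avg = np.mean(fco2[hi:])   # mean flux of all higher classes
        if np.abs(avg) >= np.abs(upercent / 100. * rest_avg):
            return np.mean(ustar[lo:hi])
    return np.nan

# synthetic night-time flux that saturates above u* ~ 0.3
rng = np.random.default_rng(0)
u_demo = rng.uniform(0., 1., 5000)
f_demo = 5. * np.tanh(u_demo / 0.3) + rng.normal(0., 0.3, u_demo.size)
print(find_ustar_threshold(u_demo, f_demo))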
Example #27
0
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq



for i in range(6):
    file = "/Users/anita/Desktop/new_data_1/500ms_SUN_0000" + str(i) + ".txt"
    pixel = np.loadtxt(file, usecols=(0,))
    intensity = np.loadtxt(file, usecols=(1,))
    # mean pixel value of this file
    mean = np.sum(pixel) / len(pixel)
    print(mean)
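
# Reading each file twice with np.loadtxt is unnecessary: both columns can
# be pulled in a single pass with unpack=True. A small sketch, assuming the
# same two-column layout; the path is the author's and is kept only for
# illustration.
fname = "/Users/anita/Desktop/new_data_1/500ms_SUN_00000.txt"
pixel, intensity = np.loadtxt(fname, usecols=(0, 1), unpack=True)
print(pixel.mean())  # same value as np.sum(pixel) / len(pixel)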
Example #28
0
            ax.plot(trpp_down['x'], trpp_down['y'], 'r')
            ax.plot([-1, -1], [max(copss_lost), max(cntrpss_lost)], 'k--')
        ax.set_title(r'E={:.2f} keV'.format(Ekev))
        ax.set_xlabel(r'P$_\phi$/$\psi_w$')
        ax.set_ylabel(r'$\mu\frac{B_0}{E}$')
        ax.set_ylim([0, 1.5])
        ax.set_xlim([-2, 1.])
        ax.grid(True)

        ## filling between lines
        ax.fill_between(boundaries['min']['tr_up']['x'], boundaries['min']['tr_up']['y'],\
                        boundaries['max']['tr_up']['y'], color='r', alpha=0.6)
        ax.fill_between(boundaries['min']['x'], boundaries['min']['cntr'],\
                        boundaries['max']['cntr'], color='k', alpha=0.6)
        f.tight_layout()
        ax.set_xlim([-1.2, 0.])
        ax.set_ylim([1., 1.5])
        f.savefig('boundaries_ripple_{:s}_E{:.2f}.png'.format(run, Ekev),
                  dpi=800)

    return b, B0, R0, boundaries


if len(sys.argv) == 4:
    fname_a5 = sys.argv[1]
    run = sys.argv[2]
    Ekev = float(sys.argv[3])
    main(fname_a5, run, Ekev, 0, 1)
else:
    print('not enough input arguments: expected fname_a5, run, Ekev')
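# Counting sys.argv entries works but gives terse errors; argparse handles
# validation and usage text. A sketch with the same three positional
# arguments, their meanings inferred from the call to main() above (left
# commented out so it does not clash with the sys.argv handling):
#
#   import argparse
#   parser = argparse.ArgumentParser(description='plot ripple boundaries')
#   parser.add_argument('fname_a5', help='input file')
#   parser.add_argument('run', help='run label')
#   parser.add_argument('Ekev', type=float, help='energy in keV')
#   args = parser.parse_args()
#   main(args.fname_a5, args.run, args.Ekev, 0, 1)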
Example #29
0
import numpy as np
import matplotlib.pyplot as plt

time = np.arange(0, 60, 0.05)
nt = len(time)


dt = time[1] - time[0]

theta = np.zeros(nt)
v = np.zeros(nt)

theta[0] = 1.0
v[0] = 1.0

# Euler-Cromer integration of a simple pendulum in the small-angle
# limit, with g/L = 1 assumed: theta'' = -theta
i = 0
while i < nt - 1:
    v[i + 1] = v[i] - theta[i] * dt
    theta[i + 1] = theta[i] + v[i + 1] * dt
    i += 1

# total energy per unit mass (kinetic + potential, small-angle form)
energy = 0.5 * v**2 + 0.5 * theta**2

# theta vs time plot
plt.plot(time, theta)
# energy vs time plot
plt.plot(time, energy)
plt.title('pendulum angle and energy vs time')
plt.xlabel('time (s)')
plt.ylabel('theta, energy')
plt.show()
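
# The Euler-Cromer step above (new velocity used to advance the angle)
# keeps the energy bounded. A plain forward-Euler step, which advances the
# angle with the old velocity, inflates the energy by exactly a factor of
# (1 + dt**2) per step for this oscillator -- a quick check under the same
# assumptions:
th_e = np.zeros(nt)
v_e = np.zeros(nt)
th_e[0] = v_e[0] = 1.0
for i in range(nt - 1):
    th_e[i + 1] = th_e[i] + v_e[i] * dt   # old velocity
    v_e[i + 1] = v_e[i] - th_e[i] * dt    # old angle
E_e = 0.5 * v_e**2 + 0.5 * th_e**2
print(E_e[-1] / E_e[0])  # ~ (1 + dt**2) ** nt: steady energy growth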
Example #30
0
def record_data(network, data_array):
    """Take a single-iteration snapshot of the rate of communication."""
    for n in network.nodes():
        # number of partners this node has shared information with
        data_array[n] = len(network.nodes[n]['information shared with'])
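
# A usage sketch for record_data, assuming each node stores a set under the
# 'information shared with' attribute; the graph construction below is
# hypothetical, and networkx is assumed because node attributes are read
# that way above.
import numpy as np
import networkx as nx

G = nx.erdos_renyi_graph(10, 0.3, seed=1)
for n in G.nodes():
    G.nodes[n]['information shared with'] = set()
G.nodes[0]['information shared with'].update({1, 2})

data = np.zeros(G.number_of_nodes())
record_data(G, data)
print(data)  # data[0] == 2.0, all other entries 0.0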