Example #1
def delay(omegaV, free):
	# Assumes numpy as np, scipy.special as spcl, and the project modules
	# dipole and defsDipole are imported at module level.
	E_0 = -1.1591
	E_m = [-0.8502, -0.3278, -0.1665]
	T = 3 / 0.02419
	delta_m = np.zeros(np.size(E_m))
	for j in (0, 1, 2):
	    delta_m[j] = E_m[j] - E_0
	optSize = np.size(omegaV)
	alpha1f = np.zeros(optSize, 'complex')
	alpha3f = np.zeros(optSize, 'complex')

	# fill every point of the photon-energy grid
	for i in range(optSize):
	    alpha1f[i] = dipole.constructDipole(omegaV[i], defsDipole.wf1)
	    alpha3f[i] = dipole.constructDipole(omegaV[i], defsDipole.wf3)

	dawsArg1st = T*(delta_m[0] - omegaV)
	dawsArg3rd = T*(delta_m[1] - omegaV)
	realFactor = np.real((alpha1f * np.exp(-free*np.power(dawsArg1st, 2.0)))
            + (alpha3f * np.exp(-np.sqrt(free)*np.power(dawsArg3rd, 2.0))))

	imagFactor = (alpha1f * (-2j / np.sqrt(np.pi)) * spcl.dawsn(free*dawsArg1st))\
                + (alpha3f * (-2j / np.sqrt(np.pi)) * spcl.dawsn(np.sqrt(free)*dawsArg3rd))

	# differentiate with respect to the energy grid itself: np.gradient
	# treats an array second argument as coordinate values
	dIm = np.gradient(imagFactor, omegaV)
	dRe = np.gradient(realFactor, omegaV)
	zSquared = realFactor*realFactor + np.imag(imagFactor)*np.imag(imagFactor)
	ans = (realFactor * np.imag(dIm) - np.imag(imagFactor * dRe)) / zSquared
	return ans
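A note on the derivative step above: when np.gradient receives an array as its second argument, it treats it as the coordinate values (here the photon-energy grid), not as per-point spacings. A minimal self-contained sketch, with a made-up quadratic test signal:

import numpy as np

omega = np.linspace(0.5, 1.5, 101)               # uniform energy grid
f = omega**2                                     # toy signal with derivative 2*omega

df_scalar = np.gradient(f, omega[1] - omega[0])  # scalar spacing
df_coords = np.gradient(f, omega)                # coordinate array; also handles non-uniform grids

assert np.allclose(df_scalar, df_coords)         # identical on a uniform grid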
Example #2
    def getEnergy(self, normalized=True, mask=None):
        """
        Returns the current energy.

        Parameters:
            normalized: Flag to return the normalized energy (that is, divided
            by the total density)
            mask: Optional mask passed to np.ma.array to exclude regions
            from the sum
        """
        if self.gpu:
            psi = self.psi.get()
            V = self.Vdt.get() / self.dt
        else:
            psi = self.psi
            V = self.Vdt / self.dt  # host arrays need no .get()
        density = np.absolute(psi) ** 2
        gradx = np.gradient(psi)[0]
        normFactor = density.sum() if normalized else 1.0
        return (
            np.ma.array(
                -(
                    0.25 * np.gradient(np.gradient(density)[0])[0]
                    - 0.5 * np.absolute(gradx) ** 2
                    - (self.g_C * density + V) * density
                ),
                mask=mask,
            ).sum()
            / normFactor
        )
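The kinetic term in getEnergy combines a Laplacian of the density with the gradient-squared of psi. Below is a stripped-down 1D sketch of the same stencil; the Gaussian state, harmonic potential, and g_C value are invented for illustration and are not the class's actual fields:

import numpy as np

x = np.linspace(-10, 10, 512)
dx = x[1] - x[0]
psi = np.exp(-x**2 / 2).astype(complex)          # toy Gaussian state
V = 0.5 * x**2                                   # hypothetical potential
g_C = 0.1                                        # hypothetical interaction strength

density = np.abs(psi)**2
gradx = np.gradient(psi, dx)
energy_density = -(
    0.25 * np.gradient(np.gradient(density, dx), dx)
    - 0.5 * np.abs(gradx)**2
    - (g_C * density + V) * density
)
energy = energy_density.sum() / density.sum()    # normalized as in getEnergy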
Example #3
def _filter_small_slopes(hgt, dx, min_slope=0):
    """Masks out slopes with NaN until the slope of all valid points is at
    least min_slope (in degrees).
    """

    min_slope = np.deg2rad(min_slope)
    slope = np.arctan(-np.gradient(hgt, dx))  # beware the minus sign
    # slope at the end always OK
    slope[-1] = min_slope

    # Find the locs where it doesn't work and expand till we got everything
    slope_mask = np.where(slope >= min_slope, slope, np.nan)
    r, nr = label(~np.isfinite(slope_mask))
    for objs in find_objects(r):
        obj = objs[0]
        i = 0
        while True:
            i += 1
            i0 = obj.start - i
            if i0 < 0:
                break
            ngap = obj.stop - i0 - 1
            nhgt = hgt[[i0, obj.stop]]
            current_slope = np.arctan(-np.gradient(nhgt, ngap * dx))
            if i0 <= 0 or current_slope[0] >= min_slope:
                break
        slope_mask[i0:obj.stop] = np.nan
    out = hgt.copy()
    out[~np.isfinite(slope_mask)] = np.nan
    return out
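A possible usage sketch for the function above, assuming it and its scipy.ndimage helpers (label, find_objects) are in scope; the elevation profile is synthetic:

import numpy as np

dx = 100.0                               # grid spacing in metres
x = np.arange(0, 5000.0, dx)
hgt = 2000.0 - 0.3 * x                   # steadily descending surface
hgt[20:25] = hgt[20]                     # insert an artificial flat stretch

filtered = _filter_small_slopes(hgt, dx, min_slope=1)
print(np.isnan(filtered).sum(), "points masked out")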
Example #4
    def test_1d_gaussian_slope_error(self):

        mirror_length=200.0
        step=1.0
        rms_slopes=1.3e-7
        rms_heights = 1e-7
        correlation_length = 10.0

        x, f = simulate_profile_1D_gaussian(step=step, \
                                              mirror_length=mirror_length, \
                                              rms_heights=rms_heights, \
                                              correlation_length=correlation_length,\
                                              renormalize_to_slopes_sd=None)
        slopes = numpy.gradient(f, x[1]-x[0])
        print("test_1d_gaussian: test function: %s, Stdev (not normalized): HEIGHTS=%g.SLOPES=%g"%("test_1d_gaussian_slope_error",f.std(),slopes.std()))

        x, f = simulate_profile_1D_gaussian(step=step, \
                                              mirror_length=mirror_length, \
                                              rms_heights=rms_heights, \
                                              correlation_length=correlation_length,\
                                              renormalize_to_slopes_sd=rms_slopes)
        slopes = numpy.gradient(f, x[1]-x[0])

        print("test_1d_gaussian: test function: %s, SLOPES Stdev (normalized to %g)=%g"%("test_1d_gaussian_slope_error",rms_slopes,slopes.std()))
        assert numpy.abs( rms_slopes - slopes.std() ) < 0.01 * numpy.abs(rms_slopes)

        if do_plot:
            from srxraylib.plot.gol import plot
            plot(x,slopes,title="test_1d_gaussian_slope_error",xtitle="Y",ytitle="slopes Z'")
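The renormalization being tested reduces to scaling the heights by the ratio of target to measured slope error. A self-contained sketch with an invented sinusoidal profile (not the package's simulate_profile_1D_gaussian):

import numpy

rms_slopes = 1.3e-7
x = numpy.arange(0.0, 200.0, 1.0)                 # mirror coordinate
f = 1e-7 * numpy.sin(2 * numpy.pi * x / 10.0)     # toy height profile

slopes = numpy.gradient(f, x[1] - x[0])
f *= rms_slopes / slopes.std()                    # renormalize heights to the target slope error
assert abs(numpy.gradient(f, x[1] - x[0]).std() - rms_slopes) < 0.01 * rms_slopes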
Example #5
def get_beta_deff(savepath, filename):
  filepath = os.path.join(savepath,filename)
  with open(filepath,'r') as f1:
    #col1 = [] #typically mean x-position
    #col2 = [] #typically mean (x-position)^2
    col3 = [] #typically MSD-x as calculated from the above quantities
    for line in f1:
      ls = line.split()
      #Analytical data output is currently set at 3 cols.
      #Make sure you are using the correct data. 
      #col1 += [float(ls[0])]
      #col2 += [float(ls[1])]
      col3 += [float(ls[2])]

  times = np.array(range(1, len(col3) + 1)) #create an array of times
  msd_x = np.array(col3) #create an array of msd_x

  #For effective diffusion:
  deff  = np.true_divide(msd_x, 2*times)

  #For beta(t) plot.
  log_times = np.log10(times)
  log_msd_x = np.log10(msd_x)
  # d(log MSD)/d(log t): pass log-times as coordinates so np.gradient
  # differentiates with respect to them directly
  dd = np.gradient(log_msd_x, log_times)

  return times, deff, log_times, dd, msd_x
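The beta(t) diagnostic is the local slope of the MSD in log-log space. A self-contained sketch with synthetic subdiffusive data (MSD ~ t^0.7, invented for illustration):

import numpy as np

times = np.arange(1, 1001, dtype=float)
msd_x = times**0.7                        # toy subdiffusive MSD

log_times = np.log10(times)
log_msd_x = np.log10(msd_x)
beta = np.gradient(log_msd_x, log_times)  # d(log MSD)/d(log t)

assert np.allclose(beta, 0.7, atol=1e-3)  # recovers the exponent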
Example #6
File: AGS.py Project: fimay/hedp
    def _method_df2(nu, op, alpha=1.0, beta=0.5, nmin_filter=10, log=True):
        """
        Cost function: Use normalized first order derivative

        Parameters:
        -----------
            - nu [ndarray]: photon group boundaries
            - op [ndarray]: spectra
            - alpha [float]: in [0.0, 2.0], default 1.0
                        alpha < 1: low sensitivity to gradients in the spectra
                        alpha > 1: high sensitivity to gradients in the spectra
            - beta [float]: weight of the uniform component mixed into the cost
            - nmin_filter [int]: window size for the minimum filter (used when log=False)
            - log [bool]: if True, differentiate log10 of the spectra
        """
        from scipy.ndimage import minimum_filter1d
        if log:
            err = np.abs(np.gradient(np.gradient(np.log10(op))))
        else:
            err = np.abs(np.gradient(np.gradient(op)))
        err /= err.max()  # normalize to 1 so the power gives predictable results
        err = err**alpha
        if not log:
            err /= minimum_filter1d(op, nmin_filter)
        err /= err.sum()
        err_f = (err*(1-beta)+beta/len(op))
        err_f /= err_f.sum()
        return err_f
Example #7
def spline_do(x, y, ax, k=3, s=7, n=1, diff=True, plot=True, error=False):
    '''
    Fits a spline with degree k and smoothing factor s to x and y.
    Returns ys, d_ys: the y values of the spline and of its derivative.

    '''
    # xs = np.array(x).astype(float)
    spl = UnivariateSpline(x, y, k=k, s=s)  # bound to spl to avoid shadowing the parameter s
    xs = np.linspace(x.values.min(), x.values.max(), len(x))
    ys = spl(xs)
    if plot:
        if not error:
            ax.plot(x, norm(y), '.', label='Data')
        ax.plot(xs, norm(ys), label='Spline fit')
        if error:
            ax.errorbar(x, norm(y), yerr=np.sqrt(norm(y)))

    d_ys = np.gradient(ys)
    dd_ys = np.gradient(d_ys)
    if diff:
        if plot:
            ax.plot(xs, norm(d_ys), '-.', label='1st derivative')
            #ax.plot(xs, norm(dd_ys),'--',label='2nd derivative')
            ax.plot(xs, norm(smooth(dd_ys, 81)),
                    '--', label='2nd derivative (smoothed)')
    if plot:
        ax.legend(loc=0)
    return xs, ys, d_ys, dd_ys
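A smaller standalone version of the spline-plus-derivatives step, using scipy.interpolate.UnivariateSpline on noisy toy data:

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0, 10, 200)
y = np.sin(x) + np.random.normal(0, 0.05, x.size)   # noisy toy data

spl = UnivariateSpline(x, y, k=3, s=7)
xs = np.linspace(x.min(), x.max(), len(x))
ys = spl(xs)
d_ys = np.gradient(ys, xs)       # numerical first derivative on the dense grid
dd_ys = np.gradient(d_ys, xs)    # and second derivative
# UnivariateSpline can also differentiate analytically: spl.derivative()(xs)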
Example #8
def main(args):
    fig = plt.figure()
    axU = fig.add_subplot(211)
    axR = fig.add_subplot(212)

    wall = createobstacle(args.diameter[0],111)
    lat = lb_D2Q9.lattice(wall.shape,[0.,0.],args.tau[0])
    lat.ux[:,0]=anSolution(5e-8,111,lat.nu) #set inlet macroscopic eq. velocity
    lat.fd = lat.eqdistributions()#New equilibrium
    lat.setWalls(wall)
    finlet = np.copy( lat.fd[:,:,:1]) #get inlet macroscopic vel
    lat.updateMacroVariables()

    for i in range(400000):
        if i%50 == 0:
            axU.cla()
            axU.imshow(np.sqrt(lat.ux**2+lat.uy**2),vmin=0,vmax=0.2)
            fname = '%08d.png'%i
            dyUy,dxUy=np.gradient(lat.uy)
            dyUx,dxUx=np.gradient(lat.ux)
            vort=np.abs(dxUy-dyUx)
            axR.cla()
            axR.imshow(vort,vmin=0,vmax=0.1)
            fig.savefig('tot'+fname)
        
        lat.collision()
        lat.streamming()
        lat.onWallBBNS_BC(wall)
        lat.fd[:,:,-1]=np.copy(lat.fd[:,:,-2]) #zero gradient at the outlet
        lat.fd[:,:,:1]=np.copy(finlet)
        lat.updateMacroVariables()
    np.savez('out.npz',lat.ux,lat.uy,lat.rho,wall)
Example #9
    def airfoil(self):
        """Biconvex airfoil"""
        L = self.L
        N = self.N
        R = 210
        theta_arc = np.arcsin(L/2 / R) * 2
        pi = np.pi
        theta_up = np.linspace((pi+theta_arc) / 2,
                               +(pi-theta_arc) / 2, N)
        #theta_up = theta[::-1]
        theta_down = np.linspace((3*pi-theta_arc) / 2,
                                 +(3*pi+theta_arc) / 2, N)

        X_up = R * np.cos(theta_up)
        Y_up = R * np.sin(theta_up)
        X_down = R * np.cos(theta_down)
        Y_down = R * np.sin(theta_down)

        shift_r = X_up[0]
        X_up -= shift_r
        X_down -= shift_r
        shift_up = Y_up[0]
        shift_down = Y_down[0]
        Y_up = Y_up - shift_up
        Y_down = Y_down - shift_down

        X = np.concatenate((X_up, X_down))
        Y = np.concatenate((Y_up, Y_down))
        slope_up = np.gradient(Y_up) / np.gradient(X_up)
        slope_down = np.gradient(Y_down) / np.gradient(X_down)
        angle = np.arctan(np.concatenate((slope_up, slope_down)))
        return X, Y, angle
Example #10
    def __init__(self, sigReaders):
        self.sigReaders = sigReaders

        sigsTimeValues = [sr.getSignal() for sr in self.sigReaders]
        sigsValues = [values for times, values in sigsTimeValues]
        # compute each signal's gradient once, then take the extrema
        self.sigsRanges = []
        for values in sigsValues:
            grad = np.gradient(values)
            self.sigsRanges.append((min(values), max(values), min(grad), max(grad)))
Example #11
def vorticity(x, y, u, v, coord_type='geographic'):
    """
    USAGE
    -----
    zeta = vorticity(x, y, u, v, coord_type='geographic')

    Calculates the vertical component 'zeta' (dv/dx - du/dy, in 1/s) of the
    relative vorticity vector from the 'u' and 'v' velocity arrays (in m/s)
    specified in spherical coordinates by the 'lon' and 'lat' 2D meshgrid-type
    arrays (in degrees).
    """
    x, y, u, v = map(np.array, (x, y, u, v))

    if coord_type=='geographic':
        dx, dy = deg2m_dist(x, y)  # x and y hold lon and lat here
    elif coord_type=='cartesian':
        dy, _ = np.gradient(y)
        _, dx = np.gradient(x)
    elif coord_type=='dxdy':
        dx, dy = x, y

    duy, _ = np.gradient(u)
    _, dvx = np.gradient(v)

    dvdx = dvx/dx
    dudy = duy/dy
    vrt = dvdx - dudy # [1/s]

    return vrt
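For the 'cartesian' branch, here is the same calculation in compact, self-contained form; the solid-body rotation field is a made-up test case whose vorticity is exactly 2:

import numpy as np

y, x = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50), indexing='ij')
u, v = -y, x                           # solid-body rotation

dy = np.gradient(y, axis=0)
dx = np.gradient(x, axis=1)
dudy = np.gradient(u, axis=0) / dy
dvdx = np.gradient(v, axis=1) / dx
zeta = dvdx - dudy                     # relative vorticity, ~2 everywhere

assert np.allclose(zeta, 2.0)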
Example #12
def kalman_smooth_dataframe(df, arena=None, smooth=True):
    if arena:
        fsx = arena.scale_x
        fsy = arena.scale_y
    else:
        fsx = fsy = lambda _v: _v

    #we need dt in seconds to calculate velocity. numpy returns nanoseconds here
    #because this is an array of type datetime64[ns] and I guess it retains the
    #nano part when casting
    dt = np.gradient(df.index.values.astype('float64')/SECOND_TO_NANOSEC)

    if smooth:
        print "\tsmoothing (%r)" % arena
        #smooth the positions, and recalculate the velocities based on this.
        kf = Kalman()
        smoothed = kf.smooth(df['x'].values, df['y'].values)
        _x = fsx(smoothed[:,0])
        _y = fsy(smoothed[:,1])
    else:
        _x = fsx(df['x'].values)
        _y = fsy(df['y'].values)

    _vx = np.gradient(_x) / dt
    _vy = np.gradient(_y) / dt
    _v = np.sqrt( (_vx**2) + (_vy**2) )

    df['x'] = _x
    df['y'] = _y
    df['vx'] = _vx
    df['vy'] = _vy
    df['v'] = _v

    return dt
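The dt handling above (convert the datetime64[ns] index to seconds, then divide the positional gradients) can also be expressed by passing time as coordinates. A sketch on synthetic data, without the Kalman step:

import numpy as np

t = np.arange(0, 10, 0.1)              # seconds; stands in for the converted index
x = 3.0 * t                            # toy trajectory moving at 3 units/s
y = np.zeros_like(t)

vx = np.gradient(x, t)                 # pass time as coordinates instead of dividing by dt
vy = np.gradient(y, t)
v = np.hypot(vx, vy)

assert np.allclose(v, 3.0)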
Example #13
def read_HRRS_data(ff):

	"""
	Read in a .dat file from SPARC high-res radiosonde data 
	Input ff is a string pointing to the full path of the desired file. 
	"""

	# here is a dict that gives bad values for different columns 
	# alert: this is still incomplete 
	badvals = {'Temp':['999.0'],'Alt':['99.0','99999.0'],'Lat':['999.000'],'Lon':['9999.000']}
	
	D= pd.read_csv(ff,skiprows=13,error_bad_lines=False,delim_whitespace=True,na_values=badvals)
	colnames=list(D.columns.values)

	# kick out the first two rows - they hold units and symbols 
	D.drop(D.index[[0,1]], inplace=True)

	# also make sure that lat, lon, pressure, altitude, and temp are numerics 
	vars_to_float = ['Press','Temp','Lat','Lon','Alt']
	D[vars_to_float] = D[vars_to_float].astype(float)

	# compute the vertical gradient of potential temp and, from that, buoyancy frequency 
	P0=1000.0
	Rd = 286.9968933                # Gas constant for dry air        J/degree/kg
	g = 9.80616                     # Acceleration due to gravity       m/s^2
	cp = 1005.0                     # heat capacity at constant pressure    m^2/s^2*K
	theta=(D['Temp']+273.15)*(P0/D['Press'])**(Rd/cp)		# note that this includes conversion of Celsius to Kelvin  
	# differentiate theta with respect to altitude; np.gradient treats an
	# array second argument as coordinate values
	dthetadZ = np.gradient(theta, D['Alt'])
	D["N2"]=(g/theta)*dthetadZ

	return(D)
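The buoyancy-frequency computation reduces to a single coordinate-gradient call. A sketch with an idealized potential-temperature profile (the exponential form is invented; g as in the function above):

import numpy as np

g = 9.80616
alt = np.linspace(0, 10000.0, 200)           # altitude in m
theta = 300.0 * np.exp(alt / 30000.0)        # toy potential-temperature profile

dthetadZ = np.gradient(theta, alt)           # d(theta)/dz with altitude as coordinates
N2 = (g / theta) * dthetadZ                  # buoyancy frequency squared, 1/s^2
# for this profile N2 is ~g/30000 ~ 3.3e-4 s^-2 everywhere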
Example #14
def wrf_pv( U, V, F, THETA, PRES, MAPFAC_M, dx ):
    """Calculate the potential vorticity given the U and V vector components in m/s,
    the Coriolis sine latitude term (F) in s^-1, THETA potential temperature in degrees
    Kelvin, PRES pressure in hPa or mb, the map scale factor on mass grid and the gridspacing 
    dx in meters. U, V, F, THETA, and PRES must be 4D arrays.
    ---------------------
    U (numpy.ndarray): ndarray of U vector values in m/s
    V (numpy.ndarray): ndarray of V vector values in m/s
    F (numpy.ndarray): ndarray of Coriolis sine latitude values in s^-1
    THETA (numpy.ndarray): ndarray of potential temperature in degrees Kelvin
    PRES (numpy.ndarray): ndarray of pressure in hPa or mb same shape as THETA
    MAPFAC_M (numpy.ndarray): 2D of map scale factor on mass grid. 
    dx (float or int): float or integer of U and V gridspacing in meters
    ---------------------
    returns:
        numpy.ndarray of potential vorticity values in ( K * m^2 * kg^-1 * s^-1 ) * 10^6 
        ( or 1 PVU * 10^6).
    """
    assert U.shape == V.shape == THETA.shape == PRES.shape, 'Arrays are different shapes. They must be the same shape.'
    ## pres in hPa needs to convert to Pa
    PRES = PRES * 100
    dx = dx * MAPFAC_M
    dy = dx
    grav = 9.8

    dVt,dVp,dVy,dVx = numpy.gradient( V )
    dUt,dUp,dUy,dUx = numpy.gradient( U )
    dTt,dTp,dTy,dTx = numpy.gradient( THETA )
    dPt,dp,dPy,dPx = numpy.gradient( PRES )
    return ( -grav * ( -dVp/dp * dTx/dx + dUp/dp * dTy/dy + ( dVx/dx - dUy/dy + F ) * dTp/dp ) ) * pow(10, 6)
Example #15
def extractAllDescriptors(signal):
    """
    Extracts the descriptors expected for the analysis of a given audio file.
    """

    described = {}

    described['Silence'] = _silence = silence(signal)
    signal = signal[config.hopSize * _silence[0]:config.hopSize * _silence[1]] / np.max(
        signal)  # Keep only the part with sound and normalize so all signals are comparable.
    described['mfcc'] = mfccs(signal)
    described['Inharmonicity'] = inharmonicity_tesis(signal)
    described['Energy'] = energy(signal)
    described['LogAttackTime'] = log_attack_time(signal)
    described['Standard-Dev'] = standard_dev(signal)
    described['Variance'] = variance(signal)
    described['Skewness'] = skewness(signal)
    described['kurtosis'] = kurtosis(signal)

    # described['mfcc-1st'] = np.gradient(described['mfcc'])[1]
    # described['mfcc-2nd'] = np.gradient(described['mfcc-1st'])[1]
    described['Inharmonicity-1st'] = np.gradient(described['Inharmonicity'])
    described['Inharmonicity-2nd'] = np.gradient(described['Inharmonicity-1st'])

    described['mfcc-Std-f'], described['mfcc-Var-f'], described['mfcc-Skew-f'], described['mfcc-Kurt-f']\
        = mfcc_std_frequency(described)

    return described
Example #16
    def calculate_single_weight(self, angle):
        """Finds the strongest gradient in the image"""
        obj = ShearTool(self.img)
        corrected = obj.shear(angle)
        p = BinaryProjection(corrected, Projection.TYPE_HORIZONTAL)
        # p.debug()
        grad = np.gradient(p.get_projection())  # compute the projection gradient once
        return max(grad) + abs(min(grad))
Example #17
def march(x,u_e,nu):
    dx = numpy.diff(x)
    du_e = numpy.gradient(u_e, x)       # differentiate u_e with respect to x
    delta = numpy.full_like(x,0.)
    lam = numpy.full_like(x,lam0)

    # Initial conditions must be a stagnation point. If u_e[0] is (near) zero
    # we start there; otherwise, if x[0]>0, assume stagnation is at x=0 and
    # integrate from x=0..x[0].
    if u_e[0]<0.01:                     # stagnation point
        delta[0] = numpy.sqrt(lam0*nu/du_e[0])
    elif x[0]>0:                        # just downstream
        delta[0] = numpy.sqrt(lam0*nu*x[0]/u_e[0])
        delta[0] += 0.5*x[0]*g_pohl(delta[0],0,u_e,du_e,nu)
        lam[0] = delta[0]**2*du_e[0]/nu
    else:
        raise ValueError('x=0 must be stagnation point')

    # march!
    for i in range(len(x)-1):
        delta[i+1] = heun(g_pohl,delta[i],i,dx[i],
                          u_e,du_e,nu)  # ...additional arguments
        lam[i+1] = delta[i+1]**2*du_e[i+1]/nu

        if lam[i+1] < -12: i-=1; break  # separation condition

    return delta,lam,i+1                # return with separation index
Example #18
def hessian_matrix(image, sigma=1, mode="constant", cval=0):
    """Compute Hessian matrix.

    The Hessian matrix is defined as::

        H = [Hxx Hxy]
            [Hxy Hyy]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective x- and y-directions.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    Hxx : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hxy : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hyy : ndarray
        Element of the Hessian matrix for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> Hxy
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """

    image = img_as_float(image)

    gaussian_filtered = ndi.gaussian_filter(image, sigma=sigma, mode=mode, cval=cval)

    gradients = np.gradient(gaussian_filtered)
    axes = range(image.ndim)
    H_elems = [np.gradient(gradients[ax0], axis=ax1) for ax0, ax1 in combinations_with_replacement(axes, 2)]

    if image.ndim == 2:
        # The legacy 2D code followed (x, y) convention, so we swap the axis
        # order to maintain compatibility with old code
        H_elems.reverse()
    return H_elems
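The double np.gradient construction generalizes to any dimensionality through the axis keyword. A small 2D check on a separable quadratic whose Hessian is constant; this is a sketch of the technique, not skimage's API:

import numpy as np
from itertools import combinations_with_replacement

y, x = np.mgrid[0:20, 0:20].astype(float)
image = x**2 + 3 * x * y                      # Hxx=2, Hxy=3, Hyy=0

gradients = np.gradient(image)
H = {(a0, a1): np.gradient(gradients[a0], axis=a1)
     for a0, a1 in combinations_with_replacement(range(image.ndim), 2)}

# axis 0 is y, axis 1 is x here; crop the borders where one-sided
# differences are less accurate
assert np.allclose(H[(1, 1)][2:-2, 2:-2], 2.0)   # Hxx
assert np.allclose(H[(0, 1)][2:-2, 2:-2], 3.0)   # Hxy
assert np.allclose(H[(0, 0)][2:-2, 2:-2], 0.0)   # Hyy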
Example #19
def getInflectionPoints(a, smoonum=4,sign='negative'):
    """
    Returns all of the negative or positive inflection points in a plot by first
    smoothing it smoonum times with a running mean and then looking for points where
    the second gradient is zero and the first gradient is either positive or negative.
    Inputs:
        a - an array of numbers
    Key-Word Arguments (kwargs):
        smoonum = 4 - number of times to smooth the array to remove noise. Default
            set to 4. 
        sign = 'negative' - specifies whether to look for positive or negative 
            inflection points. 
    """
    smoo = a.copy()
    for i in range(smoonum):
        smoo = smoothed(smoo)
    concav = 10*np.gradient(np.gradient(smoo))
    Zx,Zy = getZeroes(concav)
    aberr = np.gradient(concav)
    retX,retY = [],[]

    for x in Zx:
        if sign=='negative':
            if aberr[x]<0:
                retX.append(x)
                retY.append(a[x])
        elif sign=='positive':
            if aberr[x]>0:
                retX.append(x)
                retY.append(a[x])
        else:
            retX.append(x)
            retY.append(a[x])
    return retX,retY
Example #20
def draw(x, y, x_, x_fit, y_interp, group, samplenum, result):
    # start to draw the figures
    y_df = np.gradient(y)
    y_df2 = np.gradient(y_df)
    fig = plt.figure(figsize=(20, 10), dpi=50, facecolor='white')
    fig.suptitle(samplenum)
    ax = fig.add_subplot(121)
    ax.plot(np.log10(x), y)
    ax.plot(np.log10(x), y_df)
    ax.plot(np.log10(x), y_df2)
    #ax.set_xscale('log')
    #ax.set_xlim(0.1, 1000)
    ax1 = fig.add_subplot(122)
    ax1.scatter(x, y, c='red')
    #ax1.plot(10**x_fit, y_interp)
    ax1.plot(x_, func(x_fit, result.params))
    ax1.set_xscale('log')
    ax1.set_xlim(0.1, 10**3)
    ax1.set_ylim(0, y.max()*1.2)

    for i in np.arange(group):
        p = 'p'+str(i)
        a = 'a'+str(i)
        b = 'b'+str(i)
        para = [result.params[p].value, result.params[a].value, result.params[b].value]
        ax1.plot(x_,single(x_fit, para))
    #ax1.plot(10**x_fit, y_-sum(y_test))
    return fig
Example #21
def setupdetectorresponse(\
         BSgridA, BSgridB, BSgridC, BSgridD,
         nxi, nyi, dnx, dny):

    # These are rules for interpolating the response functions
    Arule = scipy.interpolate.interp2d(nxi, nyi, BSgridA.T, kind='linear')
    Brule = scipy.interpolate.interp2d(nxi, nyi, BSgridB.T, kind='linear')
    Crule = scipy.interpolate.interp2d(nxi, nyi, BSgridC.T, kind='linear')
    Drule = scipy.interpolate.interp2d(nxi, nyi, BSgridD.T, kind='linear')

    # Calculating the gradients of the response functions; np.gradient
    # divides by the grid spacings when they are passed per axis
    KAx, KAy = np.gradient(BSgridA, dnx, dny)
    KBx, KBy = np.gradient(BSgridB, dnx, dny)
    KCx, KCy = np.gradient(BSgridC, dnx, dny)
    KDx, KDy = np.gradient(BSgridD, dnx, dny)

    # These are rules for interpolating the response functions
    KAxrule = scipy.interpolate.interp2d(nxi, nyi, KAx.T, kind='linear')
    KAyrule = scipy.interpolate.interp2d(nxi, nyi, KAy.T, kind='linear')
    KBxrule = scipy.interpolate.interp2d(nxi, nyi, KBx.T, kind='linear')
    KByrule = scipy.interpolate.interp2d(nxi, nyi, KBy.T, kind='linear')
    KCxrule = scipy.interpolate.interp2d(nxi, nyi, KCx.T, kind='linear')
    KCyrule = scipy.interpolate.interp2d(nxi, nyi, KCy.T, kind='linear')
    KDxrule = scipy.interpolate.interp2d(nxi, nyi, KDx.T, kind='linear')
    KDyrule = scipy.interpolate.interp2d(nxi, nyi, KDy.T, kind='linear')

    return \
    Arule, Brule, Crule, Drule,\
    KAxrule, KAyrule, KBxrule, KByrule, KCxrule, KCyrule, KDxrule, KDyrule
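The rewrite above relies on np.gradient accepting one spacing per axis and dividing for you. A quick equivalence check with invented spacings:

import numpy as np

dnx, dny = 0.5, 0.25
grid = np.random.rand(64, 64)

gx1, gy1 = np.gradient(grid)
gx1, gy1 = gx1 / dnx, gy1 / dny         # divide manually, as the original code did
gx2, gy2 = np.gradient(grid, dnx, dny)  # let np.gradient apply the spacings

assert np.allclose(gx1, gx2) and np.allclose(gy1, gy2)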
Example #22
def PlotTSUMU():
  import matplotlib.pyplot as plt
  domain = history[:, 0]

  plot_deriv = True

  if plot_deriv:
    # plot derivative of norm...
    deriv = np.gradient(history[:,3])
    deriv2 = np.gradient(deriv)

    # plt.plot(domain, history[:, 3], color='blue') # Norm of S0
    plt.plot(cjr_data, color='black')
    # plt.plot(domain, deriv2, color='red')
    plt.plot(cjr_detections, np.ones(len(cjr_detections)), marker='o', color='r', ls='')


    # from scipy.interpolate import Rbf, InterpolatedUnivariateSpline
    # rbf = Rbf(domain, deriv)


    # plt.show()
    # return

  plt.plot(domain, history[:, 1], color='red') # Minimum of first window
  plt.plot(domain, history[:, 2], color='green') # Maximum of second window
  plt.plot(domain, history[:, 3], color='blue') # Norm of S0

  # Plot the detections.
  plt.plot(detections, np.ones(len(detections)) * 500, marker='o', color='r', ls='')

  # Plot the thresholds.
  plt.plot(domain, np.ones(len(domain)) * T1, color='#aadddd') # Minimum must be lower
  plt.plot(domain, np.ones(len(domain)) * T2, color='#ddaadd') # Maximum must be higher
  plt.show()
Example #23
def exportGradientImage(sigma=3.0):
	x, y = 5000, 3500
	size = 2000
	topleft = (x, y)
	bottomright = (x+size, y+size)
	image = getImageByName(imagefilename='../resources/images/registered-to-2008-07-24-09_55.tif',
						   topleft=topleft,
						   bottomright=bottomright)
	smooth = vigra.filters.gaussianSmoothing(image, sigma)
	smoothswap = smooth.swapaxes(0, 1)
	m, n = vigra.Image((size, size)), vigra.Image((size, size))
	
	for i in range(size):
		grad = np.gradient(smooth[i])
		for j in range(len(grad)):
		    m[i][j] = grad[j]
		    
	for i in range(size):
		grad = np.gradient(smoothswap[i])
		for j in range(len(grad)):
		    n[j][i] = grad[j]
		    
	out = m + n
	vigra.impex.writeImage(vigra.colors.linearRangeMapping(out), '/home/max/Desktop/out.png')
	vigra.impex.writeImage(vigra.colors.linearRangeMapping(m), '/home/max/Desktop/m.png')
	vigra.impex.writeImage(vigra.colors.linearRangeMapping(n), '/home/max/Desktop/n.png')
	
	return smooth
Example #24
def rect_guess(self, data, x=None, **kwargs):
    if x is None:
        return
    ymin, ymax = min(data), max(data)
    xmin, xmax = min(x), max(x)

    ntest = min(2, len(data)//5)
    step_up = (data[:ntest].mean() > data[-ntest:].mean())

    dydx = savitzky_golay(np.gradient(data)/np.gradient(x), 5, 2)
    cen1 = x[np.argmax(dydx)]
    cen2 = x[np.argmin(dydx)]
    if step_up:
        center1 = cen1 # + (xmax+xmin)/4.0)/2.
        center2 = cen2 # + 3*(xmax+xmin)/4.0)/2.
    else:
        center1 = cen2 # + (xmax+xmin)/4.0)/2.0
        center2 = cen1 # + 3*(xmax+xmin)/4.0)/2.0

    pars = self.make_params(amplitude=(ymax-ymin),
                            center1=center1, center2=center2)

    pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/5.0, min=0.0)
    pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/5.0, min=0.0)
    return update_param_vals(pars, self.prefix, **kwargs)
Example #25
def _optimize_num_overlap_pixels(data):
    """Estimate the overlap shift of a 0-360 degree scan by cross-correlating
    the gradient magnitudes of its two halves.
    """
    num_projection, num_slices, num_pixels = data.shape
    if num_projection % 2 != 0: # if odd
        img_first_half = np.squeeze(data[1:num_projection//2 + 1, num_slices//2, :])
        img_second_half = np.squeeze(data[num_projection//2:num_projection - 1, num_slices//2, :])
    else:
        img_first_half = np.squeeze(data[1:num_projection//2 + 1, num_slices//2, :])
        img_second_half = np.squeeze(data[num_projection//2:num_projection, num_slices//2, :])
    ind = range(0, num_pixels)[::-1]
    img_second_half = img_second_half[:, ind]
    
    img_first_half = ndimage.filters.gaussian_filter(img_first_half, sigma=2)
    img_second_half = ndimage.filters.gaussian_filter(img_second_half, sigma=2)
    
    gx1, gy1 = np.gradient(img_first_half)
    gx2, gy2 = np.gradient(img_second_half)
    img_first_half = np.power(gx1, 2) + np.power(gy1, 2)
    img_second_half = np.power(gx2, 2) + np.power(gy2, 2)
    
    img1 = np.fft.fft(img_first_half)
    img2 = np.fft.fft(img_second_half)
    tmp = np.real(np.fft.ifft(np.multiply(np.conj(img2), img1)))
    return np.argmax(np.sum(np.abs(tmp), axis=0))
Example #26
def deblurImage(blurImg,iteration,mode=0):
    niter=0
    edge=getEdge(blurImg*255,mode)
    stDesv=np.std(edge)
    grady, gradx=np.gradient((blurImg*255))
    deblurImg=np.copy(blurImg)
    normalizar=False
    #print "Gradiente antes", np.sum(gradx+grady) 
    desv=np.std(deblurImg)
    extraGain=1.0
    while(niter<iteration):
        desv=np.std(deblurImg)
        #Dprint "Desviacion estandar borrosa", desv
        for j in range(deblurImg.shape[0]-1):
            for k in range(deblurImg.shape[1]-1):
                gain=gradx[j,k]*stDesv+grady[j,k]*stDesv
                if(gain<extraGain):
                    extraGain=gain
                deblurImg[j,k]=deblurImg[j,k]+gain
                if(deblurImg[j,k]<0.0 or deblurImg[j,k]>255.0):
                    normalizar=True
        deblurImg=extraGain/10.0+deblurImg
        if normalizar:
            normalize(deblurImg)
        
        edge=getEdge(deblurImg,mode)
        stDesv=np.std(edge)
        niter=niter+1
    return deblurImg
Example #27
def propagate_tie(mu, delta, pixel_size, dist):
    """
    Propagate emitting x-ray wave based on Transport of Intensity.

    Parameters
    ----------
    mu : ndarray
        3D tomographic data for attenuation.
    delta : ndarray
        3D tomographic data for refractive index.
    pixel_size : float
        Detector pixel size in cm.
    dist : float
        Propagation distance of the wavefront in cm.

    Returns
    -------
    ndarray
        3D propagated tomographic intensity.
    """
    i1 = np.exp(-mu)
    i2 = np.zeros(delta.shape)
    for m in range(delta.shape[0]):
        dx, dy = np.gradient(delta[m], pixel_size)
        d2x, _ = np.gradient(i1[m] * dx, pixel_size)
        _, d2y = np.gradient(i1[m] * dy, pixel_size)
        i2[m] = i1[m] + dist * (d2x + d2y)
    return i2
Example #28
File: hmm.py Project: emb2162/sima
def _threshold_gradient(im):
    """Indicate pixel locations with gradient below the bottom 10th percentile

    Parameters
    ----------
    im : array
        The mean intensity images for each channel.
        Size: (num_channels, num_rows, num_columns).

    Returns
    -------
    array
        Binary values indicating whether the magnitude of the gradient is below
        the 10th percentile.  Same size as im.

    """

    if im.shape[0] > 1:
        # Calculate directional relative derivatives
        _, g_x, g_y = np.gradient(np.log(im))
    else:
        # Calculate directional relative derivatives
        g_x, g_y = np.gradient(np.log(im[0]))
        g_x = g_x.reshape([1, g_x.shape[0], g_x.shape[1]])
        g_y = g_y.reshape([1, g_y.shape[0], g_y.shape[1]])
    gradient_magnitudes = np.sqrt((g_x ** 2) + (g_y ** 2))
    below_threshold = []
    for chan in gradient_magnitudes:
        threshold = mquantiles(chan[np.isfinite(chan)].flatten(), [0.1])[0]
        below_threshold.append(chan < threshold)
    return np.array(below_threshold)
Example #29
    def get_quantum_driving_parameters(self):
        """Return the adapted parameters (eps_prime, delta, theta_prime) to
        obtain adiabatic dynamics for arbitrary length.
        """
        eps, delta = self.get_cycle_parameters()
        eps_dot, delta_dot = [np.gradient(x, self.dt) for x in (eps, delta)]

        mixing_angle_dot = 2.*np.abs(self.B0)*(delta*eps_dot-delta_dot*eps)
        mixing_angle_dot /= (delta**2 + 4.*np.abs(self.B0)**2*eps**2)
        self.mixing_angle_dot = mixing_angle_dot

        self.mixing_angle = np.arctan(2.*np.abs(self.B0)*eps/delta)
        self.mixing_angle_dot_alt = np.gradient(self.mixing_angle, self.dt)

        theta_prime = -2.*np.arctan2(mixing_angle_dot, (2*np.abs(self.B0)*eps))

        B_prime = (-1j * (np.exp(1j*theta_prime) + 1.) * np.pi**2 /
                   self.W**3 / np.sqrt(self.k0*self.k1))

        eps_prime = np.sqrt(4.*np.abs(self.B0)**2*eps**2 + mixing_angle_dot**2)
        eps_prime /= 2.*np.abs(B_prime)

        # avoid divergences
        for n in (0, -1):
            eps_prime[n] = 0.0

        self.eps_prime = eps_prime
        self.delta_prime = delta
        self.theta_prime = theta_prime

        return eps_prime, delta, theta_prime
Example #30
def V(U):
    """ Spatial Viscous Fluxes at cell centres """
    rho, momx, momy, E = rollaxis(U, U.ndim-1) # shape to (neq,nx,ny)
    rho = rho.copy() # GASMODEL needs contiguous arrays
    u = momx/rho 
    v = momy/rho
    e = E/rho - 0.5*(u**2+v**2) # Gas internal energy (J/kg)
    T,Xi = GASMODEL.decode_conserved(rho, e)

    mu = 8.6412e-6*(T/288.16)**1.5*(398.16)/(T+110) # Sutherland viscosity law
    k = mu*14320.0/0.70 # This thing is important and hard to calculate

    dudx,dudy = gradient(u,dx,dy)
    dvdx,dvdy = gradient(v,dx,dy)
    dTdx,dTdy = gradient(T,dx,dy)
    tauxx = 2.0/3.0*mu*(2*dudx-dvdy)
    tauxy = mu*(dudy + dvdx)
    qx = -k*dTdx

    nx,ny,neq = U.shape
    C = zeros((nx,ny,neq)) # Input is cells, output is faces
    Q = rollaxis(C,C.ndim-1) # View of C with nicer indexing
    Q[1] = -tauxx
    Q[2] = -tauxy
    Q[3] = -u*tauxx -v*tauxy + qx
    return C
Example #31
def test_calc_lwc_gradient():
    from cloudnetpy.utils import l2norm
    ERROR_OBJ.lwc = np.ma.array([[0.1, 0.2, 0.3],
                                 [0.1, 0.3, 0.6]])
    expected = l2norm(*np.gradient(ERROR_OBJ.lwc))
    assert_array_almost_equal(ERROR_OBJ._calc_lwc_gradient(), expected)
Example #32
def Velocity(P_deg, framerate):
    ret = copy(P_deg)
    # Gradient in deg/sec or px/sec
    for k in ret:
        ret[k] = np.gradient(ret[k]) * framerate
    return ret

Example #33
#lon = lon[-1,:,:]
#lat = lat[-1,:,:]
#latin = lat[:25,:,:]
#longin = lon[:25,:,:]

#x = np.linspace(-grid_spacing*(xdim-1)/2,grid_spacing*(xdim-1)/2,xdim)
x = np.linspace(0,grid_spacing*(xdim-1),xdim)
dx = x[1]-x[0]
#y = np.linspace(-grid_spacing*(ydim-1)/2,grid_spacing*(ydim-1)/2,ydim)
y = np.linspace(0,grid_spacing*(ydim-1),ydim)
dy = y[1]-y[0]
x, y = np.meshgrid(x,y)

dudy,dudx = np.gradient(u,dy,dx)
dvdy,dvdx = np.gradient(v,dy,dx)


s1 = np.ma.empty([ydim,xdim])
s2 = np.ma.empty([ydim,xdim])
J = np.array([[0, 1], [-1, 0]])
for i in range(ydim):
    for j in range(xdim):
        # skip points where any of the required fields is masked
        if not any(np.ma.is_masked(val) for val in
                   (dudx[i,j], dudy[i,j], dvdx[i,j], dvdy[i,j], u[i,j], v[i,j])):
            Utemp = np.array([u[i, j], v[i, j]])
            Grad = np.array([[dudx[i, j], dudy[i, j]], [dvdx[i, j], dvdy[i, j]]])
            S = 0.5*(Grad + np.transpose(Grad))
            eigvals = np.linalg.eig(S)[0]  # eigenvalues of the strain-rate tensor
            s1[i,j] = 3600*np.min(eigvals)
            s2[i,j] = 3600*np.max(eigvals)
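Each grid point above diagonalizes the symmetric strain-rate tensor S. For a single point this looks as follows; the velocity-gradient entries are made up, and np.linalg.eigvalsh is used since S is symmetric:

import numpy as np

# velocity-gradient entries at one grid point (made-up values)
dudx, dudy, dvdx, dvdy = 0.1, -0.3, 0.2, -0.1

Grad = np.array([[dudx, dudy], [dvdx, dvdy]])
S = 0.5 * (Grad + Grad.T)               # symmetric strain-rate tensor
eigvals = np.linalg.eigvalsh(S)         # real eigenvalues, ascending
s1, s2 = 3600 * eigvals[0], 3600 * eigvals[-1]  # scaled by 3600 as in the loop above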
Example #34
    def plot_score_slvol(self, ens_scr):

        #import analyse_timeseries as at; reload(at)
        #import glob
        #ts_path_names = os.path.join(cf.resultpath,cf.pism_file_name.replace(str(cf.fillnum),str("*")))
        #ensemble_members = glob.glob(ts_path_names)
        #ts_data = at.Ensemble_TimeSeries(ensemble_members)

        #print ens_scr # dict of ensemble members and individual score(s)

        #fig24, [ax24a,ax24b] = plt.subplots(2, 1,figsize=(7, 6),num=24) #sharex='col', sharey='row'
        fig24, ax24b = plt.subplots(1, 1, figsize=(9, 4),
                                    num=24)  #sharex='col', sharey='row'

        ax24a = ax24b.twinx()
        #ax24a.grid('on')

        plotgradient = False
        plottopresent = False

        if self.grvol:
            plt.title("grounded ice volume anomaly")
            ax24a.set_ylabel("(PD obs - mod) in mio km3")
            volunit = "mio. km3"
            volscale = 1.0e15
            varname = "ice_volume_glacierized_grounded"
            printname = self.printname.replace("placeholder", "grvolall")
            slvol0 = 26.2914  #16km
            varmax = 10
        else:
            #plt.title("equivalent global-mean sea-level contribution (ESL)")
            if plottopresent:
                ax24a.set_ylabel("(PD - ESL) in m sea-level equivalent")
            else:
                #ax24a.set_ylabel("(PD obs - ESL) in m sea-level equivalent")
                ax24a.set_ylabel("sea-level anomaly in m")
                ax24b.set_ylabel("sea-level relevant volume in m")
            volunit = "m SLE"
            volscale = 1.0
            varname = "slvol"
            printname = self.printname.replace("placeholder", "slvolall")
            slvol0 = self.slvol0
            varmax = 20

        ax24a.plot([-125, 0], [0, 0],
                   color="k",
                   linewidth=1,
                   linestyle="dashed")
        #ax24a.axvline(-14.35,color="k",linewidth=1,linestyle="dotted")
        #ax24a.set_xlabel("time [kyr]")
        ax24a.set_xlabel("time in kyr")
        #x24a.axis([-125,0,-varmax,varmax])
        ax24a.axis([-125, 0, -varmax / 2, varmax])
        ax24a.set_xticks(np.arange(-120, varmax, varmax))

        if True:  #include penultimate glacial cycle
            ax24a.plot([-210, 0], [0, 0],
                       color="k",
                       linewidth=1,
                       linestyle="dashed")
            #ax24a.axis([-210,0,-varmax,varmax])
            ax24a.axis([-210, 0, -varmax / 2, varmax])
            ax24a.set_xticks(np.arange(-200, 25, 25))
            #print cf.start_from_file

        if True:  #show deglaciation period
            #ax24a.plot([-15,0],[0,0],color="k",linewidth=1,linestyle="dashed")
            #ax24b.plot([-15,0],[slvol0,slvol0],color="k",linewidth=1,linestyle="dashed")
            #ax24a.axis([-15,0,-varmax/2,varmax])
            ax24b.axis([-15, 0, -varmax / 2 + slvol0, varmax + slvol0])
            #ax24a.set_xticks(np.arange(-14,2,2))
            ax24b.set_xticks(np.arange(-14, 2, 2))
            if plotgradient:
                ax24a.axis([-15, 0, -0.02, 0.02])

        #ax24a.yaxis.grid(True)
        #ax24a.grid(True, which='both')
        ax24b.set_ylim([-varmax / 2 + slvol0, varmax + slvol0])

        for i in [2529, 2533, 2534, 2535]:
            #initfile='/p/tmp/albrecht/pism18/pismOut/forcing/'
            #initfile='/p/projects/pism/albrecht/pism_paleo_ensemble/ensemble_2400-2540/'
            #TODO
            initfile = cf.resultpath2 + 'forcing' + str(
                i) + '_TPSO/results/ts_forcing_' + str(
                    cf.resolution) + 'km_' + str(cf.runtime) + 'yrs.nc'
            if os.path.exists(initfile):
                tsdata = nc.Dataset(initfile, 'r')
                slvol = np.squeeze(tsdata.variables[varname][:]) / volscale
                sltime = np.squeeze(
                    tsdata.variables["time"][:]) / cf.seconds_per_year * 1e-3
                tsdata.close()
                if plottopresent:
                    slvol0 = slvol[-1]
                if plotgradient:
                    ax24a.plot(sltime,
                               np.gradient(slvol),
                               color=cf.colorscheme[1],
                               linewidth=1,
                               zorder=2,
                               alpha=0.8)
                else:
                    #ax24a.plot(sltime,slvol0-slvol,color=cf.colorscheme[1],linewidth=1,zorder=0,alpha=0.8)
                    ax24a.plot(sltime[0:85000],
                               slvol[0:85000] - slvol0,
                               color=cf.colorscheme[1],
                               linewidth=1,
                               zorder=2,
                               alpha=0.8)
                #print i,sltime[-5000],slvol[-5000]

        ### aggregated score normalized to probability sum(P)=1
        agg_score = {k: v[0] for k, v in ens_scr.iteritems()}
        agg_score_prop = {
            k: v / sum(agg_score.values())
            for k, v in agg_score.iteritems()
        }

        #best_score = np.nanmax(agg_score.values(),axis=0)
        best_score_mem = self.best_scores[0]
        best_score = agg_score[best_score_mem]
        worst_score = agg_score[self.best_scores[-1]]

        sl_data = {}

        #loop over ensemble members/scores
        for l, (enum, scores) in enumerate(sorted(ens_scr.items())):

            ### read each netcdf timeseries file ###############
            ts_file_name = cf.pism_file_name.replace(str(cf.fillnum),
                                                     str(enum))
            tsfile = os.path.join(cf.resultpath, ts_file_name)
            tsfile = tsfile.replace('paleo.nc',
                                    'timeseries.nc')  #'timeseries-slvol.nc'

            if os.path.exists(tsfile):
                tsdata = nc.Dataset(tsfile, 'r')
                slvol = np.squeeze(tsdata.variables[varname][:]) / volscale
                sltime = np.squeeze(
                    tsdata.variables["time"][:]) / cf.seconds_per_year * 1e-3
                tsdata.close()
                sl_data[enum] = slvol
                if plottopresent:
                    slvol0 = slvol[-1]

                ### plot sl-curve with alpha according to score
                alphamin = 0.03
                alphamax = 0.9
                alphaval = alphamax + (
                    (alphamax - alphamin) *
                    (scores[0] - best_score)) / (best_score - worst_score)
                if plotgradient:
                    ax24a.plot(sltime,
                               np.gradient(slvol),
                               color="k",
                               linewidth=0.5,
                               alpha=alphaval,
                               zorder=1)
                else:
                    #ax24a.plot(sltime,slvol0-slvol,color="k",linewidth=0.5,alpha=alphaval,zorder=1)
                    ax24a.plot(sltime[::10],
                               slvol[::10] - slvol0,
                               color="k",
                               linewidth=0.5,
                               alpha=alphaval,
                               zorder=1)

                ### best/reference run #############################
                #if enum=='1170':#reference
                if enum == best_score_mem:
                    #if enum in self.best_scores[0:3]:
                    if plotgradient:
                        ax24a.plot(sltime,
                                   np.gradient(slvol),
                                   color=cf.colorscheme[3],
                                   linewidth=2,
                                   zorder=3,
                                   label="best")
                    else:
                        ax24a.plot(sltime,
                                   slvol - slvol0,
                                   color=cf.colorscheme[3],
                                   linewidth=1.5,
                                   zorder=3,
                                   label="best")
                        #ax24a.plot(sltime,slvol0-slvol,color=cf.colorscheme[3],linewidth=1.5,zorder=3,label="best")

                ### ensemble mean ###################################
                if l == 0:
                    ensemble_mean_slvol = slvol * agg_score_prop[enum]
                    tslen = len(slvol)
                    sltime0 = sltime
                    ensemble_stddev_slvol = np.zeros_like(slvol)
                else:
                    ensemble_mean_slvol += slvol[-tslen:] * agg_score_prop[enum]
        if not plotgradient:
            if plottopresent:
                slvol0 = ensemble_mean_slvol[-1]
            ax24a.plot(sltime0,
                       ensemble_mean_slvol - slvol0,
                       color=cf.colorscheme[2],
                       linewidth=2.0,
                       zorder=2,
                       label="mean")
            #ax24a.plot(sltime0,slvol0-ensemble_mean_slvol,color=cf.colorscheme[2],linewidth=2.0,zorder=2,label="mean")

        ### ensemble standard deviation ########################
        for l, (enum, sl) in enumerate(sorted(sl_data.items())):
            ensemble_stddev_slvol += (ensemble_mean_slvol -
                                      sl[-tslen:])**2 * agg_score_prop[enum]
        ensemble_stddev_slvol = np.sqrt(ensemble_stddev_slvol)
        #stnd_upper = slvol0-ensemble_mean_slvol+ensemble_stddev_slvol
        #stnd_lower = slvol0-ensemble_mean_slvol-ensemble_stddev_slvol
        stnd_upper = ensemble_mean_slvol - slvol0 + ensemble_stddev_slvol
        stnd_lower = ensemble_mean_slvol - slvol0 - ensemble_stddev_slvol
        if not plotgradient:
            ax24a.fill_between(sltime0[::10],
                               stnd_upper[::10],
                               stnd_lower[::10],
                               color=cf.colorscheme[2],
                               zorder=0,
                               alpha=0.4,
                               label="stdev")
            #ax24a.plot(sltime0,stnd_upper,color=cf.colorscheme[2],linewidth=2,zorder=2,linestyle="dotted",label="stdev")
            #ax24a.plot(sltime0,stnd_lower,color=cf.colorscheme[2],linewidth=2,zorder=2,linestyle="dotted")

        legend24 = ax24a.legend(loc="lower left", shadow=False, fontsize=11)
        rcParams['legend.frameon'] = 'False'

        ### min and max values ##################################
        pm = str(r"$\pm$")
        pd_mean_sl = ensemble_mean_slvol[-1]
        #pd_span      = str(np.around(stnd_lower[-1],decimals=1))
        #pd_span     += " - "+str(np.around(stnd_upper[-1],decimals=1))+" \n "+volunit
        pd_span = str(np.around(pd_mean_sl - slvol0, decimals=1)) + pm
        pd_span += str(np.around(ensemble_stddev_slvol[-1],
                                 decimals=1))  #+" \n "+volunit

        lgm_mean_sl = np.max(ensemble_mean_slvol)
        lgm_mean_k = np.argmax(ensemble_mean_slvol)
        lgm_mean_k2 = np.argmin((sltime0 + 15)**2)
        lgm_mean_t = sltime0[lgm_mean_k]
        #lgm_span     = str(np.around(stnd_lower[lgm_mean_k2],decimals=1))
        #lgm_span    += " - "+str(np.around(stnd_upper[lgm_mean_k2],decimals=1))+" \n "+volunit
        lgm_span = str(
            np.around(ensemble_mean_slvol[lgm_mean_k2] - slvol0,
                      decimals=1)) + pm
        lgm_span += str(
            np.around(ensemble_stddev_slvol[lgm_mean_k2],
                      decimals=1))  #+" \n "+volunit

        eeam_mean_sl = np.min(ensemble_mean_slvol[-tslen:-tslen / 2])
        eeam_mean_k = np.argmin(ensemble_mean_slvol[-tslen:-tslen / 2])
        eeam_mean_k2 = np.argmin((sltime0 + 120)**2)
        eeam_mean_t = sltime0[eeam_mean_k]
        #eeam_span    = str(np.around(stnd_lower[eeam_mean_k2],decimals=1))
        #eeam_span   += " - "+str(np.around(stnd_upper[eeam_mean_k2],decimals=1))+" \n "+volunit
        eeam_span = str(
            np.around(ensemble_mean_slvol[eeam_mean_k2] - slvol0,
                      decimals=1)) + pm
        eeam_span += str(
            np.around(ensemble_stddev_slvol[eeam_mean_k2],
                      decimals=1))  #+" \n "+volunit

        degl_mean_k10 = np.argmin((sltime0 + 10)**2)
        degl_mean_k5 = np.argmin((sltime0 + 5)**2)

        #print pd_mean_sl,lgm_mean_sl,lgm_mean_t,eeam_mean_sl,eeam_span,eeam_mean_t
        #print slvol0,ensemble_mean_slvol[-1],ensemble_stddev_slvol[-1]

        print "Reconstructions of sea-level contributions:"
        print "period", "mean sl", "std sl", "time", "mean anomaly"
        print "\nPD:", pd_mean_sl, ensemble_stddev_slvol[-1], sltime0[
            -1], pd_mean_sl - slvol0
        print "LGM:", lgm_mean_sl, ensemble_stddev_slvol[
            lgm_mean_k], lgm_mean_t, lgm_mean_sl - slvol0
        print "LIG:", eeam_mean_sl, ensemble_stddev_slvol[
            eeam_mean_k], eeam_mean_t, eeam_mean_sl - slvol0, "\n"
        print "LGM2:", ensemble_mean_slvol[lgm_mean_k2], ensemble_stddev_slvol[
            lgm_mean_k2], sltime0[
                lgm_mean_k2], ensemble_mean_slvol[lgm_mean_k2] - slvol0
        print "LIG2:", ensemble_mean_slvol[
            eeam_mean_k2], ensemble_stddev_slvol[eeam_mean_k2], sltime0[
                eeam_mean_k2], ensemble_mean_slvol[eeam_mean_k2] - slvol0, "\n"
        print "DEG10:", ensemble_mean_slvol[
            degl_mean_k10], ensemble_stddev_slvol[degl_mean_k10], sltime0[
                degl_mean_k10], ensemble_mean_slvol[degl_mean_k10] - slvol0
        print "DEG5:", ensemble_mean_slvol[
            degl_mean_k5], ensemble_stddev_slvol[degl_mean_k5], sltime0[
                degl_mean_k5], ensemble_mean_slvol[degl_mean_k5] - slvol0, "\n"

        #ax24a.text(eeam_mean_t,6.0*varmax/8.0,eeam_span,color=cf.colorscheme[2],zorder=3,fontsize=10)
        #ax24a.text(-35,varmax/2.0,lgm_span,color=cf.colorscheme[2],zorder=3,fontsize=10)
        #ax24a.text(-20,3.0*varmax/4.0,pd_span,color=cf.colorscheme[2],zorder=3,fontsize=10)

        ax24a.text(eeam_mean_t,
                   -3.0 * varmax / 8.0,
                   eeam_span,
                   color=cf.colorscheme[2],
                   zorder=3,
                   fontsize=11)
        ax24a.text(-30,
                   7.0 * varmax / 8.0,
                   lgm_span,
                   color=cf.colorscheme[2],
                   zorder=3,
                   fontsize=11)
        ax24a.text(-21,
                   -3.0 * varmax / 8.0,
                   pd_span,
                   color=cf.colorscheme[2],
                   zorder=3,
                   fontsize=11)

        if self.printtopdf:
            #printname = self.printname.replace("placeholder","slvolall")
            plt.savefig(printname, format='pdf')
            plt.savefig(printname.replace(".pdf", ".png"),
                        format='png',
                        dpi=300)
Example #35
points = 500

cmap = elphmod.plot.colormap(
    (0, elphmod.plot.Color(255, 255, 255)),
    (1, elphmod.plot.Color(0, 0, 0)),
)

el = elphmod.el.Model('data/NbSe2')

e = elphmod.dispersion.dispersion_full(el.H, nk)[:, :, 0] - mu

kxmax, kymax, kx, ky, e = elphmod.plot.toBZ(e,
                                            points=points,
                                            return_k=True,
                                            outside=np.nan)

dedky, dedkx = np.gradient(e, ky, kx)
dedk = np.sqrt(dedkx**2 + dedky**2)

image = elphmod.plot.color(dedk, cmap)

if comm.rank == 0:
    elphmod.plot.save('fermi_velocity.png', image)

info('Min./max./mean number of k-points for meV resolution:')

FS = np.where(np.logical_and(~np.isnan(dedk), abs(e) < 0.1))

for v in dedk[FS].min(), dedk[FS].max(), np.average(dedk[FS]):
    info(int(round(2 * kymax * v / 1e-3)))
Example #36
    fs_solutions = []
    normvalue = np.inf
    l = 0
    while normvalue > TOL and l < numTimeSteps:

        # solve non-localized system
        lod = lod_wave.LodWave(b_coef, world, np.inf, IPatchGenerator, a_coef,
                               prev_fs_sol, ms_basis)
        lod.solve_fs_system()

        # store sparse solution
        prev_fs_sol = sparse.csc_matrix(np.array(np.column_stack(lod.fs_list)))
        fs_solutions.append(prev_fs_sol)

        normvalue = np.sqrt(
            np.dot(np.gradient(prev_fs_sol.toarray()[:, N / 2]),
                   np.gradient(prev_fs_sol.toarray()[:, N / 2])))

        l += 1
        print 'N = %d, l = %d' % (N, l)
    '''
    Compute v^n and w^n
    '''

    # initial value
    Uo = xpCoarse * (1 - xpCoarse)

    # coarse v^(-1) and v^0
    V = [Uo]
    V.append(Uo)
Example #37
def do_boxcar(image, psf, outwave, boxwidth=2.5, nspec=500):
    """Extracts spectra row by row, given the centroids

    Args:
        image  : desispec.image object
        psf: desispec.psf.PSF like object
            Or do we just parse the traces here and write a separate wrapper to handle this? Leaving psf in the input argument now.
        outwave: wavelength array for the final spectra output
        boxwidth: HW box size in pixels

    Returns desispec.frame.Frame object
    """
    import math
    from desispec.frame import Frame

    #wavelength=psf.wavelength() # (nspec,npix_y)
    wmin = psf.wmin
    wmax = psf.wmax
    waves = np.arange(wmin, wmax, 0.25)
    xs = psf.x(None, waves)  #- xtraces # doing the full image here.
    ys = psf.y(None, waves)  #- ytraces

    camera = image.camera
    spectrograph = int(camera[1:])  #- first char is "r", "b", or "z"
    mask = np.zeros(image.pix.T.shape)
    maxx, maxy = mask.shape
    maxx = maxx - 1
    maxy = maxy - 1
    ranges = np.zeros((mask.shape[1], xs.shape[0] + 1), dtype=int)
    for bin in xrange(0, len(waves)):
        ixmaxold = 0
        for spec in xrange(0, xs.shape[0]):
            xpos = xs[spec][bin]
            ypos = int(ys[spec][bin])
            if xpos < 0 or xpos > maxx or ypos < 0 or ypos > maxy:
                continue
            xmin = xpos - boxwidth
            xmax = xpos + boxwidth
            ixmin = int(math.floor(xmin))
            ixmax = int(math.floor(xmax))
            if ixmin <= ixmaxold:
                print "Error Box width overlaps,", xpos, ypos, ixmin, ixmaxold
                return None, None
            ixmaxold = ixmax
            if mask[int(xpos)][ypos] > 0:
                continue
        # boxing in x vals
            if ixmin < 0:  #int value is less than 0
                ixmin = 0
                rxmin = 1.0
            else:  # take part of the bin depending on real xmin
                rxmin = 1.0 - xmin + ixmin
            if ixmax > maxx:  # xmax is bigger than the image
                ixmax = maxx
                rxmax = 1.0
            else:  # take the part of the bin depending on real xmax
                rxmax = xmax - ixmax
            ranges[ypos][spec + 1] = math.ceil(xmax)  #end at next column
            if ranges[ypos][spec] == 0:
                ranges[ypos][spec] = ixmin
            mask[ixmin][ypos] = rxmin
            for x in xrange(ixmin + 1, ixmax):
                mask[x][ypos] = 1.0
            mask[ixmax][ypos] = rxmax
    for ypos in xrange(ranges.shape[0]):
        lastval = ranges[ypos][0]
        for sp in xrange(1, ranges.shape[1]):
            if ranges[ypos][sp] == 0:
                ranges[ypos][sp] = lastval
            lastval = ranges[ypos][sp]

    maskedimg = (image.pix * mask.T)
    flux = np.zeros((maskedimg.shape[0], ranges.shape[1] - 1))
    for r in xrange(flux.shape[0]):
        row = np.add.reduceat(maskedimg[r], ranges[r])[:-1]
        flux[r] = row

    from desispec.interpolation import resample_flux

    wtarget = outwave
    #- limit nspec to psf.nspec max
    if nspec > psf.nspec:
        nspec = psf.nspec
        print "Warning! Extracting only %s spectra" % psf.nspec

    fflux = np.zeros((nspec, len(wtarget)))
    ivar = np.zeros((nspec, len(wtarget)))
    resolution = np.zeros(
        (nspec, 21, len(wtarget)
         ))  #- placeholder for online case. Offline should be usable
    #TODO get the approximate resolution matrix for online purpose or don't need them? How to perform fiberflat, sky subtraction etc or should have different version of them for online?

    #- convert to per angstrom first and then resample to desired wave length grid.

    for spec in xrange(nspec):
        ww = psf.wavelength(spec)
        dwave = np.gradient(ww)
        flux[:, spec] /= dwave
        fflux[spec, :] = resample_flux(wtarget, ww, flux[:, spec])
        #- image.readnoise is no more a scalar but a full CCD pixel size array
        #- TODO Using median readnoise here for now. Need to propagate per-pixel readnoise from top.
        readnoise = np.median(image.readnoise)
        ivar[spec, :] = 1. / (
            fflux[spec, :].clip(0.0) + 2 * boxwidth * readnoise**2
        )  #- 2*half width=boxsize

    return fflux, ivar, resolution
Example #38
def InitialConditions(x, rho, v, Pressure, args, p):

    N = args.n
    dx = np.gradient(x)  # Cell Sizes (UNIFORM GRID ONLY)
    M = (rho * dx).sum()  # Total Mass (UNIFORM GRID ONLY)

    # Particle Parameters
    mp = M / N  # Particle Mass, Total Mass divided by total particles
    p['mp'] = mp
    # Final number of particles per cell, and total
    Np = np.floor((rho * dx) / mp +
                  np.random.uniform(size=rho.size)).astype(int)
    NP = Np.sum()
    p['NP'] = NP
    # Particle Diameter
    Dmax = (dx / Np).min()
    D = 1e-4 * Dmax
    D = 0
    p['D'] = D

    #Initialize Velocities
    s = np.sqrt(Pressure / rho)

    # bin and effbin sizes and velocities
    effbins = []
    bins = []

    vels = []
    for j in range(len(dx)):

        effbins.append(np.ones(Np[j]) * (dx[j] / Np[j] - D))
        bins.append(np.ones(Np[j]) * dx[j] / Np[j])

        vels.append(np.random.randn(Np[j]) * s[j] + v[j])

    #effbins = np.array(effbins)
    #bins = np.array(bins)
    #pos_in_bin = effbins*np.random.uniform(size=effbins.size)
    effbins = np.concatenate(effbins)
    bins = np.concatenate(bins)
    vels = np.concatenate(vels)
    uniforms = np.random.random(Np.sum())

    # Computing particle dx's
    if args.b == 0:  #Periodic Boundary Conditions
        Pdx = np.roll(effbins * uniforms, -1) - effbins * uniforms + bins
        v0 = vels[0]
        P0 = uniforms[0] * effbins[0] + D / 2
        vels = np.roll(vels, -1) - vels
        P = np.concatenate(([P0], P0 + np.cumsum(Pdx[:-1])))
    elif args.b == 1:  #Reflective Boundary Conditions #NEED TO FIX VELOCITY
        Pdx = effbins[1:] * uniforms[
            1:] - effbins[:-1] * uniforms[:-1] + bins[:-1]
        vels = np.roll(vels, -1) - vels
        P0 = uniforms[0] * effbins[0] + D / 2
        Pdx = np.append(np.array([P0]), Pdx)
        P = np.cumsum(Pdx)
    #MP = mp*np.arange(1,Np.sum())
    #rhoP = mp/Pdx

    return P, Pdx, vels, P0, p
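The particles-per-cell draw above uses stochastic rounding: adding a uniform deviate before flooring makes E[Np] equal to rho*dx/mp exactly, so the total mass is unbiased even when cells hold fractional particle counts. A minimal standalone sketch (the densities and masses are hypothetical):

import numpy as np

rng = np.random.default_rng(0)
rho = np.array([1.0, 0.125, 0.5])   # hypothetical cell densities
dx = np.full(rho.size, 0.01)        # uniform cell sizes
mp = 1e-4                           # particle mass

expected = rho * dx / mp            # fractional particle counts per cell
Np = np.floor(expected + rng.uniform(size=rho.size)).astype(int)
print(expected, Np)                 # Np fluctuates around expected; E[Np] = expected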
Esempio n. 39
0
File: IMB.py Progetto: cdknorow/IBZ
        fid.write('%f %f\n' % (x_target[j], gr_new[j]))
    fid.close()

    #test the fit
    F_fit(gr_target, gr_new)
    #add a boltzman to the potential to get the next potential
    V_new = []
    for j in range(len(x_target)):
        try:
            # probe with a fixed 0.2*T prefactor only to screen NaN/inf;
            # the update itself uses the per-point alpha[j]
            boltz = .2 * T * math.log(gr_new[j] / gr_target[j])
            if math.isnan(boltz):
                V_new.append(V[j])
            elif math.isinf(boltz):
                V_new.append(V[j])
            else:
                V_new.append(V[j] +
                             alpha[j] * T * math.log(gr_new[j] / gr_target[j]))
        except (ValueError, ZeroDivisionError):
            print('error', gr_new[j], gr_target[j])
            V_new.append(V[j])
    #Smooth Out V
    V = Smooth(V_new)
    F_new = -np.gradient(V, x_target[1] - x_target[0])
    #write new potential table
    pot = open('potential/potential%i.dat' % i, 'w')
    pot.write('#r V F\n')
    for j in range(len(x_target)):
        pot.write('%f %f %f\n' % (x_target[j], V[j], F_new[j]))
    pot.close()
    table.set_from_file('A', 'A', filename='potential/potential%i.dat' % i)
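The loop above is one step of iterative Boltzmann inversion: V_{i+1}(r) = V_i(r) + alpha(r)*T*ln(g_i(r)/g_target(r)), with the force table recovered as F = -dV/dr. A minimal vectorized sketch of the same update (the name ibi_update and the NaN/inf guard are illustrative, not from the source):

import numpy as np

def ibi_update(V, g_new, g_target, r, alpha=0.2, T=1.0):
    # One iterative Boltzmann inversion step; keeps V unchanged where g ~ 0
    with np.errstate(divide='ignore', invalid='ignore'):
        corr = alpha * T * np.log(g_new / g_target)
    corr = np.where(np.isfinite(corr), corr, 0.0)
    V_next = V + corr
    F_next = -np.gradient(V_next, r[1] - r[0])   # force from the potential table
    return V_next, F_next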
def average_energy(read_Z_data=True,
                   generate_Z_data=False,
                   Z_file_name=None,
                   plot_energy=True,
                   save_plot_E=True,
                   show_plot_E=True,
                   E_plot_name=None,
                   temp_min=1. / 10,
                   temp_max=1 / 2.,
                   N_temp=10,
                   save_Z_csv=True,
                   relevant_info_Z=None,
                   print_Z_data=True,
                   x_max=7.,
                   nx=201,
                   N_iter=7,
                   potential=harmonic_potential,
                   potential_string='harmonic_potential',
                   print_steps=False,
                   save_pi_x_data=False,
                   pi_x_file_name=None,
                   relevant_info_pi_x=None,
                   plot_pi_x=False,
                   save_plot_pi_x=False,
                   show_plot_pi_x=False):
    """
    """
    if read_Z_data:
        Z_file_read = pd.read_csv(Z_file_name, index_col=0, comment='#')
    elif generate_Z_data:
        t_0 = time()
        Z_data = Z_several_values(
            temp_min, temp_max, N_temp, save_Z_csv, Z_file_name,
            relevant_info_Z, print_Z_data, x_max, nx, N_iter, potential,
            potential_string, print_steps, save_pi_x_data, pi_x_file_name,
            relevant_info_pi_x, plot_pi_x, save_plot_pi_x, show_plot_pi_x)
        t_1 = time()
        print(
            '--------------------------------------------------------------------------\n'
            + '%d values of Z(beta) generated   -->   %.3f sec.' %
            (N_temp, t_1 - t_0))
        Z_file_read = Z_data
    else:
        print(
            'Choose whether to generate or read the data for the partition function, Z.\n'
            + 'These options are mutually exclusive. If both are selected, the '
            + 'algorithm chooses to read the data.')
    # READ DATA IS OK
    beta_read = Z_file_read['beta']
    temp_read = Z_file_read['temperature']
    Z_read = Z_file_read['Z']

    E_avg = np.gradient(-np.log(Z_read), beta_read)

    if plot_energy:
        plt.figure(figsize=(8, 5))
        plt.plot(
            temp_read,
            E_avg,
            label=u'$\langle E \\rangle$ via path integral\nnaive sampling')
        plt.plot(temp_read,
                 E_QHO_avg_theo(beta_read),
                 label=u'$\langle E \\rangle$ theoretical')
        plt.legend(loc='best')
        plt.xlabel(u'$T$')
        plt.ylabel(u'$\langle E \\rangle$')
        if save_plot_E:
            if E_plot_name is None:
                script_dir = os.path.dirname(os.path.abspath(__file__))
                E_plot_name='E-ms-plot-%s-beta_max_%.3f-'%(potential_string,1./temp_min) +\
                            'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max) +\
                            'nx_%d-N_iter_%d.eps'%(nx, N_iter)
                E_plot_name = script_dir + '/' + E_plot_name
            plt.savefig(E_plot_name)
        if show_plot_E:
            plt.show()
        plt.close()
    return E_avg, beta_read.to_numpy()
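Since <E> = -d ln Z / d beta, the np.gradient call above can be checked against the harmonic oscillator, whose partition function is known in closed form (a sketch assuming hbar*omega = k_B = 1, matching the Z_QHO/E_QHO_avg_theo definitions below):

import numpy as np

beta = np.linspace(0.5, 10.0, 400)
Z = 0.5 / np.sinh(beta / 2)                  # QHO partition function
E_numeric = np.gradient(-np.log(Z), beta)    # <E> = -d ln Z / d beta
E_theory = 0.5 / np.tanh(0.5 * beta)
print(np.max(np.abs(E_numeric - E_theory)))  # small, largest at the endpoints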
Esempio n. 41
0
def add_cyclic(data):
    # Add Cyclic point around 360 degrees longitude:
    lons = data.getLongitude()[:]
    dx = np.gradient(lons)[-1]
    data2 = data(longitude=(0, dx + np.max(lons)), squeeze=True)
    return data2
                        potential_string = 'harmonic_potential', print_steps=False,
                        save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None, 
                        plot=False, save_plot=False, show_plot=False   )
    t_1 = time()
    print('<E(beta)>   -->   %.3f sec.' % (t_1 - t_0))

# READ DATA IS OK
Z_file_name = script_dir+'/'+'partition-function-test-2.csv'
Z_file_read =  pd.read_csv(Z_file_name, index_col=0, comment='#')
beta_read = Z_file_read['beta']
beta_read = beta_read.to_numpy()
temp_read = Z_file_read['temperature']
temp_read = temp_read.to_numpy()
Z_read = Z_file_read['Z']
Z_read = Z_read.to_numpy()

E_avg = np.gradient(-np.log(Z_read), beta_read)
def Z_QHO(beta):
    return 0.5/np.sinh(beta/2)
def E_QHO_avg_theo(beta):
    return 0.5/np.tanh(0.5*beta)

plt.figure()
plt.plot(temp_read,E_avg,label=u'$< E > Path Integral$')
plt.plot(temp_read,E_QHO_avg_theo(beta_read),label=u'$< E > theory$')
plt.plot(temp_read,Z_read,'v-',label=u'$ Z(T) $')
plt.legend(loc='best')
plt.xlabel(u'$T$')
plt.ylabel(u'$< E >$ or $Z(T)$')
plt.show()
plt.close()
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import brentq
from solver import I1, I1andI2


t = np.linspace(0., 1, 10000 - 1)
u = np.cos(1. * 2. * np.pi * t)

print(I1andI2(u))

def F(x):
    u[int(u.size / 2)] = x
    return I1(u)

u[int(u.size / 2)] = brentq(F, -1.5902, -1.5901, xtol=1e-14)

print(I1andI2(u))

plt.title("Numerically exact solution")
plt.subplot(121)
plt.title("function")
plt.plot(t, u, '-')


plt.subplot(122)
plt.title("derivatives")
plt.plot(t, np.gradient(u), '-')

plt.show()
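Note that np.gradient(u) with no second argument assumes unit sample spacing, so the plotted "derivatives" are per-sample differences rather than du/dt; passing the coordinate array rescales them. A quick illustration:

import numpy as np

t = np.linspace(0.0, 1.0, 9999)
u = np.cos(2.0 * np.pi * t)

du_samples = np.gradient(u)        # assumes spacing 1 between samples
du_dt = np.gradient(u, t)          # true derivative, ~ -2*pi*sin(2*pi*t)
print(np.allclose(du_dt, du_samples / (t[1] - t[0])))   # True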
Esempio n. 44
0
def Pressure(x, y, u, v, x_body, y_body, step=1, rho=1, nu=128 / 150):
    """
    Calculates the pressure field from the velocity field. To avoid problems
    with the velocity inside the body, the integration is carried out always
    only up to the body. 

    Parameters
    ----------
    x : 2D-Array
        x-Coordinates.
    y : 2D-Array
        y-Coordinates.
    u : 2D-Array
        Velocity x-component.
    v : 2D-Array
        Velocity y-component.
    x_body : 1D-Array
        x-coordinates of body.
    y_body : 1D-Array
        y-coordinates of body.
    step : int, optional
        Step size on grid. The default is 1.
    rho : float, optional
        Density. The default is 1.
    nu : float, optional
        Viscosity. The default is 128/150.

    Returns
    -------
    p : 2D-Array
        Pressure field.

    """
    dudx = np.gradient(u, step, axis=1)
    dudy = np.gradient(u, step, axis=0)

    dpdx = -(rho * (u * dudx + v * dudy) + nu *
             (np.gradient(dudx, step, axis=1) +
              np.gradient(dudy, step, axis=0)))

    dvdx = np.gradient(v, step, axis=1)
    dvdy = np.gradient(v, step, axis=0)

    dpdy = -(
        rho * (u * dvdx + v * dvdy) + nu *
        (np.gradient(dvdx, step, axis=1) + np.gradient(dvdy, step, axis=0)))

    p = np.empty_like(x)
    p[:, 0] = step * dpdx[:, 0]
    i = 1
    while (x[0, i] < np.min(x_body)) and i < len(x):
        p[:, i] = p[:, i - 1] + step * dpdx[:, i]
        i += 1

    for k in range(i, len(x)):
        p[0, k] = p[0, k - 1] + step * dpdx[0, k]
        p[-1, k] = p[-1, k - 1] + step * dpdx[-1, k]

    k = 1
    while (y[k, 0] < np.min(y_body)) and k < len(x):
        p[k, :] = p[k - 1, :] + step * dpdy[k, :]
        k += 1

    l = len(x) - 2
    while (y[l, 0] > np.max(y_body)) and l > 0:
        p[l, :] = p[l + 1, :] - step * dpdy[l, :]
        l -= 1

    while x[0, i] <= np.max(x_body):
        yl = np.interp(x[0, i], x_body, y_body)
        ind = abs(yl - y[:, 0]).argmin()
        for m in range(k, ind):
            p[m, i] = p[m - 1, i] + step * dpdy[k, i]
        for m in range(l - ind):
            p[l - m, i] = p[l - m + 1, i] - step * dpdy[l - m, i]

        i += 1

    yl = np.interp(x[0, i], x_body, y_body)
    ind = abs(yl - y[:, 0]).argmin()
    for m in range(k, ind):
        p[m, i] = p[m - 1, i] + step * dpdy[k, i]
    for m in range(l - ind):
        p[l - m, i] = p[l - m + 1, i] - step * dpdy[l - m, i]

    i += 1
    for m in range(k, l + 1):
        p[m, i:] = p[m - 1, i:] + step * dpdy[m, i:]
    return p
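The gradient calls above rely on NumPy's axis convention for fields stored as f[y, x]: axis=1 differentiates along x (columns) and axis=0 along y (rows). A small check on a linear field:

import numpy as np

step = 0.1
y, x = np.mgrid[0:1:step, 0:1:step]   # rows index y, columns index x
f = 2.0 * x + 3.0 * y

dfdx = np.gradient(f, step, axis=1)
dfdy = np.gradient(f, step, axis=0)
print(dfdx.mean(), dfdy.mean())       # ~2.0 and ~3.0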
Esempio n. 45
0
def Main(p, q, r):
    ### Build the latitude/longitude mesh

    ### Latitude/longitude mesh -> Cartesian mesh
    X, Y = Convert(LON, LAT, theta, point[0], point[1])

    ###Python_mesh output
    with open('xy_py2ft.txt', 'w') as f:
        for i in range(N):
            plot = "{:.7f}\t{:.7f}\n".format(X[i], Y[i])
            f.write(plot)

    ###Fortran_calculate
    Fortran(r)

    ###Fortran_result_input
    with open('result_ft2py.csv', 'r') as f:
        df = np.loadtxt(f, delimiter=",", skiprows=0)

    ###Reshaping
    UX = df[:, 0]
    UY = df[:, 1]
    UZ = df[:, 2]
    #    UZX=df[:,3]
    #    UZY=df[:,4]
    #    UZZ=df[:,5]
    UX_pyplot = UX.reshape((yscale, xscale))
    UY_pyplot = UY.reshape((yscale, xscale))
    UZ_pyplot = UZ.reshape((yscale, xscale))
    #    UZX_pyplot=UZX.reshape((yscale,xscale))
    #    UZY_pyplot=UZY.reshape((yscale,xscale))
    #    UZZ_pyplot=UZZ.reshape((yscale,xscale))
    FP = Rectangleplot(AL1, AL2, AW1, AW2, point[0], point[1], theta)

    ### Slope of the lava layer
    ### 2019/1/24: for now, compute the grid spacing [m] near the grid center for np.gradient
    Xkankaku = (X[xscale * int(yscale / 2) + 1] -
                X[xscale * int(yscale / 2 + 1) + 1]) * 1000
    Ykankaku = (Y[int(yscale / 2)] - Y[int(yscale / 2) - 1]) * 1000

    UZX, UZY = np.gradient(UZ_pyplot, Xkankaku, Ykankaku)
    UZY = -UZY
    UZX = -UZX
    THETA_pyplot = np.rad2deg(np.arccos(np.sqrt(1 / (1 + UZX**2 + UZY**2))))
    #DIREC=np.rad2deg(np.arctan2(UZX,UZY))
    #GRAD_pyplot=GRAD.reshape((yscale,xscale))
    #THETA_pyplot=THETA.reshape((yscale,xscale))
    THETA = THETA_pyplot.reshape(N, 1)
    UZX = UZX.reshape(N, 1)
    UZY = UZY.reshape(N, 1)

    ###GMT output
    if p == 1:

        import time

        # This did not turn out to be faster
        '''
        LON_g=np.reshape(LON, (LON.shape[0], 1))
        LAT_g=np.reshape(LAT, (LAT.shape[0], 1))
        UZ_g=np.reshape(UZ, (UZ.shape[0], 1))
        np.savetxt('GMT_py2gmt.txt',np.concatenate([LON_g,LAT_g,UZ_g],axis=1), fmt="%.7f")
        '''

        with open('断層重ね合わせ/B/UZ_%s.txt' % str(i_main), 'w') as f_plot:
            for i in range(0, N):
                u = "{:.7f}\t{:.7f}\t{:.7f}\n".format(LON[i], LAT[i], UZ[i])
                f_plot.write(u)
        with open('断層重ね合わせ/B/UX_%s.txt' % str(i_main), 'w') as f_plot:
            for i in range(0, N):
                u = "{:.7f}\t{:.7f}\t{:.7f}\n".format(LON[i], LAT[i], UX[i])
                f_plot.write(u)
        with open('断層重ね合わせ/B/UY_%s.txt' % str(i_main), 'w') as f_plot:
            for i in range(0, N):
                u = "{:.7f}\t{:.7f}\t{:.7f}\n".format(LON[i], LAT[i], UY[i])
                f_plot.write(u)
        '''
        with open('GMT_py2gmt-2.txt', 'w') as f_plot:
            for i in range(4):
                edge= "{:.7f}\t{:.7f}\n".format(FP[i][0],FP[i][1])
                f_plot.write(edge)
        '''
        '''        
        with open('GMT_py2gmt-3slope.txt', 'w') as f_plot:
            for i in range(0,N):
                u = "{:.7f}\t{:.7f}\t{:.7f}\n".format(LON[i], LAT[i], float(THETA[i]))
                f_plot.write(u)    
        with open('GMT_py2gmt-uzx.txt', 'w') as f_plot:
            for i in range(0,N):
                u = "{:.7f}\t{:.7f}\t{:.7f}\n".format(LON[i], LAT[i], float(UZX[i]))
                f_plot.write(u) 
        with open('GMT_py2gmt-uzy.txt', 'w') as f_plot:
            for i in range(0,N):
                u = "{:.7f}\t{:.7f}\t{:.7f}\n".format(LON[i], LAT[i], float(UZY[i]))
                f_plot.write(u)                 
        '''
        print('GMT data successfully output.')
    if q == 1:
        ###python plot
        KEIDO_plot, IDO_plot = np.meshgrid(KEIDO, IDO)

        print(FP)

        fig = plt.figure()
        ax = plt.axes()
        pz = ax.pcolor(KEIDO_plot, IDO_plot, THETA_pyplot, cmap='coolwarm')
        cz = ax.contour(KEIDO_plot,
                        IDO_plot,
                        THETA_pyplot,
                        colors=['black'],
                        linewidths=1,
                        linestyles='dashed')
        ax.clabel(cz, fontsize=8)
        #ax.quiver(X,Y,DX,DY)
        plt.show()

        fig, axes = plt.subplots(1, 3, figsize=(13, 4))
        px = axes[0].pcolor(KEIDO_plot, IDO_plot, UX_pyplot, cmap='coolwarm')
        py = axes[1].pcolor(KEIDO_plot, IDO_plot, UY_pyplot, cmap='coolwarm')
        pz = axes[2].pcolor(KEIDO_plot, IDO_plot, UZ_pyplot, cmap='coolwarm')
        cx = axes[0].contour(KEIDO_plot,
                             IDO_plot,
                             UX_pyplot,
                             colors=['black'],
                             linewidths=1,
                             linestyles='dashed')
        cy = axes[1].contour(KEIDO_plot,
                             IDO_plot,
                             UY_pyplot,
                             colors=['black'],
                             linewidths=1,
                             linestyles='dashed')
        cz = axes[2].contour(KEIDO_plot,
                             IDO_plot,
                             UZ_pyplot,
                             colors=['black'],
                             linewidths=1,
                             linestyles='dashed')
        axes[0].clabel(cx, fontsize=8)
        axes[1].clabel(cy, fontsize=8)
        axes[2].clabel(cz, fontsize=8)
        for i in range(3):
            fault = plt.Polygon(((FP[0], FP[1], FP[2], FP[3])),
                                fc="#770000",
                                alpha=0.5)
            axes[i].add_patch(fault)


#            axes[i].set_xlabel('longitude', fontsize=14)
#            axes[i].set_ylabel('latitude', fontsize=14)
        axes[0].set_title('X-Displacement[m]')
        axes[1].set_title('Y-Displacement[m]')
        axes[2].set_title('Z-Displacement[m]')

        #plt.gca().xaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
        #plt.gca().yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
        #plt.gca().xaxis.get_major_formatter().set_useOffset(False)
        #plt.gca().yaxis.get_major_formatter().set_useOffset(False)
        #fig.colorbar(px)
        #fig.colorbar(py)
        #fig.colorbar(pz)

        plt.show()
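The slope angle above comes from THETA = arccos(1/sqrt(1 + UZX**2 + UZY**2)), which equals arctan(|grad UZ|) for a surface with gradient (UZX, UZY). A small verification on a plane of known tilt (the grid spacings are hypothetical; note np.gradient returns the axis-0 derivative first):

import numpy as np

dx = dy = 10.0                                   # grid spacing [m]
y, x = np.meshgrid(np.arange(0, 200, dy),
                   np.arange(0, 300, dx), indexing='ij')
UZ = 0.1 * x + 0.05 * y

UZY, UZX = np.gradient(UZ, dy, dx)               # axis 0 is y, axis 1 is x
theta = np.rad2deg(np.arccos(np.sqrt(1.0 / (1.0 + UZX**2 + UZY**2))))
print(theta.mean(), np.degrees(np.arctan(np.hypot(0.1, 0.05))))   # both ~6.38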
Esempio n. 46
0
import numpy as np
import yaml
import matplotlib.pyplot as plt

if __name__ == "__main__":
    CONFIG = 'config.yaml'
    with open(CONFIG) as f:
        path = yaml.load(f)
    J1_PATH = path['robotCalibration'] + 'goal/j1.yaml'

    with open(J1_PATH) as f:
        J1p = yaml.load(f)

    J1v = np.gradient(J1p)
    plt.figure()
    plt.plot(J1p, 'r.')
    plt.figure()
    plt.plot(J1v)
    plt.show()
Esempio n. 47
0
def Ze(diameters,
       psd,
       wavelength,
       properties,
       ref_index=None,
       temperature=None,
       mass=None,
       theta=0.0,
       bck=None,
       K2=None):
    """radar reflectivity
	Compute radar reflectivity directly from hydrometeor parameters

	Parameters
	----------
	diameters : array(Nparticles) - double
			spectrum of diameters of the particles [meters]
	psd : callable
		size distribution of the particle 
		concentration [meters^-1 meters^-3]
	wavelength : scalar - double
		electromagnetic wavelength to be passed to the snowScatt 
		properties calculator
	properties : string
		name of the snowflake properties to call from the snowLibrary
	ref_index : scalar - complex (default to None)
		complex refractive index of ice to be passed to the snowScatt 
		properties calculator
	temperature : scalar - double
		absolute temperature, alternative formulation of ref_index when 
		ref_index is None to be passed to the snowScatt properties 
		calculator
	mass : array(Nparticles) - double (default to None)
		mass of the snowflakes to be passed to the snowScatt properties 
		calculator if left None the mass is derived by the snowLibrary 
		properties
	theta : scalar - double - (default to 0.0 vertical pointing)
		zenith incident angle of the electromagnetic radiation, to be passed
		to the snowScatt properties calculator
	bck : array(Nparticles) - double (default to None)
		radar backscattering cross-section [meters**2] override calculation of
		bck using particle parameters
	K2 : scalar - double 
		Rayleigh dielectric factor K^2 (dimensionless)
		K = (n^2 - 1)/(n^2 + 2) for the Clausius Mossotti relation
		override calculation of K2 from dielectric properties (useful for
		multirequency radar cross-calibration)

	Returns
	-------
	Z : scalar - double
		Radar reflectivity in logarithmic units [dBZ]

	"""
    freq = _c / wavelength
    if bck is None:  # compute only if not precalculated
        bck = backscatter(diameters, wavelength, properties, ref_index,
                          temperature, mass, theta)
    if K2 is None:  # compute only if not precalculated
        eps = refractiveIndex.water.eps(temperature, freq, 'Turner')
        K2 = refractiveIndex.utilities.K2(eps)
    z = specific_reflectivity(wavelength, bck, K2)
    Z = dB(np.sum(z * psd * np.gradient(diameters), axis=-1))

    return Z
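In the last line above, np.gradient(diameters) supplies the bin widths dD, so the sum approximates the integral of z(D)*N(D) over the size spectrum. A standalone sketch with a hypothetical exponential PSD, checked against the analytic sixth moment:

import numpy as np

D = np.linspace(1e-4, 1e-2, 500)          # diameters [m]
N0, lam = 8e6, 2000.0
psd = N0 * np.exp(-lam * D)               # hypothetical exponential PSD

z6 = np.sum(D**6 * psd * np.gradient(D))  # Rayleigh-regime moment integral
print(z6, N0 * 720.0 / lam**7)            # close to the analytic 6th moment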
Esempio n. 48
0
    def peak_response(self, T, dr):
        """
    RESPONSE SPECTRUM
    Computes the peak dynamic response of a single-degree-of-freedom system
    (mass-spring-dashpot) using the Newmark method for linear systems

    Inputs
     - a  : input accelerogram (in ms-2)
     - T  : fundamental period (in s)
     - dr : damping ratio = damping coefficient / critical damping coefficient

    Outputs
     - maxA : peak acceleration response
     - maxV : peak velocity response
     - maxD : peak displacement response

    """

        if not self.velocity.size:
            print("Reading velocity traces")
            veloc = self.read_seismo(filter_s=True)
        else:
            veloc = self.velocity

        # Differentiate the velocities to get acceleration
        accel = np.gradient(veloc, self.dt, axis=0)

        if isinstance(T, (int, float, list, tuple)):
            T = np.array(T)
        l = T.size
        a = np.zeros((l, *accel.shape))
        v = np.zeros((l, *accel.shape))
        d = np.zeros((l, *accel.shape))

        # Parameters defined for the average acceleration method (page 177 of Chopra's book)
        gamma = 0.5
        beta = 0.25

        # Properties of the SDOF
        w = 2 * np.pi / T  # Angular frequency
        w = w[:, np.newaxis]
        m = 1  # Mass
        k = m * w**2  # stiffness
        dc = 2.0 * dr * m * w

        # Initial calculations
        a[:, 0, :] = ((-1 * m * accel[0, :]) - (dc * v[:, 0, :]) -
                      (k * d[:, 0, :])) / m

        kk = k + ((gamma * dc) / (beta * self.dt)) + (m / (beta * self.dt**2))

        var_a = (m / (beta * self.dt)) + ((gamma * dc) / beta)

        var_b = (m / (2.0 * beta)) + (self.dt * dc * ((gamma /
                                                       (2 * beta)) - 1))

        # Iteration
        for j in range(1, accel.shape[0]):
            dp = (-1 * m  * ( accel[j,:] - accel[j-1,:]) ) + (var_a * v[:,j-1,:]) \
                            + (var_b * a[:,j-1,:])
            du = dp / kk

            dv = ( (gamma * du) / (beta * self.dt) ) - ( gamma * v[:,j-1,:] / beta ) + \
                      ( self.dt *  a[:,j-1,:] * ( 1 - (gamma/(2.0 * beta)) ) )

            da = (du /
                  (beta * self.dt**2)) - (v[:, j - 1, :] /
                                          (beta * self.dt)) - (a[:, j - 1, :] /
                                                               (2.0 * beta))

            d[:, j, :] = d[:, j - 1, :] + du
            v[:, j, :] = v[:, j - 1, :] + dv
            a[:, j, :] = a[:, j - 1, :] + da

        self.maxA = np.max(np.abs(a + accel), axis=1)
        self.maxV = np.max(np.abs(v), axis=1)
        self.maxD = np.max(np.abs(d), axis=1)

        return self.maxA
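A minimal scalar sketch of the same incremental average-acceleration Newmark scheme (gamma = 1/2, beta = 1/4) for a single SDOF under a base acceleration history; newmark_sdof and the sine input are illustrative, not from the source:

import numpy as np

def newmark_sdof(ag, dt, T=0.5, dr=0.05, gamma=0.5, beta=0.25):
    w = 2 * np.pi / T                      # angular frequency
    m, k = 1.0, w**2                       # unit mass, stiffness
    c = 2.0 * dr * m * w                   # damping coefficient
    n = ag.size
    d, v, a = np.zeros(n), np.zeros(n), np.zeros(n)
    a[0] = (-m * ag[0] - c * v[0] - k * d[0]) / m
    kk = k + gamma * c / (beta * dt) + m / (beta * dt**2)
    for j in range(1, n):
        dp = (-m * (ag[j] - ag[j - 1])
              + (m / (beta * dt) + gamma * c / beta) * v[j - 1]
              + (m / (2 * beta) + dt * c * (gamma / (2 * beta) - 1)) * a[j - 1])
        du = dp / kk
        dv = (gamma * du / (beta * dt) - gamma * v[j - 1] / beta
              + dt * a[j - 1] * (1 - gamma / (2 * beta)))
        da = du / (beta * dt**2) - v[j - 1] / (beta * dt) - a[j - 1] / (2 * beta)
        d[j], v[j], a[j] = d[j - 1] + du, v[j - 1] + dv, a[j - 1] + da
    return np.abs(d).max(), np.abs(v).max(), np.abs(a + ag).max()

t = np.arange(0.0, 10.0, 0.01)
maxD, maxV, maxA = newmark_sdof(np.sin(2 * np.pi * t), dt=0.01)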
Esempio n. 49
0
def get_normal(depth_refine, fx=-1, fy=-1, cx=-1, cy=-1, for_vis=True):
    res_y = depth_refine.shape[0]
    res_x = depth_refine.shape[1]

    # inpainting
    scaleOri = np.amax(depth_refine)
    print(scaleOri)

    inPaiMa = np.where(depth_refine == 0.0, 255, 0)
    inPaiMa = inPaiMa.astype(np.uint8)
    inPaiDia = 5.0
    depth_refine = depth_refine.astype(np.float32)
    depPaint = cv2.inpaint(depth_refine, inPaiMa, inPaiDia, cv2.INPAINT_NS)

    depNorm = depPaint - np.amin(depPaint)
    rangeD = np.amax(depNorm)
    depNorm = np.divide(depNorm, rangeD)
    depth_refine = np.multiply(depNorm, scaleOri)

    depth_inp = copy.deepcopy(depth_refine)

    centerX = cx
    centerY = cy

    constant = 1 / fx
    uv_table = np.zeros((res_y, res_x, 2), dtype=np.int16)
    column = np.arange(0, res_y)

    uv_table[:, :, 1] = np.arange(0, res_x) - centerX  # x-c_x (u)
    uv_table[:, :, 0] = column[:, np.newaxis] - centerY  # y-c_y (v)
    uv_table_sign = np.copy(uv_table)
    uv_table = np.abs(uv_table)

    # kernel = np.ones((5, 5), np.uint8)
    # depth_refine = cv2.dilate(depth_refine, kernel, iterations=1)
    # depth_refine = cv2.medianBlur(depth_refine, 5 )
    depth_refine = ndimage.gaussian_filter(depth_refine, 2)  # sigma=3)
    # depth_refine = ndimage.uniform_filter(depth_refine, size=11)

    # very_blurred = ndimage.gaussian_filter(face, sigma=5)
    v_x = np.zeros((res_y, res_x, 3))
    v_y = np.zeros((res_y, res_x, 3))
    normals = np.zeros((res_y, res_x, 3))

    dig = np.gradient(depth_refine, 2, edge_order=2)
    v_y[:, :, 0] = uv_table_sign[:, :, 1] * constant * dig[0]
    v_y[:, :, 1] = depth_refine * constant + (uv_table_sign[:, :, 0] *
                                              constant) * dig[0]
    v_y[:, :, 2] = dig[0]

    v_x[:, :,
        0] = depth_refine * constant + uv_table_sign[:, :,
                                                     1] * constant * dig[1]
    v_x[:, :, 1] = uv_table_sign[:, :, 0] * constant * dig[1]
    v_x[:, :, 2] = dig[1]

    cross = np.cross(v_x.reshape(-1, 3), v_y.reshape(-1, 3))
    norm = np.expand_dims(np.linalg.norm(cross, axis=1), axis=1)
    # norm[norm == 0] = 1

    cross = cross / norm
    cross = cross.reshape(res_y, res_x, 3)
    cross = np.abs(cross)
    cross = np.nan_to_num(cross)

    #cross[depth_refine <= 200] = 0  # 0 and near range cut
    cross[depth_refine > depthCut] = 0  # far range cut
    if not for_vis:
        scaDep = 1.0 / np.nanmax(depth_refine)
        depth_refine = np.multiply(depth_refine, scaDep)
        cross[:, :, 0] = cross[:, :, 0] * (1 - (depth_refine - 0.5)
                                           )  # nearer has higher intensity
        cross[:, :, 1] = cross[:, :, 1] * (1 - (depth_refine - 0.5))
        cross[:, :, 2] = cross[:, :, 2] * (1 - (depth_refine - 0.5))
        scaCro = 255.0 / np.nanmax(cross)
        cross = np.multiply(cross, scaCro)
        cross = cross.astype(np.uint8)

    return cross, depth_refine, depth_inp
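The normals above are built from depth gradients via a cross product of tangent vectors. The idea is easy to sanity-check on a synthetic tilted plane, where z(x, y) = a*x + b*y has the (unnormalized) normal (-a, -b, 1):

import numpy as np

a, b = 0.2, -0.1
y, x = np.mgrid[0:100, 0:120]
depth = a * x + b * y + 50.0

dzdy, dzdx = np.gradient(depth)          # axis 0 = rows (y), axis 1 = cols (x)
n = np.dstack((-dzdx, -dzdy, np.ones_like(depth)))
n /= np.linalg.norm(n, axis=2, keepdims=True)
print(n[50, 60])                         # ~ (-0.2, 0.1, 1) normalized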
Esempio n. 50
0
def plot_generated_toy_batch(X_real,
                             generator_model,
                             discriminator_model,
                             noise_dim,
                             gen_iter,
                             noise_scale=0.5):

    # Generate images
    X_gen = sample_noise(noise_scale, 10000, noise_dim)
    X_gen = generator_model.predict(X_gen)

    # Get some toy data to plot KDE of real data
    data = load_toy(pts_per_mixture=200)
    x = data[:, 0]
    y = data[:, 1]
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5

    # Peform the kernel density estimate
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)

    # Plot the contour
    fig = plt.figure(figsize=(10, 10))
    plt.suptitle("Generator iteration %s" % gen_iter,
                 fontweight="bold",
                 fontsize=22)
    ax = fig.gca()
    ax.contourf(xx,
                yy,
                f,
                cmap='Blues',
                vmin=np.percentile(f, 80),
                vmax=np.max(f),
                levels=np.linspace(0.25, 0.85, 30))

    # Also plot the contour of the discriminator
    delta = 0.025
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Create mesh
    XX, YY = np.meshgrid(np.arange(xmin, xmax, delta),
                         np.arange(ymin, ymax, delta))
    arr_pos = np.vstack((np.ravel(XX), np.ravel(YY))).T
    # Get Z = predictions
    ZZ = discriminator_model.predict(arr_pos)
    ZZ = ZZ.reshape(XX.shape)
    # Plot contour
    ax.contour(XX, YY, ZZ, cmap="Blues", levels=np.linspace(0.25, 0.85, 10))
    dy, dx = np.gradient(ZZ)
    # Add streamlines
    # plt.streamplot(XX, YY, dx, dy, linewidth=0.5, cmap="magma", density=1, arrowsize=1)
    # Scatter generated data
    plt.scatter(X_gen[:1000, 0],
                X_gen[:1000, 1],
                s=20,
                color="coral",
                marker="o")

    l_gen = plt.Line2D((0, 1), (0, 0),
                       color='coral',
                       marker='o',
                       linestyle='',
                       markersize=20)
    l_D = plt.Line2D((0, 1), (0, 0), color='steelblue', linewidth=3)
    l_real = plt.Rectangle((0, 0), 1, 1, fc="steelblue")

    # Create legend from custom artist/label lists
    # bbox_to_anchor = (0.4, 1)
    ax.legend([l_real, l_D, l_gen],
              ['Real data KDE', 'Discriminator contour', 'Generated data'],
              fontsize=18,
              loc="upper left")
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax + 0.8)
    plt.savefig("../../figures/toy_dataset_iter%s.jpg" % gen_iter)
    plt.clf()
    plt.close()
Esempio n. 51
0
ax.legend(loc='lower center')

plt.show()

# %%

# %%
# loss of normalization, or integral of antiderivative...
bayesian_denominator_nonpw = 1 - norm(evolution.T[NPLOTPOINTS - 1])**2

# %%
fig, ax = plt.subplots(figsize=(12, 8))
ax.set_xlabel('t')
ax.set_ylabel('Detection probability density')
ax.plot(times, -np.gradient(norms**2, times), c='b', linewidth=2)

# %%
labels = PROB_LABELS
colors = ["#cc1111", "#33aa33", "#1111cc"]

fig, ax = plt.subplots(figsize=(22, 3))

ax.stackplot(times_extended,
             np.abs(evolution_extended[0])**2,
             np.abs(evolution_extended[1])**2,
             np.abs(evolution_extended[2])**2,
             labels=labels,
             colors=colors)

ax.legend(loc='upper right')
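The curve plotted above is the detection-probability density -d/dt ||psi(t)||^2. For pure exponential decay the density is G*exp(-G*t) and integrates to ~1, which gives a quick check of the np.gradient construction (G and the grid here are hypothetical):

import numpy as np

G = 2.0
times = np.linspace(0.0, 10.0, 2001)
norms2 = np.exp(-G * times)              # surviving norm squared
density = -np.gradient(norms2, times)
print(np.sum(density) * (times[1] - times[0]))   # ~ 1 - exp(-20)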
Esempio n. 52
0
def imageProcessing2(imagename, filtertype, filterval):

    #main image processing function

    path2file = os.path.join('./static', imagename + '.jpg')
    im = plt.imread(path2file)
    im = im.astype(np.uint8)  # just in case: force 0-255 channels
    x, y, z = im.shape
    imgray = im.mean(axis=-1, keepdims=1)
    cmap_type = 'bwr'

    filterval = float(filterval)
    if request.method == "GET":
        if filtertype == 'grayscale':
            # already computed above
            processed_im = imgray[:, :, 0]  #im.mean(axis=-1,keepdims=1)

            cmap_type = 'gray'

        if filtertype == 'lowpass':
            # use gaussian convolution with a provided standard deviation

            processed_im = ndimage.gaussian_filter(input=im, sigma=filterval)

        if filtertype == 'dx':
            #gradient in x direction along each x 1D array
            processed_im = np.zeros(im.shape)
            for i in list(range(0, y)):
                processed_im[:, i, 0] = np.gradient(imgray[:, i, 0], 0.1)

            processed_im = processed_im[:, :, 0]

        if filtertype == 'dy':
            #gradient in y dir, along each 1d array
            processed_im = np.zeros(im.shape)
            for i in list(range(0, x)):
                processed_im[i, :, 0] = np.gradient(imgray[i, :, 0], 0.1)

            processed_im = processed_im[:, :, 0]

        if filtertype == 'rotate':
            # rotation in degrees
            processed_im = ndimage.rotate(im, filterval)
            processed_im = processed_im[:, :, 0]

        # stash image in memory rather than writing to disk
        # use PIL and IO encoding
        proc_im_pil = Image.fromarray(processed_im).convert('RGB')
        data = io.BytesIO()
        proc_im_pil.save(data, "JPEG")
        encoded_img_data = base64.b64encode(data.getvalue())

    ## pseudo page refresh -> option to reset results with a new query, then render the filter
    if request.method == "POST":
        picname = request.form['imagename']
        filtertype = request.form['filtertype']
        filterval = request.form['filterval']
        return redirect(
            url_for("imageProcessing2",
                    imagename=picname,
                    filtertype=filtertype,
                    filterval=filterval))

    # return full_filename image to server
    return render_template('imageproc.html',
                           img_data=encoded_img_data.decode('utf-8'),
                           data=meta_list)
Esempio n. 53
0
    plt.plot(e3d_t, e3d_dat, label=e3d_lab)

    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.legend(prop={"family": "serif", "size": 11})

    plt.savefig(outfile, bbox_inches="tight")
    plt.close()


FILEPATH_iSVV = "/media/yorgos/HardDrive2/TGV/iSVV/time_evol.dat"
iSVV_t, iSVV_enst, iSVV_ke = read_stats_second(FILEPATH_iSVV)
FILEPATH_SS = "/media/yorgos/HardDrive2/TGV/SS/time_evol.dat"

SS_t, SS_enst, SS_ke = read_stats_first(FILEPATH_SS)
SS_enst2 = -np.gradient(SS_ke, SS_t)

FILEPATH_Reference = "/media/yorgos/HardDrive2/TGV/DNS_reference/TGV_Re10000.dat"
A = np.genfromtxt(FILEPATH_Reference, skip_header=47, delimiter='')
Ref_t = A[:, 0]
Ref_ke = A[:, 1]
Ref_enst = A[:, 2]

fig = plt.figure(1, figsize=(10, 4), edgecolor='none')
plt.subplots_adjust(left=None,
                    bottom=None,
                    right=None,
                    top=None,
                    wspace=0.25,
                    hspace=None)
ax1 = fig.add_subplot(121)
Esempio n. 54
0
m = Basemap(
    llcrnrlon=lon_min,
    llcrnrlat=lat_min,
    urcrnrlon=lon_max,
    urcrnrlat=lat_max,
    projection='merc',
    resolution='h',
    area_thresh=1000.,
)
parallels = np.arange(round(lat_min, 0), lat_max + 2, 2)
meridians = np.arange(round(lon_max, 0), lon_min - 2, -2)

s1 = np.ma.empty(u.shape)  #[ydim,xdim])
for t in range(u.shape[0]):
    dudy, dudx = np.gradient(u[t, :, :], dy, dx)
    dvdy, dvdx = np.gradient(v[t, :, :], dy, dx)
    #s1 = np.ma.empty([ydim,xdim])
    J = np.array([[0, 1], [-1, 0]])
    for i in range(ydim):
        for j in range(xdim):
            if not any(np.ma.is_masked(val)
                       for val in (dudx[i, j], dudy[i, j], dvdx[i, j],
                                   dvdy[i, j], u[t, i, j], v[t, i, j])):
                Utemp = np.array([u[t, i, j], v[t, i, j]])
                Grad = np.array([[dudx[i, j], dudy[i, j]],
                                 [dvdx[i, j], dvdy[i, j]]])
                S = 0.5 * (Grad + np.transpose(Grad))
                s1[t, i, j] = -3600 * np.min(np.linalg.eig(S)[0])

            else:
                s1[t, i, j] = np.ma.masked
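The double loop above diagonalizes a 2x2 symmetric tensor at every grid point; that eigenvalue has a closed form, so the whole field can be computed without np.linalg.eig. A vectorized sketch (s1_field is an illustrative name; masked points propagate as masked values):

import numpy as np

def s1_field(u, v, dx, dy):
    # strain-rate tensor S = [[a, b], [b, d]]; its eigenvalues are
    # (a + d)/2 +/- sqrt(((a - d)/2)**2 + b**2)
    dudy, dudx = np.gradient(u, dy, dx)
    dvdy, dvdx = np.gradient(v, dy, dx)
    a, d = dudx, dvdy
    b = 0.5 * (dudy + dvdx)
    lam_min = 0.5 * (a + d) - np.sqrt((0.5 * (a - d))**2 + b**2)
    return -3600.0 * lam_min             # same per-hour scaling as the loop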
delta_alpha = 1.
# Factor of refinement of the strain rate increase
delta_alpha_factor = 50.
# Limit of the refinement: Minimum normalized strain rate increase
delta_alpha_min = .001
# Limit of the Temperature decrease
delta_T_min = 1  # K

# Iteration indicator
n = 0
# Indicator of the latest flame still burning
n_last_burning = 0
# List of peak temperatures
T_max = [np.max(f.T)]
# List of maximum axial velocity gradients
a_max = [np.max(np.abs(np.gradient(f.velocity) / np.gradient(f.grid)))]

# Simulate counterflow flames at increasing strain rates until the flame is
# extinguished. To achieve a fast simulation, an initial coarse strain rate
# increase is set. This increase is reduced after an extinction event and
# the simulation is again started based on the last burning solution.
# The extinction point is considered to be reached if the abortion criteria
# on strain rate increase and peak temperature decrease are fulfilled.
while True:
    n += 1
    # Update relative strain rates
    alpha.append(alpha[n_last_burning] + delta_alpha)
    strain_factor = alpha[-1] / alpha[n_last_burning]
    # Create an initial guess based on the previous solution
    # Update grid
    # Note that grid scaling changes the diffusion flame width
Esempio n. 56
0
def Seaangles_mod(numviews, thresholdimg):
    '''
    From Seaangles_mod.m

    Takes an image and extracts its Opening Angle Map.

    Returns shoreline and shallow-sea angle maps.
    '''

    Sx, Sy = np.gradient(thresholdimg)
    G = np.sqrt(Sx**2 + Sy**2)

    edges = (G > 0) & (thresholdimg > 0)

    bordermap = np.pad(np.zeros_like(edges), 1, 'edge')
    bordermap[:-2, 1:-1] = edges
    bordermap[0, :] = 1

    points = np.fliplr(np.array(np.where(edges > 0)).T)
    hull = ConvexHull(points, qhull_options='Qc')

    sea = np.fliplr(np.array(np.where(thresholdimg > 0.5)).T)

    points_to_test = [Point(i[0], i[1]) for i in sea]
    polygon = Polygon(points[hull.vertices]).buffer(0.01)

    In = np.array([polygon.contains(pt) for pt in points_to_test])
    Shallowsea_ = sea[In]

    seamap = np.zeros(bordermap.shape)
    flat_indices = [np.ravel_multi_index(x, seamap.shape)
                    for x in np.fliplr(Shallowsea_)]
    seamap.flat[flat_indices] = 1
    seamap[:3, :] = 0

    Deepsea_ = sea[~In]
    Deepsea = np.zeros((7, len(Deepsea_)))
    Deepsea[:2, :] = np.flipud(Deepsea_.T)
    Deepsea[-1, :] = 200.  # where does this 200 come from?

    Shallowsea = np.array(np.where(seamap > 0.5))
    shoreandborder = np.array(np.where(bordermap > 0.5))

    c1 = len(Shallowsea[0])
    c2 = len(shoreandborder[0])
    maxtheta = np.zeros((numviews, c1))

    for i in range(c1):

        diff = shoreandborder - Shallowsea[:, i, np.newaxis]
        x = diff[0]
        y = diff[1]

        angles = np.arctan2(x, y)
        angles = np.sort(angles) * 180. / np.pi

        dangles = angles[1:] - angles[:-1]
        dangles = np.concatenate(
            (dangles, [360 - (angles.max() - angles.min())]))
        dangles = np.sort(dangles)

        maxtheta[:, i] = dangles[-numviews:]

    allshore = np.array(np.where(edges > 0))
    c3 = len(allshore[0])
    maxthetashore = np.zeros((numviews, c3))

    for i in range(c3):

        diff = shoreandborder - allshore[:, i, np.newaxis]
        x = diff[0]
        y = diff[1]

        angles = np.arctan2(x, y)
        angles = np.sort(angles) * 180. / np.pi

        dangles = angles[1:] - angles[:-1]
        dangles = np.concatenate(
            (dangles, [360 - (angles.max() - angles.min())]))
        dangles = np.sort(dangles)

        maxthetashore[:, i] = dangles[-numviews:]

    waves1 = np.vstack([
        np.hstack([Shallowsea, Deepsea[:2, :]]),
        np.hstack([maxtheta.sum(axis=0), Deepsea[-1, :]])
    ])

    waves1s = sparse.csr_matrix((waves1[2, :], (waves1[0, :], waves1[1, :])),
                                shape=thresholdimg.shape)

    shoreline = np.vstack([allshore, maxthetashore.sum(axis=0)])

    picshore = sparse.csr_matrix(
        (shoreline[2, :], (shoreline[0, :], shoreline[1, :])),
        shape=thresholdimg.shape)

    shoreangles = np.vstack([allshore, maxthetashore])
    seaangles = np.hstack([np.vstack([Shallowsea, maxtheta]), Deepsea])

    return shoreangles, waves1s, seaangles, picshore
Esempio n. 57
0
        for row in np.arange(Nx):
            for col in np.arange(Ny):
                x = [xc[row], yc[col], zc[sl]]
                if (xc[row]**2 + yc[col]**2 + zc[sl]**2) <= r**2:
                    phantom[row, col, sl] = reconstruct_phantom(obj, x, dpc)
    '''plt.figure()
    extent = [-r/2.0, r/2.0, -r/2.0, r/2.0]
    res = plt.imshow((phantom[:,:,0]), cmap=plt.cm.gray, extent=extent, \
                     interpolation='none')
    plt.title('Phantom')
    plt.colorbar()
    plt.show()   '''

    # compute gradient of refraction index decrement
    # centered derivatives
    gradPhantom = array(np.gradient(phantom, dx, dy, dz))
    # the same array is used whether or not dpc is set
    obj1 = phantom

    # source point
    P = 0.1
    s = array([0])
    sp = array([R * np.cos(s), R * np.sin(s), P / (2.0 * np.pi) * s],
               dtype=TYPE)
    projection = siddon_cone_beam_projection(obj1, sp, s, shift_detector,
                                             alpha, w, Nr, Nc, r, D, Nx, Ny,
                                             Nz, dx, dy, dz, bx, by, bz, dpc)

    print(projection)
Esempio n. 58
0
    def simulate(self, track):
        # Calculate the first and second derivative of the points
        dX = np.gradient(self.xyz, axis=0)
        ddX = np.gradient(dX, axis=0)
              
        
        s = mag(dX)             #distance between points
        # magnitude of curvature
        k = mag(np.cross(dX, ddX))/mag(dX)**3
        
        T = dX / s[:,None]      #unit tangent (direction of travel)
        B = np.cross(dX, ddX)   #binormal
        B = B / mag(B)[:,None]          #unit binormal
        N = np.cross(B, T)      #unit normal vector
                
        # direction of curvature  (normal vector with magnitude 1/R)
        Nk = N * k[:,None]
        
        # car and track share tangent vector. We're not flying
        Tt = T
        
        #Rotate Tt 90deg CW in xy-plane
        Bt = Tt[:,[1, 0, 2]]
        Bt[:,1] *= -1        
        Bt[:,2] = self.slope         #align Bt with the track and normalize
        Bt = Bt / mag(Bt)[:,None]
        
        Nt = np.cross(Bt, Tt)
        
        proj_car_axis = lambda v: np.c_[dot(v, Tt), dot(v, Bt), dot(v, Nt)]
        k_car = proj_car_axis(Nk)          #curvature projected in car axis [lon, lat, z]
        g_car = proj_car_axis(np.array([0, 0, g])[None,:])   #direction of gravity in car axis [lon, lat, z]
        
     
#        k = kt[:,1]
#        g_lat = gt[:,1]
#       
        v_max = ((acc_grip_max - g_car[:,1]) / abs(k_car[:,1]).clip(1e-3))**0.5
        
        i = len(v_max)
        v_a = np.zeros(i)  #simulated speed maximum acceleration
        v_b = np.zeros(i)  #simulated speed maximum braking
        
      
        for i in range(-800,i):  #negative index to simulate running start....
            j = i-1 #index to previous timestep
            ## max possible speed accelerating out of corners
            if v_a[j] < v_max[j]:     #check if previous speed was lower than max

                acc_lat = v_a[j]**2 * k_car[j,1] + g_car[j,1]                  #calc lateral acceleration based on
                acc_lon = acc_lon_max / acc_grip_max * (acc_grip_max**2 - acc_lat**2)**0.5   #grip circle (no downforce accounted for)
                acc_lon -=  v_a[j]**2 * c_drag                                 #aerodynamic drag + 
                acc_lon -=  v_a[j] * c_roll                                    #rolling resistance + 
                acc_lon -=  g_car[j,0]                                         #gravity up/down hill
                v_a[i] =  min( (v_a[j]**2 + 2*acc_lon * s[j])**0.5 ,  v_max[i])
            else:
                #acc_lon = 0
                v_a[i] =  min( v_a[j] ,  v_max[i])

            ## max possible speed braking into corners  (backwards lap)
            if v_b[j] < v_max[-i]:
                acc_lat = v_b[j]**2 * k_car[-i,1] + g_car[-i,1] 
                acc_lon = acc_lon_min  / acc_grip_max * (acc_grip_max**2 - acc_lat**2)**0.5
                acc_lon +=  v_b[j]**2 * c_drag
                acc_lon +=  v_b[j] * c_roll 
#                acc_lon +=  g_car[j,0]
                v_b[i] =   min( (v_b[j]**2 + 2*acc_lon * s[::-1][j])**0.5 ,  v_max[::-1][i])
            else:
                #acc_lon = 0
                v_b[i] =  min( v_b[j] ,  v_max[::-1][i])
    
    
        v_b = v_b[::-1]  # flip back to forward order
        self.speed = np.fmin(v_a, v_b)

        self.a_lat = self.speed**2 * Nk[:,1]
        self.a_lon = np.gradient(self.speed, s.cumsum())*self.speed

#        self.Nk = Nk
#        self.Nt = Nt
        self.Bt = Bt
        
        self.s = s
        

        self.laptime = sum(s) / self.speed.mean()  # approximate lap time: total distance / mean speed
        return self.laptime
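simulate() limits speed with a two-pass scheme: a forward pass caps acceleration out of corners, a backward pass caps braking into them, and the attainable profile is their pointwise minimum. A stripped-down sketch of that idea (speed_profile and the constant-acceleration limits are illustrative, not from the source):

import numpy as np

def speed_profile(v_max, ds, a_acc, a_brake):
    n = v_max.size
    v_a, v_b = np.zeros(n), np.zeros(n)
    v_a[0], v_b[0] = v_max[0], v_max[-1]
    for i in range(1, n):
        # forward: accelerate where possible, never exceed the corner limit
        v_a[i] = min(np.sqrt(v_a[i - 1]**2 + 2 * a_acc * ds), v_max[i])
        # backward: the same pass over the reversed track = braking limit
        v_b[i] = min(np.sqrt(v_b[i - 1]**2 + 2 * a_brake * ds), v_max[::-1][i])
    return np.fmin(v_a, v_b[::-1])

v = speed_profile(np.array([50.0, 20.0, 50.0, 50.0, 15.0, 50.0]),
                  ds=10.0, a_acc=4.0, a_brake=6.0)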
Esempio n. 59
0
def calculate_thermal_electronic_contribution(dos,
                                              t0=0,
                                              t1=2000,
                                              td=5,
                                              xdn=-100,
                                              xup=100,
                                              ndosmx=10001,
                                              dope=0.0,
                                              natom=1,
                                              gaussian=1000):
    """
    Calculate thermal electronic contribution from pymatgen Dos objects

    Parameters
    ----------
    dos : pymatgen.electronic_structure.dos.Dos
        DOS object
    t0 : float
        Start temperature
    t1 : float
        Final temperature
    td : float
        Temperature step size
    xdn : float
        Minimum energy of the DOS to consider
    xup : float
        Maximum energy of the DOS to consider
    ndosmx : int
        Size of grid to interpolate the DOS on
    dope : float
        Doping level
    natom : int
        Number of atoms in the cell
    gaussian : int
        Number of grid points in the Gaussian mesh near the Fermi energy

    Returns
    -------
    results : dict
        Dictionary with keys 'temperature', 'internal_energy', 'free_energy',
        'entropy', 'heat_capacity', 'chemical_potential' and 'n_electrons'.
    """
    n_electrons, fermi_shift, e, dos = getdos(dos, xdn, xup, dope, ndosmx,
                                              gaussian)

    # for all temperatures
    nT = int(np.round((t1 - t0) / td + 1.0))
    T = np.arange(t0, t1 + td, td)
    chemical_potential = np.zeros(nT)
    gmu0 = 0.0
    beta = 1.0 / (T * k_B)
    beta[0] = 1.0e30
    for i, t in enumerate(T):
        chemical_potential[i] = brentq(gfind,
                                       gmu0 - 10.0,
                                       gmu0 + 10.0,
                                       args=(e, dos, n_electrons, beta[i]),
                                       maxiter=10000)
    U_el = calculate_internal_energy(chemical_potential[:, np.newaxis],
                                     e[np.newaxis, :], dos[np.newaxis, :],
                                     beta[:, np.newaxis])
    S_el = calculate_entropy(chemical_potential[:,
                                                np.newaxis], e[np.newaxis, :],
                             dos[np.newaxis, :], beta[:, np.newaxis])
    C_el = np.gradient(U_el, td, edge_order=2)
    C_el[0] = 0

    # construct a dictionary of results
    results = {
        'temperature': T,
        'internal_energy': U_el / natom,
        'free_energy': (U_el - T * S_el - U_el[0]) / natom,
        'entropy': S_el / natom,
        'heat_capacity': C_el / natom,
        'chemical_potential': chemical_potential,
        'n_electrons': n_electrons,
    }

    return results
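C_el above is the temperature derivative of U_el taken with second-order finite differences on the uniform T grid. A quick check on a case with a known answer: a classical oscillator has U = k_B*T, so C = k_B at every temperature:

import numpy as np

k_B = 8.617e-5               # eV/K
td = 5.0
T = np.arange(0.0, 2000.0 + td, td)
U = k_B * T
C = np.gradient(U, td, edge_order=2)
print(np.allclose(C, k_B))   # True: the stencil is exact for linear U(T)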
Esempio n. 60
0
def siddon_cone_beam_projection_flat_detector(objFileName,
                                              sp,
                                              s,
                                              u,
                                              w,
                                              Nr,
                                              Nc,
                                              r,
                                              R,
                                              D,
                                              H,
                                              Nx,
                                              Ny,
                                              Nz,
                                              dx,
                                              dy,
                                              dz,
                                              bx,
                                              by,
                                              bz,
                                              dpc=False):
    '''
    Computes 2D projection for cone beam x-ray source
    Input: obj - phantom (voxel values = attenuation coef., absorbtion) or
                 phantom (voxel values = refraction ind. decrement, DPC) 
           sp - array of source point position(s) 
           s - array of angular parameters for source point position(s)
           u, w - pixel coordinates for flat detector
           Nr/Nc - number of detector rows/ columns  
           r - radius of the ROI
           D - curved detector's radius 
           Nx, Ny, Nz - number of voxels
           dx, dy, dz - dimensions of voxels
           bx, by, bz - intersection of first x, y and z planes  
           delta - shift between source point projection and center of 
                   detector 
           dpc - use attenuation coefficient (if False) or 
                   use refraction index decrement (if True) to compute x-ray 
                   transform
    Output: Df - 2d projection measurments collected at particular source point
                 position
    '''

    Np = len(s)  # number of projections

    Df = np.zeros((Np, Nr, Nc), dtype=TYPE)
    for p in range(Np):
        print(p)
        # detector coordinate system
        eu = array([-sin(s[p]), cos(s[p]), 0], dtype=TYPE)
        ev = array([-cos(s[p]), -sin(s[p]), 0], dtype=TYPE)
        ez = array([0, 0, 1.0], dtype=TYPE)

        # partially load phantom data and compute gradient
        Nkmin, Nkmax = slice_range(sp[2, p], Nz, H, D, R, r)
        #load object data
        phantomf = h5py.File(objFileName, 'r')
        obj = phantomf['phantom'][:, :, range(int(Nkmin), int(Nkmax) + 1)]
        phantomf.close()
        # compute gradient of object
        if dpc:
            gradobj = np.array(np.gradient(obj, dx, dy, dz))

        refv = [0, 0, 0]
        for i in range(Nr):
            for j in range(Nc):
                theta = ray_flat_detector(D, u[j], w[i], eu, ev, ez)
                dp = theta + sp[:, p]
                if dpc:
                    # gradient projection onto vector which is perpendicular
                    # to ray direction (theta) and gradient line direction (ez)
                    # refv - vector pointing towards x-ray refraction
                    #      - equal to np.cross(theta, ez)
                    theta = theta / sqrt(sum(theta**2))
                    refv = np.zeros_like(theta)
                    refv[0] = theta[1]
                    refv[1] = -theta[0]
                    #print refv
                    Df[p, Nr - 1 - i,
                       j] = siddon_xray_transform(gradobj, dp, sp[:, p], r, D,
                                                  Nx, Ny, Nkmax, dx, dy, dz,
                                                  bx, by, bz, Nkmin, refv, dpc)

                else:
                    Df[p, Nr - 1 - i,
                       j] = siddon_xray_transform(obj, dp, sp[:, p], r, D, Nx,
                                                  Ny, Nkmax, dx, dy, dz, bx,
                                                  by, bz, Nkmin, refv, dpc)

    return Df