Example #1
def read_input_spectra(fname, numbvals):
    """Read a grid of model spectra from the <fname> file.

    :param fname: name of the file containing the grid of model spectra.
    :param numbvals: list with the number of grid values for each model parameter.
    :rtype: list of tuples containing model spectra.
    """

    d = np.loadtxt(fname)
    parameter_combinations = np.prod(numbvals)
    numspectra = len(d.T) - 1
    # spectra in columns, 1st column is energy grid, skip it
    if numspectra != parameter_combinations:
        raise ValueError(
            fname
            + ": No. of spectra "
            + np.str(numspectra)
            + ", different from declared param combinations "
            + np.str(parameter_combinations)
            + "\n"
        )
    input_spectra = []
    for i in range(len(d.T) - 1):
        # d[:-1] - match no. of energy bins;
        # T[1:] - skip the 1st column with energy vector
        input_spectra.append(tuple(d[:-1].T[1:][i]))

    return input_spectra
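A minimal usage sketch (the file name and parameter grid below are hypothetical): the file is expected to hold the energy grid in its first column and one model spectrum per remaining column, with the product of numbvals matching the number of spectrum columns.

# Hypothetical grid: 3 parameters with 4, 3 and 2 values -> 24 spectrum columns expected
numbvals = [4, 3, 2]
spectra = read_input_spectra("model_grid.dat", numbvals)
print(len(spectra))     # 24 tuples, one per parameter combination
print(len(spectra[0]))  # energy bins retained per spectrum (rows in the file minus one)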
Example #2
File: species.py Project: jmcgover/cplop
def main():
    if len(sys.argv) < 4:
        print_usage("Not enough arguments!")
    alpha = numpy.float64(sys.argv[1])
    filename = numpy.str(sys.argv[2])
    k_limit = 12
    species = []
    for i, s in enumerate(sys.argv[3:]):
        species.append(numpy.str(s))
    species.sort()
    print("species",str(species))
    print("Reading %s" % filename)
    data = atpy_csv(filename)

    # species is a list of names; check for the special 'Overall' entry
    if 'Overall' in species or 'overall' in species:
        data = filter_by_keyvalue(data, 'alpha', alpha)

    metrs = []
    for s in species:
        print("Calculating %s at %s" % ('mean', s))
        metrs.append(calc_metrics(data, k_limit, alpha, s))
        print(metrs[-1])

    all_metrics = dict(zip(species, metrs))

    species_str = str(species).replace(']', '').replace('[', '').replace("'", "")
    alpha_str = str(numpy.around(alpha, decimals=2))
    plot_title = 'Algorithm: ' + filename.split('.')[0]

    metr_fname = "./figures/%s-ALL-metrics-%d-%s.pdf" % (species_str, k_limit, alpha_str)
    plot_metrics_per_k(species, alpha, all_metrics, plot_title, metr_fname)

    pvr_fname = "./figures/%s-ALL-pvr-%d-%s.pdf" % (species_str, k_limit, alpha_str)
    print(pvr_fname)
    plot_precision_v_recall(species, alpha, all_metrics, plot_title, pvr_fname)

    return 0
Example #3
    def test_RTcoefs(self, kind_compress='Vinet', compress_order=3,
                     compress_path_const='T', kind_RTpoly='V',
                     RTpoly_order=5, natom=1):

        TOL = 1e-3

        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_order=compress_order,
                                compress_path_const=compress_path_const,
                                kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order,
                                natom=natom)

        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.5,1.2,Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]

        acoef_a, bcoef_a = eos_mod.calc_RTcoefs(Vmod_a)
        acoef_deriv_a, bcoef_deriv_a = eos_mod.calc_RTcoefs_deriv(Vmod_a)

        a_abs_err, a_rel_err, a_range_err = self.numerical_deriv(
            Vmod_a, acoef_a, acoef_deriv_a, scale=1)

        b_abs_err, b_rel_err, b_range_err = self.numerical_deriv(
            Vmod_a, bcoef_a, bcoef_deriv_a, scale=1)


        assert a_range_err < TOL, 'range error in acoef, ' + \
            np.str(a_range_err) + ', must be less than TOL, ' + np.str(TOL)

        assert b_range_err < TOL, 'range error in bcoef, ' + \
            np.str(b_range_err) + ', must be less than TOL, ' + np.str(TOL)
Example #4
    def _calc_test_heat_capacity(self, kind_compress='Vinet',
                                 compress_path_const='T',
                                 kind_gamma='GammaFiniteStrain',
                                 kind_RTpoly='V', RTpoly_order=5, natom=1,
                                 kind_electronic='None',
                                 apply_electronic=False):

        TOL = 1e-3
        Nsamp = 10001

        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)

        Tmod_a = np.linspace(3000.0, 8000.0, Nsamp)

        V0, = eos_mod.get_param_values(param_names=['V0'])
        # Vmod = V0*(0.6+.5*np.random.rand(Nsamp))
        Vmod = V0*0.7

        thermal_energy_a = eos_mod.thermal_energy(Vmod, Tmod_a)
        heat_capacity_a = eos_mod.heat_capacity(Vmod, Tmod_a)

        abs_err, rel_err, range_err = self.numerical_deriv(
            Tmod_a, thermal_energy_a, heat_capacity_a, scale=1)

        Cvlimfac = eos_mod.calculators['thermal']._get_Cv_limit()
        assert rel_err < TOL, 'rel-error in Cv, ' + np.str(rel_err) + \
            ', must be less than TOL, ' + np.str(TOL)
Example #5
    def calc_test_RTcoefs(self, kind_compress='Vinet',
                           compress_path_const='T',
                           kind_gamma='GammaFiniteStrain', kind_RTpoly='V',
                           RTpoly_order=5, natom=1, kind_electronic='None',
                           apply_electronic=False):

        TOL = 1e-3

        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)

        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.5,1.2,Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]

        bcoef_a = eos_mod.calc_RTcoefs(Vmod_a)
        bcoef_deriv_a = eos_mod.calc_RTcoefs_deriv(Vmod_a)

        b_abs_err, b_rel_err, b_range_err = self.numerical_deriv(
            Vmod_a, bcoef_a, bcoef_deriv_a, scale=1)

        assert b_range_err < TOL, 'range error in bcoef, ' + \
            np.str(b_range_err) + ', must be less than TOL, ' + np.str(TOL)
Example #6
    def test_press_simple(self, kind_compress='Vinet',
                          compress_path_const='T',
                          kind_gamma='GammaFiniteStrain',
                          kind_RTpoly='V', RTpoly_order=5, natom=1,
                          kind_electronic='CvPowLaw', apply_electronic=True):

        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)

        refstate_calc = eos_mod.calculators['refstate']
        T0 = refstate_calc.ref_temp()
        V0 = refstate_calc.ref_volume()
        S0 = refstate_calc.ref_entropy()
        # V0, T0, S0 = eos_mod.get_param_values(param_names=['V0','T0','S0'])

        Vmod_a = np.linspace(.7,1.2,Nsamp)*V0
        T = 8000
        dV = Vmod_a[1] - Vmod_a[0]

        P_a = eos_mod.press(Vmod_a, T)
        F_a = eos_mod.helmholtz_energy(Vmod_a, T)
        abs_err, rel_err, range_err = self.numerical_deriv(
              Vmod_a, F_a, P_a, scale=-core.CONSTS['PV_ratio'])

        S_a = eos_mod.entropy(Vmod_a, T)


        assert abs_err < TOL, ('abs error in Press, ' + np.str(abs_err) +
                                 ', must be less than TOL, ' + np.str(TOL))
Example #7
    def _calc_test_heat_capacity(self, kind_thermal='Debye',
                                 kind_gamma='GammaPowLaw',
                                 kind_compress='Vinet',
                                 compress_path_const='T', natom=1):

        TOL = 1e-3
        Nsamp = 10001

        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)

        Tmod_a = np.linspace(300.0, 3000.0, Nsamp)

        V0, = eos_mod.get_param_values(param_names=['V0'])
        # Vmod_a = V0*(0.6+.5*np.random.rand(Nsamp))
        Vmod = V0*0.9

        thermal_energy_a = eos_mod.thermal_energy(Vmod, Tmod_a)
        heat_capacity_a = eos_mod.heat_capacity(Vmod, Tmod_a)

        abs_err, rel_err, range_err = self.numerical_deriv(
            Tmod_a, thermal_energy_a, heat_capacity_a, scale=1)

        Cvlimfac = eos_mod.calculators['thermal']._get_Cv_limit()
        assert rel_err < TOL, 'rel-error in Cv, ' + np.str(rel_err) + \
            ', must be less than TOL, ' + np.str(TOL)
Example #8
 def update_task(self, filename):
     #set scheduled
     with open(filename) as f:
         d = dict(filter(None, csv.reader(f,  delimiter=' ', skipinitialspace=True))) 
         taskid = d['taskID']
         AST= d['AST']                 
         f.close()
 
     #update task status in emoncms
     h = httplib2.Http("/tmp/emoncms/.cache")
     minutes = (np.int(AST) % 3600) // 60  # integer division keeps whole minutes in Python 3
     hours = np.int(AST) // 3600
          
     request = "{'status':1,'AST':'"+np.str(hours)+":"+np.str(minutes)+"'}"
     h.request("http://localhost/emoncms/mas/update.json?id="+taskid+"&json="+request+"&apikey="+self.apikey, "GET")
     sys.stderr.write("http://localhost/emoncms/mas/update.json?id="+taskid+"&json="+request+"&apikey="+self.apikey)
     #delay should be not 20 but AST-Time.time
     now = datetime.datetime.now()
     midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
     seconds = (now - midnight).seconds
     countdown = np.int(AST)-seconds
     #countdown =20
     if(countdown > 0):
         t=Timer(countdown, self.taskexec, [taskid])
         t.start()
         self.scheduled[taskid]=0
Example #9
File: waves.py Project: vilandra/pynmd
def wave_length(period,h):
    '''
    Compute wave length using linear wave theory
    
    Parameters
    ----------
    period   : wave period [s]
    h        : water depth [m]
    
    Results
    -------
    wl_int   : real wave length [m]
    
    Screen output
    -------------
    wl_deep  : deep water wave length [m]
    wl_sha   : shallow water wave length [m]
    
    '''
    
    wl_deep = 9.81 * period**2 / 2.0 / np.pi
    wl_sha = period * np.sqrt(9.81 * h)
    k = dispersion(period,h)
    wl_int = 9.81 / 2.0 / np.pi * period**2 * np.tanh(k*h)
    
    print(' ')
    print('---------------------------------------------------------')
    print('Wave Length deep water approx      = ' + np.str(wl_deep) + ' m')
    print('Wave Length shallow water approx   = ' + np.str(wl_sha) + ' m')
    print('Wave Length linear wave theory     = ' + np.str(wl_int) + ' m')
    print('---------------------------------------------------------')
    print(' ')
    
    return wl_int
Example #10
def load_fortranfile(T):

	if T < 100:
		file1 = '../outputs/psi1/psi1_0'+np.str(T)+'.bin'
		file2 = '../outputs/psi2/psi2_0'+np.str(T)+'.bin'

	else:
		file1 = '../outputs/psi1/psi1_'+np.str(T)+'.bin'
		file2 = '../outputs/psi2/psi2_'+np.str(T)+'.bin'		


	PSI1 = fortranfiles.FortranFile(file1) 
	PSI1 = PSI1.readReals()


	PSI2 = fortranfiles.FortranFile(file2) 
	PSI2 = PSI2.readReals()



	# Redimensionalizing files
	PSI1 = PSI1.reshape((257,512))
	PSI2 = PSI2.reshape((257,512))



	return PSI1,PSI2
Example #11
File: waves.py Project: garciaga/pynmd
def wave_length(period, h, verbose=True):
    """
    Compute wave length using linear wave theory

    Parameters
    ----------
    period   : wave period [s]
    h        : water depth [m]

    Results
    -------
    wl_int   : real wave length [m]

    Screen output
    -------------
    wl_deep  : deep water wave length [m]
    wl_sha   : shallow water wave length [m]

    """

    wl_deep = 9.81 * period ** 2 / 2.0 / np.pi
    wl_sha = period * np.sqrt(9.81 * h)
    k = dispersion(period, h)
    wl_int = 9.81 / 2.0 / np.pi * period ** 2 * np.tanh(k * h)

    if verbose:
        print(" ")
        print("---------------------------------------------------------")
        print("Wave Length deep water approx      = " + np.str(wl_deep) + " m")
        print("Wave Length shallow water approx   = " + np.str(wl_sha) + " m")
        print("Wave Length linear wave theory     = " + np.str(wl_int) + " m")
        print("---------------------------------------------------------")
        print(" ")

    return wl_int
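A quick usage sketch (illustrative numbers; dispersion is assumed to be available from the same module, as above):

# 10 s wave in 25 m of water: prints the deep-water estimate g*T**2/(2*pi) ~ 156 m,
# the shallow-water estimate T*sqrt(g*h) ~ 157 m, and returns the linear-theory value
wl = wave_length(10.0, 25.0, verbose=True)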
Example #12
    def calc_test_press(self, path_const='T'):

        TOL = 1e-3

        Nsamp = 10001
        eos_mod = self.load_eos(path_const=path_const)

        V0, = eos_mod.get_param_values(param_names='V0')
        V0 += -.137
        eos_mod.set_param_values(V0,param_names='V0')

        V0get, = eos_mod.get_param_values(param_names='V0')

        assert V0 == V0get, 'Must be able to store and retrieve non-integer values'

        assert np.abs(eos_mod.press(V0))<TOL/100,(
            'pressure at V0 must be zero by definition'
        )

        Vmod_a = np.linspace(.7,1.2,Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]

        press_a = eos_mod.press(Vmod_a)
        energy_a = eos_mod.energy(Vmod_a)

        abs_err, rel_err, range_err = self.numerical_deriv(
            Vmod_a, energy_a, press_a, scale=-core.CONSTS['PV_ratio'])

        assert range_err < TOL, 'range error in Press, ' + np.str(range_err) + \
            ', must be less than TOL, ' + np.str(TOL)
Example #13
def freq_spec_1d(eta,dt=1,verbose=True):
    """
    Computes the frequency spectrum from a given time series.
    
    freq,spec = freq_spec_1d(eta,dt,verbose)
    
    PARAMETERS:
    -----------
    eta      : Time series of water surface elevation [m]
    dt       : Time step [s]
    verbose  : Display computed bulk parameters to the screen
    
    RETURNS:
    --------
    freq     : Frequency vector
    spec     : Variance spectrum (Power spectrum)
    
    NOTES:
    ------
    This is essentially a copy of gsignal.psdraw. If results differ, trust gsignal;
    this code will not be updated.
    """
    
    # Remove mean
    eta -= eta.mean()

    # Compute record length
    N = eta.shape[0]
    
    # Compute fourier frequencies
    fj = np.fft.fftfreq(N,dt)

    # Compute power spectral density (Cooley-Tukey Method)
    yf = np.fft.fft(eta)/N
    psd = N*dt*yf*np.conjugate(yf)

    # One sided psd from dft
    if np.mod(N,2) == 0:
        sf = np.concatenate((np.array([psd[0]]),2.0*psd[1:N//2],
                             np.array([psd[N//2]])))
        freq_amp = np.abs(np.concatenate((np.array([fj[0]]),fj[1:N//2],
                                          np.array([fj[N//2]]))))
    else:
        sf = np.concatenate((np.array([psd[0]]),2.0*psd[1:(N+1)//2]))
        freq_amp = np.abs(np.concatenate((np.array([fj[0]]),fj[1:(N+1)//2])))

    sf = sf.real
    
    if verbose:
        print("===============================================")
        print("Bulk Wave Parameters:")
        print("Hs = " + np.str(4.004*np.sqrt(np.trapz(sf,freq_amp))) + "m")
        print("H1 = "+np.str(4.004*np.sqrt(np.trapz(sf,freq_amp))*2.0/3.0)+"m")
        print("Spectral Parameters:")
        print("Nyquist Frequency = " + np.str(1.0/(2.0*dt)) + "Hz")
        print("Frequency interval = " + np.str(1.0/(N*dt)) + "Hz")
        print("===============================================")

    # End of function
    return freq_amp,sf
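A minimal check with a synthetic record (assuming numpy is imported as np, as in the snippet): a sine wave at an exact Fourier frequency puts its variance into a single bin, and the integrated spectrum recovers that variance.

# 1024 s of a 0.125 Hz, 0.5 m amplitude sine wave sampled at dt = 1 s
t = np.arange(1024.0)
eta = 0.5 * np.sin(2.0 * np.pi * 0.125 * t)
freq, spec = freq_spec_1d(eta, dt=1, verbose=False)
print(np.trapz(spec, freq))  # ~0.125 m^2, the variance of the input signal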
Example #14
File: roms_post.py Project: vilandra/pynmd
def roms_to_swan_bathy_curv(hisfile,outfld):
  ''' 
  Generate a SWAN bathymetry file from either a ROMS history or bathymetry input
  file. 
  
  roms_to_swan_bathy_curv(hisfile,outfld)
  
  Parameters
  ----------
  hisfile  : ROMS history or bathymetry input netCDF file
  outfld   : Folder to save the output files
  
  Returns
  -------
  Two text files (swan_bathy.bot, swan_coord.grd) that contain the bathymetry 
  and coordinates of the grid for SWAN input. 
  
  Notes
  -----
    
    
  '''  
   
  # Load variables of interest from the ocean_his.nc file
  ncfile = netCDF4.Dataset(hisfile,'r')  
  h = ncfile.variables['h'][:]
  x_rho = ncfile.variables['x_rho'][:]
  y_rho = ncfile.variables['y_rho'][:]  
  ncfile.close()
  
   
  # Print text file with extended and interpolated bathymetry
  fid = open(outfld+'/swan_bathy.bot', 'w')  
  for aa in range(h.shape[0]):
      for bb in range(h.shape[1]):
          fid.write('%12.4f' % h[aa,bb])
      fid.write('\n')
  fid.close()
  
  # Print text file with the grid coordinates
  fid = open(outfld+'/swan_coord.grd', 'w')
  for aa in range(x_rho.shape[0]):
      for bb in range(x_rho.shape[1]):
          fid.write('%12.6f' % x_rho[aa,bb])
          fid.write('\n')
  for aa in range(y_rho.shape[0]):
      for bb in range(y_rho.shape[1]):
          fid.write('%12.6f' % y_rho[aa,bb])
          fid.write('\n')          
  fid.close()  
  
  #---------------------------------------------------------- Output for swan.in
  print(' ')
  print("========================================================")
  print("Created swan_coord.grd and swan_bathy.bot")
  print('CGRID CURVILINEAR ' + np.str(h.shape[1]-1) + ' ' + 
        np.str(h.shape[0]-1) + ' CIRCLE ...')
  print('INPGRID BOTTOM CURVILINEAR 0 0 ' + np.str(h.shape[1]-1) + ' ' + 
        np.str(h.shape[0]-1) + ' EXC ...')
  print("========================================================")
Example #15
def laserScan2D(width, height, delta, peakArea, fileName):
    # Scans an area with given width and height at a
    # step rate of delta. peakArea is int value for
    # the location of peak with a scale from 0 to 10,000

    startTime = time.clock()
    h = 0
    w = 0
    n = 0
    m = 0

    x = np.arange(0, width + delta, delta)
    y = np.arange(0, height + delta, delta)
    Y, X = np.meshgrid(y, x)
    tValues = np.zeros((np.size(x), np.size(y)))
    vValues = np.zeros((np.size(x), np.size(y)))

    # set up scope
    scanRange = 1000
    scope = vi.instrument("TCPIP::138.67.12.235::INSTR")
    sRead.setParam(scope, peakArea - scanRange, peakArea + scanRange)

    # get motor and zero location
    motor = mC.setupMotor()

    while w <= width:
        h = 0
        m = 0
        while h <= height:
            mC.moveTo(motor, w, h)
            time.sleep(0.5)
            x, y = sRead.getData(scope, peakArea - scanRange, peakArea + scanRange)
            t, v = findPeak(x, y)
            tValues[n, m] = t
            vValues[n, m] = v
            h = h + delta
            m = m + 1
        w = w + delta
        n = n + 1

        # Estimates Time Left
        timeLeft = (width - w) / w * (time.clock() - startTime) / 60
        print "Est. Time Left " + np.str(timeLeft) + "min"
    mC.moveTo(motor, 0, 0)

    # Contour Plot of Time
    makePlot2D(X, Y, tValues, fileName + " Time")

    # Contour Plot of Voltage
    makePlot2D(X, Y, vValues, fileName + " Voltage")

    # File Output
    np.savez(fileName + ".npz", X=X, Y=Y, tValues=tValues, vValues=vValues)

    # Time Taken Calc
    timeTaken = (time.clock() - startTime) / 60  # in min
    print "Time Taken " + np.str(timeTaken)
    motor.close()
    scope.close()
    return timeTaken, tValues
Example #16
def save_orbit( x, y, z, filename ):
	ff = open( filename + '.3d', 'w' )
	for i in range(len(x)):
		ff.write( np.str(x[i]) + "," +
				  np.str(y[i]) + "," +
				  np.str(z[i]) + "\n" )
	ff.close()
	return
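A usage sketch (the orbit data and file name are made up): the function writes one comma-separated x,y,z triplet per line to '<filename>.3d'.

import numpy as np

theta = np.linspace(0.0, 4.0 * np.pi, 200)
save_orbit(np.cos(theta), np.sin(theta), 0.1 * theta, 'orbit')  # writes orbit.3d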
Example #17
def iter_to_str(iteration, maximum):
    """ Converts an iteration number to string.

    Uses the maximum as second input to guarantee equal length for all.

    """
    cur_trial_len = len(np.str(iteration))
    return ((len(np.str(np.int(maximum)+1))-cur_trial_len) * '0') + np.str(iteration)
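For example (illustrative values), the zero padding is taken from the width of maximum + 1:

print(iter_to_str(7, 150))  # '007'  (151 has three digits)
print(iter_to_str(42, 9))   # '42'   (10 has two digits, so no padding is added)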
Example #18
def time_lag(eta,ot,lags=None):
    """
    Function to compute average time lag between the wave staffs
    
    USAGE:
    ------
    ot_lag = time_lag(eta,ot,lags)
    
    PARAMETERS:
    -----------
    eta    : Numpy array of water surface elevation and time
             eta.shape = (time,points)
    ot     : Time vector (numpy array)
    lags   : Number of lags to compute
    
    RETURNS:
    --------
    ot_lag : Numpy array of the same dimensions as eta with the time lagged
             arrays.
    
    DEPENDENCIES:
    -------------
    gsignal.cross_corr
    
    """
    
    # Verify the requested lags
    if not lags:
        lags = np.floor(ot.shape[0]/2)
    
    # Cumulative lag time 
    cum_lag_time = np.zeros((eta.shape[1],))
    
    # Time interval
    dt = ot[2] - ot[1]
    
    # Loop over points
    for aa in range(1,cum_lag_time.shape[0]):
        
        # Find the time lagged cross-correlation to adjust the time series
        rho,stats = gsignal.cross_corr(eta[:,aa-1],eta[:,aa],lags)
        
        # Identify the maximum auto correlation
        if np.max(rho) < 0.8:
            print('Warning: Correlation is less than 0.8')
            print('  aa = ' + np.str(aa))
            print('  r = ' + np.str(np.max(rho)))
            
        # Compute cumulative lag time
        cum_lag_time[aa] = cum_lag_time[aa-1] + stats[np.argmax(rho),0] * dt
    
    # Create output array based on lag time
    ot_lag = np.zeros_like(eta)
    for aa in range(cum_lag_time.shape[0]):
        ot_lag[:,aa] = ot - cum_lag_time[aa]
        
    # Exit function
    return ot_lag
Example #19
def makerunfake(rundir, base, param_file, nstart, nruns):
	for i in range(nstart, nstart+nruns):
		fakeparam = param_file+".fake_"+np.str(i)
		outfile = "runfake"+np.str(i)
		f = open(outfile, 'w')
		f.write("cd " + rundir+"\n")
		f.write("dolphot " + base+"_"+np.str(i)+ " -p" + fakeparam + " >> fake.log_"+np.str(i))
		f.close()
		subprocess.call("chmod +x " + outfile, shell=True)
Example #20
File: roms_post.py Project: garciaga/pynmd
def roms_to_swan_bathy_curv(hisfile, outfld):
    """ 
  Generate a SWAN bathymetry file from either a ROMS history or bathymetry input
  file. 
  
  roms_to_swan_bathy_curv(hisfile,outfld)
  
  Parameters
  ----------
  hisfile  : ROMS history or bathymetry input netCDF file
  outfld   : Folder to save the output files
  
  Returns
  -------
  Two text files (swan_bathy.bot, swan_coord.grd) that contain the bathymetry 
  and coordinates of the grid for SWAN input. 
  
  Notes
  -----
    
    
  """

    # Load variables of interest from the ocean_his.nc file
    ncfile = netCDF4.Dataset(hisfile, "r")
    h = ncfile.variables["h"][:]
    x_rho = ncfile.variables["x_rho"][:]
    y_rho = ncfile.variables["y_rho"][:]
    ncfile.close()

    # Print text file with extended and interpolated bathymetry
    fid = open(outfld + "/swan_bathy.bot", "w")
    for aa in range(h.shape[0]):
        for bb in range(h.shape[1]):
            fid.write("%12.4f" % h[aa, bb])
        fid.write("\n")
    fid.close()

    # Print text file with the grid coordinates
    fid = open(outfld + "/swan_coord.grd", "w")
    for aa in range(x_rho.shape[0]):
        for bb in range(x_rho.shape[1]):
            fid.write("%12.6f" % x_rho[aa, bb])
            fid.write("\n")
    for aa in range(y_rho.shape[0]):
        for bb in range(y_rho.shape[1]):
            fid.write("%12.6f" % y_rho[aa, bb])
            fid.write("\n")
    fid.close()

    # ---------------------------------------------------------- Output for swan.in
    print(" ")
    print("=====================================================================")
    print("Created swan_coord.grd and swan_bathy.bot")
    print("CGRID CURVILINEAR " + np.str(h.shape[1] - 1) + " " + np.str(h.shape[0] - 1) + " CIRCLE ...")
    print("INPGRID BOTTTOM CURVILINEAR 0 0 " + np.str(h.shape[1] - 1) + " " + np.str(h.shape[0] - 1) + " EXC ...")
    print("=====================================================================")
Example #21
    def computeDFF(self,secsWindow=5,quantilMin=8,method='only_baseline',order='C'):
        """
        Compute the DF/F of the movie or remove the baseline.
        To compute the baseline, frames are binned according to the window length parameter
        and the intermediate values are then interpolated.
        Parameters
        ----------
        secsWindow: length of the windows used to compute the quantile
        quantilMin : value of the quantile
        method='only_baseline','delta_f_over_f','delta_f_over_sqrt_f'

        Returns
        -----------
        self: DF or DF/F or DF/sqrt(F) movies
        movBL=baseline movie
        """

        print("computing minimum ..."); sys.stdout.flush()
        minmov=np.min(self)

        if np.min(self)<=0 and method != 'only_baseline':
            raise ValueError("All pixels must be positive")

        numFrames,linePerFrame,pixPerLine=np.shape(self)
        downsampfact=int(secsWindow*self.fr);
        elm_missing=int(np.ceil(numFrames*1.0/downsampfact)*downsampfact-numFrames)
        padbefore=int(np.floor(old_div(elm_missing,2.0)))
        padafter=int(np.ceil(old_div(elm_missing,2.0)))

        print(('Initial Size Image:' + np.str(np.shape(self)))); sys.stdout.flush()
        self=movie(np.pad(self,((padbefore,padafter),(0,0),(0,0)),mode='reflect'),**self.__dict__)
        numFramesNew,linePerFrame,pixPerLine=np.shape(self)

        #% compute baseline quickly
        print("binning data ..."); sys.stdout.flush()
#        import pdb
#        pdb.set_trace()
        movBL=np.reshape(self,(downsampfact,int(old_div(numFramesNew,downsampfact)),linePerFrame,pixPerLine),order=order);
        movBL=np.percentile(movBL,quantilMin,axis=0);
        print("interpolating data ..."); sys.stdout.flush()
        print((movBL.shape))

        movBL=scipy.ndimage.zoom(np.array(movBL,dtype=np.float32),[downsampfact ,1, 1],order=0, mode='constant', cval=0.0, prefilter=False)

        #% compute DF/F
        if method == 'delta_f_over_sqrt_f':
            self=old_div((self-movBL),np.sqrt(movBL))
        elif method == 'delta_f_over_f':
            self=old_div((self-movBL),movBL)
        elif method  =='only_baseline':
            self=(self-movBL)
        else:
            raise Exception('Unknown method')

        self=self[padbefore:len(movBL)-padafter,:,:];
        print(('Final Size Movie:' +  np.str(self.shape)))
        return self,movie(movBL,fr=self.fr,start_time=self.start_time,meta_data=self.meta_data,file_name=self.file_name)
Example #22
def get_histogram(peakstats,nbins=100,pplot=True,min_width=1,max_width=20,min_sn=3,min_diffchis=.5,sizecal=1.,plotfile=None,plotdiff=True,fignum=None, running=False):
    """
    Function to create a histogram from the fit statistics of peaks, using several cuts.
    Returns a histogram dictionary. If available, sizecal calibrates the x axis to nm radius.
    If 'running', use fignum to overwrite the histogram; intended for plotting the histogram
    while the fits are still running.
    """
    pkhist=None
    intotal=len(peakstats['pk_size'])
    pksizes=np.array(peakstats['pk_size'][(peakstats['pk_width']>min_width) & (peakstats['pk_width']<max_width) & (peakstats['pk_sn']>min_sn) & (peakstats['pk_diffchis']>min_diffchis)])
    outtotal=len(pksizes)
    if outtotal>-1:
        pksizes=pksizes**0.333
        if len(pksizes)>0:
            pkhist=np.histogram(pksizes,bins=nbins,range=[0,np.max(pksizes)])
            histmin=np.min(pkhist[0])
            histmax=np.max(pkhist[0])
            pkhistraw=np.histogram((peakstats['pk_size'])**0.333,bins=nbins,range=[0,np.max(pksizes)])
        if len(pksizes)==0:
            pkhistraw=np.histogram((peakstats['pk_size'])**0.333,bins=nbins)
            histmin=np.min(pkhistraw[0])
            histmax=np.max(pkhistraw[0])
        if pplot:
            if running==False:
                plt.figure(fignum)
                plt.hold(True)
                plt.bar(sizecal*pkhistraw[1][1:],(pkhistraw[0]),width=pkhistraw[1][2]-pkhistraw[1][1],label='Raw, n='+np.str(intotal),color='blue')
                if len(pksizes)>0:
                    plt.bar(sizecal*pkhist[1][1:],(pkhist[0]),width=pkhist[1][2]-pkhist[1][1],label='Filtered, n='+np.str(outtotal),color='red')
                if plotdiff==True:
                    plt.bar(sizecal*pkhistraw[1][1:],(pkhistraw[0]-pkhist[0]),width=pkhistraw[1][2]-pkhistraw[1][1],label='Difference',color='green')
                plt.legend()
                plt.xlabel('Particle diameter (not normalized)')
                plt.ylabel('Number of particles')
                plt.title('Cuts: '+np.str(min_width)+'<width<'+np.str(max_width)+',s/n >'+np.str(min_sn)+',Chisqdiff>'+np.str(min_diffchis))
                plt.draw()

            if running==True:
                plt.ion()
                if fignum==None:
                    fignum=100
                plt.figure(fignum)
                plt.close()
                plt.hold(False)
                plt.bar(sizecal*pkhistraw[1][1:],(pkhistraw[0]),width=pkhistraw[1][2]-pkhistraw[1][1],label='Raw, n='+np.str(intotal),color='blue')
                plt.hold(True)
                if len(pksizes)>0:
                    plt.bar(sizecal*pkhist[1][1:],(pkhist[0]),width=pkhist[1][2]-pkhist[1][1],label='Filtered, n='+np.str(outtotal),color='red')
                plt.legend()
                plt.xlabel('Particle diameter (not normalized)')
                plt.ylabel('Number of particles')
                plt.title('Cuts: '+np.str(min_width)+'<width<'+np.str(max_width)+',s/n >'+np.str(min_sn)+',Chisqdiff>'+np.str(min_diffchis))
                plt.show()
                print('did I plot?')
            if plotfile!=None:
                plt.savefig(plotfile)
    return pkhist
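A sketch of how this might be called (the peak statistics below are randomly generated and purely illustrative; the dictionary keys follow the function's expectations). With pplot=False only the histogram of the filtered cube-root sizes is returned.

import numpy as np

peakstats = {'pk_size':     np.random.lognormal(3.0, 0.5, 500),
             'pk_width':    np.random.uniform(0.0, 25.0, 500),
             'pk_sn':       np.random.uniform(1.0, 10.0, 500),
             'pk_diffchis': np.random.uniform(0.0, 2.0, 500)}
pkhist = get_histogram(peakstats, nbins=50, pplot=False)
counts, bin_edges = pkhist  # numpy histogram tuple of the filtered sizes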
Example #23
def makephotfiles(base, nstart, nruns, nimages):
	for i in range(nstart,nstart+nruns):
		for j in range(1, nimages+1):
			subprocess.call("ln -s "+base+"."+np.str(j)+".res.fits "  + base+"_"+np.str(i)+"."+np.str(j)+".res.fits", shell=True)
			subprocess.call("ln -s "+base+"."+np.str(j)+".psf.fits " + base+"_"+np.str(i)+"."+np.str(j)+".psf.fits", shell=True)
			subprocess.call("ln -s "+base+".info " + base+"_"+np.str(i)+".info", shell=True)
			subprocess.call("ln -s "+base+".apcor " + base+"_"+np.str(i)+".apcor", shell=True)
			subprocess.call("ln -s "+base+".psfs " + base+"_"+np.str(i)+".psfs", shell=True)
			subprocess.call("ln -s "+base+".columns " + base+"_"+np.str(i)+".columns", shell=True)

		subprocess.call("ln -s "+base + " " + base+"_"+np.str(i), shell=True)
Example #24
def plt_gains(vis, nu, img_name='out.png', bad_chans=[]):
    """ Plot grid of transit phases to check if both 
    fringestop and the calibration worked. If most
    antennas end up with zero phase, then the calibration worked.

    Parameters
    ----------
    vis : np.ndarray[nfreq, ncorr, ntimes]
        Visibility array 
    nu  : int
        Frequency index to plot up
    """
    fig = plt.figure(figsize=(14, 14))
    
    # Plot up 128 feeds correlated with antenna "ant"

    ant = 1

    # Try and estimate the residual phase error 
    # after calibration. Zero would be a perfect calibration.
    angle_err = 0

    # Go through 128 feeds plotting up their phase during transit
    for i in range(128):
        fig.add_subplot(32, 4, i+1)

        if i==ant:
            # For autocorrelation plot the visibility rather 
            # than the phase. This gives a sense for when 
            # the source actually transits. 
            plt.plot(vis[nu, misc.feed_map(ant, i+1, 128)])
            plt.xlim(0, len(vis[nu, 0]))
        else:
            angle_err += np.mean(abs(np.angle(vis[nu, misc.feed_map(ant, i+1, 128)]))) / 127.0
            plt.plot((np.angle(vis[nu, misc.feed_map(ant, i+1, 128)])))
            plt.axis('off')
            plt.axhline(0.0, color='black')

            oo = np.round(np.std(np.angle(vis[nu, misc.feed_map(ant, i+1, 128)]) * 180 / np.pi))
            plt.title(np.str(oo) + ',' + np.str(i))

            if i in bad_chans:
                plt.plot(np.angle(vis[nu, misc.feed_map(ant, i+1, 128)]), color='red')
            plt.ylim(-np.pi, np.pi)

    plt.title(np.str(180 / np.pi * angle_err))
            
    del vis

    print "\n Wrote to %s \n" % img_name

    fig.savefig(img_name)

    del fig
Example #25
File: CompSim.py Project: raphaelholca/bAP
    def plotRF(self):
        if True: return
        tic_plot = time.time()
        print('Plot:')
        for t,i in zip(self.timeStamp,range(np.size(self.timeStamp))):
#             if np.mod(t*self.dt,int(self.trialDuration/50))==0: 
#                 j=int(t*self.dt/int(self.trialDuration/50))
#                 timeLeft = -1
#                 if j==0: timeStart = time.time()
#                 else: timeLeft = (50-j)*(time.time()-timeStart)/j
#                 sys.stdout.write('\r')
#                 sys.stdout.write("[%-50s] %d%% done in: %d min %d sec" % ('='*j, 2*j, int((timeLeft)/60), int(np.mod((timeLeft),60)) ))
#                 sys.stdout.flush()
                 
            if np.mod(t,200) == 0:
                #plot color-coded map
                squareW = np.reshape(self.TC_RS_gTracker[:,i], (self.TC.size, self.RS.size))    
                plt.figure(figsize=(7,7))
                rootSize = np.sqrt(self.RS.size)
                ODC_mat = np.zeros(self.RS.size)
                alpha_mat = np.zeros(self.RS.size)
                #create white color map with transparency gradient
                cmap_trans = mpl.colors.LinearSegmentedColormap.from_list('my_cmap',['black','black'],256) 
                cmap_trans._init()
                alphas = np.linspace(1.0, 0, cmap_trans.N+3)
                cmap_trans._lut[:,-1] = alphas
                 
                for RS in range(self.RS.size):
                    prefPattern = [np.sum(squareW[[0,4,8,12],RS]),np.sum(squareW[[1,5,9,13],RS]),np.sum(squareW[[2,6,10,14],RS]),np.sum(squareW[[3,7,11,15],RS])]
                    ODC_mat[RS] = np.argmax(prefPattern)
                    alpha_mat[RS] = np.max(prefPattern)-(np.sum(prefPattern)-np.max(prefPattern))/self.numPattern
                #            ODC_mat[RS] = np.mean(self.TC_RS.g[[0,5,10,15],RS]) - np.mean(self.TC_RS.g[[3,6,9,12],RS])   
                plt.imshow(np.reshape(ODC_mat, [rootSize, rootSize]),interpolation='nearest', cmap='Spectral', vmin=0,vmax=3) #color
                plt.imshow(np.reshape(alpha_mat, [rootSize, rootSize]),interpolation='nearest', cmap=cmap_trans, vmin=-0.25,vmax=1.5) #transparency
                plt.title('Ocular Dominance at ' + np.str(t) + 'ms')
                plt.savefig('../output/current/' + 'codedMap/' + np.str(int(t)) + '.png')
                 
                #plot detailed map
                plt.figure()
                rootSize = np.sqrt(self.TC.size)
                for RS in range(self.RS.size):
                    plt.subplot(int(np.ceil(np.sqrt(self.RS.size))),int(np.ceil(np.sqrt(self.RS.size))), RS+1)
                    plt.imshow(np.reshape(squareW[:,RS],[rootSize, rootSize]), interpolation='nearest', vmin=0, vmax=self.TC_RS.g_max, cmap='bwr')
                    plt.gca().axes.get_xaxis().set_visible(False)
                    plt.gca().axes.get_yaxis().set_visible(False)
                    plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
                cax = plt.axes([0.85, 0.1, 0.075, 0.8])
                plt.colorbar(cax=cax)
                plt.suptitle('TC->RS Weights at ' + np.str(t) + 'ms')
                plt.savefig('../output/current/' + 'detailedMap/' + np.str(int(t)) + '.png')
        print "plot time:", int((time.time()-tic_plot)/60), 'min,',  int(np.mod((time.time()-tic_plot),60)), 'sec'
Example #26
def plot():
    if traces: 
        plt.figure()
        if g_excit:  plt.plot(genParam['timeArray'],  neurons['RS'].g_excit[0,:]  , 'r')
        if g_inhib:  plt.plot(genParam['timeArray'],  neurons['RS'].g_inhib[0,:]  , 'b')
        if OP:       plt.plot(genParam['timeArray'],  neurons['RS'].OP[0,:]*30    , 'r.')
        if OP:       plt.plot(genParam['timeArray'],  neurons['FS'].OP[0,:]*30    , 'b.')
        if BPAP:     plt.plot(genParam['timeArray'],  neurons['RS'].BPAP[0,:]*50  , 'k')
        
        plt.ylim(ylim)
        plt.savefig('../output/' + 'g_record_delay.png')
     
    #plot stimulus preference map
    if ODM:
        for t,i in zip(weights['time'],range(np.size(weights['time']))): 
            if np.mod(t,1000) == 0:
                #color-coded map
                squareW = np.reshape(weights['w'][:,i], (neurons['TC'].size, neurons['RS'].size))    
                plt.figure(figsize=(7,7))
                rootSize = np.sqrt(neurons['RS'].size)
                ODC_mat = np.zeros(neurons['RS'].size)
                alpha_mat = np.zeros(neurons['RS'].size)
                #create white color map with transparency gradient
                cmap_trans = mpl.colors.LinearSegmentedColormap.from_list('my_cmap',['black','black'],256) 
                cmap_trans._init()
                alphas = np.linspace(1.0, 0, cmap_trans.N+3)
                cmap_trans._lut[:,-1] = alphas
                 
                for RS in range(neurons['RS'].size):
                    prefPattern = [np.sum(squareW[[0,4,8,12],RS]),np.sum(squareW[[1,5,9,13],RS]),np.sum(squareW[[2,6,10,14],RS]),np.sum(squareW[[3,7,11,15],RS])]
                    ODC_mat[RS] = np.argmax(prefPattern)
                    alpha_mat[RS] = np.max(prefPattern)-(np.sum(prefPattern)-np.max(prefPattern))/genParam['numPattern']
                #            ODC_mat[RS] = np.mean(self.TC_RS.g[[0,5,10,15],RS]) - np.mean(self.TC_RS.g[[3,6,9,12],RS])   
                plt.imshow(np.reshape(ODC_mat, [rootSize, rootSize]),interpolation='nearest', cmap='Spectral', vmin=0,vmax=3) #color
                plt.imshow(np.reshape(alpha_mat, [rootSize, rootSize]),interpolation='nearest', cmap=cmap_trans, vmin=-0.25,vmax=1.5) #transparency
                plt.title('Ocular Dominance at ' + np.str(t) + 'ms')
                plt.savefig('../output/' + 'OcularDominance_' + np.str(int(t)) + '.png')
                 
                #full stimulus preference
                plt.figure()
                rootSize = np.sqrt(neurons['TC'].size)
                for RS in range(neurons['RS'].size):
                    plt.subplot(int(np.ceil(np.sqrt(neurons['RS'].size))),int(np.ceil(np.sqrt(neurons['RS'].size))), RS+1)
                    plt.imshow(np.reshape(squareW[:,RS],[rootSize, rootSize]), interpolation='nearest', vmin=0, vmax=synParam['TC_RS'].g_max, cmap='bwr')
                    plt.gca().axes.get_xaxis().set_visible(False)
                    plt.gca().axes.get_yaxis().set_visible(False)
                    plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
                cax = plt.axes([0.85, 0.1, 0.075, 0.8])
                plt.colorbar(cax=cax)
                plt.suptitle('TC->RS Weights at ' + np.str(t) + 'ms')
                plt.savefig('../output/' + 'TC-RS_' + np.str(int(t)) + '.png')
Example #27
def alk_field(alk_in,alk_drawdown,array_size,spacing):
    tmp = np.zeros([array_size[0],array_size[1]]) + alk_in
    tmp_1 = np.arange(tmp[:,0].size)
    counter = 0
    for i in tmp_1[0:tmp[:,0].size:spacing]:
        for j in tmp_1[0:tmp[0,:].size:spacing]:
            counter += 1
    value = alk_in-((alk_drawdown*(array_size[0]*array_size[1]))/counter)
    for i in tmp_1[0:tmp[:,0].size:spacing]:
        for j in tmp_1[0:tmp[0,:].size:spacing]:
            tmp[i,j] = value
    print('point alk value = '+np.str(value))
    print('mean alkalinity = '+np.str(np.mean(tmp)))
    return tmp
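A small worked call (numbers are illustrative): placing treated points every 10th row and column of a 100x100 field puts the drawdown on 100 of the 10000 cells.

field = alk_field(2300.0, 0.5, (100, 100), 10)
# Each treated point is set to 2300 - 0.5*10000/100 = 2250,
# so the field mean drops by roughly the requested 0.5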
Example #28
 def _setParam(self, ch, strt, end):
     """
     Function *_setParam()* sets transmission parameters.
     INTERNAL USE ONLY!
     
     """
     self._visaWrite("DATa:SOUrce CH" + np.str(ch))  # choose which Channel to get
     self._visaWrite("DATa:ENCdg ASCIi")  # can be ASCIi/RIBinary/RPBinary/SRIbinary/SRPbinary
     self._visaWrite(
         "DATa:WIDth 2"
     )  # 1 byte give vertical range of -128 to 127 and 2 bytes gives range of -32768 to 32767
     self._visaWrite("DATa:STARt " + np.str(strt))  # 1 is far left edge
     self._visaWrite("DATa:STOP " + np.str(end))  # 10,000 is far right edge
     return
Example #29
 def PlotResults(self,plotAll):
     plt.figure
     nLegs = len(self.Legs);
     plt.figure(nLegs+1);
     plt.plot(self.WPenu[0:len(self.WPenu[:,0])-1,0],self.WPenu[0:len(self.WPenu[:,0])-1,1],marker='*');
     
     for i in range(len(self.Legs)):
         plt.figure(nLegs+1);
         plt.plot(self.Legs[i].enu[:,0],self.Legs[i].enu[:,1]);
         
         if(plotAll == 1):
             plt.figure(i+1);
             plt.plot(self.Legs[i].xTrack[:,1],self.Legs[i].xTrack[:,0]);
             plt.plot(np.matrix('0.0;0.0'),np.matrix([[0.0],[self.Legs[i].legDist]]),marker='*')            
             plt.axis('equal');
             plt.xlabel('Cross Track (m)');
             plt.ylabel('Along Track (m)');
             plt.figtext(0.6,0.86,"Leg #"+np.str(i+1));
             plt.figtext(0.6,0.83,"XTrack Area = "+np.str(np.round(self.Legs[i].xTrackTot,decimals=3))+"m^2");
             plt.figtext(0.6,0.80,"Leg Weight = "+np.str(np.round(self.Legs[i].legDist,decimals = 3))+"m");
             plt.figtext(0.6,0.77,"Leg Time = " +np.str(np.round(self.Legs[i].legTimeMS/1000.0, decimals = 3))+"s");
     plt.figure(nLegs+1);
     plt.axis('equal');
     plt.xlabel('East(m)');
     plt.ylabel('North(m)');
     plt.figtext(0.65,0.86,"Team Goose")        
     plt.figtext(0.65,0.83,"Number of Legs = "+np.str(nLegs));        
     plt.figtext(0.65,0.80,"Time Score = "+ np.str(np.round(self.timeScoreMS/1000.0,decimals=3)) + " s");
     plt.figtext(0.65,0.77,"CrossTrack Score = "+np.str(np.round(self.xTrackScore,decimals=3)));
     plt.figtext(0.65,0.74,"Total Distance = "+np.str(np.round(self.totalDist,decimals=3))+" m");
Example #30
def give_String_Number_For_VTK(num):

    #num: # of file to be printed

    if (num < 10):
        strNUM = '000' + np.str(num)
    elif (num < 100):
        strNUM = '00' + np.str(num)
    elif (num<1000):
        strNUM = '0' + np.str(num)
    else:
        strNUM = np.str(num)
    
    return strNUM
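For example (illustrative values), the result is padded to at least four characters:

print(give_String_Number_For_VTK(7))     # '0007'
print(give_String_Number_For_VTK(42))    # '0042'
print(give_String_Number_For_VTK(1234))  # '1234'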
Example #31
def get_boundary(x, y, q, xind, yind, vthresh, maxpnt, minarea, rcirc,
                 verbose):
    '''
    
    Usage:
    get_boundary(x,y,q,xind,yind,vthresh,maxpnt,minarea,rcirc,verbose)
    
        Parameters:
        ----------
        x,y        : 2D coordinate arrays of vorticity locations (psi points)
        q          : Array of vorticity computed at x,y points
        xind,yind  : x and y indices of the vorticity extrema to track
        vthresh    : vorticity intensity to track
        maxpnt     : Maximum number of points in the circumference of vortex 
                     Mainly used to avoid infinite loops 
        minarea    : Minimum area to accept the vortex 
        rcirc      : Circularity criterion {C/[2*(pi*area)**0.5]}
        verbose    : Display output messages
        
        Output:
        -------
        bound      : Dictionary of closed boundary points surrounding a vortex.
                     Returns None if the boundary does not meet specific 
                     criteria (see codes for details).
    '''

    # Initialize variables
    out1 = [xind]  # Temporary output parameter 1 (x index)
    out2 = [yind]  # Temporary output parameter 2 (y index)
    out3 = [x[yind, xind]]  # Temporary output parameter 3 (x coordinate)
    out4 = [y[yind, xind]]  # Temporary output parameter 4 (y coordinate)
    exitflag = False  # Flag for loop around contour
    bound = True  # Output variable initialization
    counter = 0  # Counter variable
    searchord = [0, 3, 2, 1]  # Search order (right,down,left,up)

    # Go to the right boundary closest to the velocity extrema point and update
    # output arrays
    tmpind = np.argmin(q[yind, (xind + 1):] >= vthresh)
    xind = xind + tmpind
    out1.append(xind)
    out2.append(yind)
    out3.append(x[yind, xind])
    out4.append(y[yind, xind])

    # Loop around the edge
    while exitflag is False:

        # Counter variable increase
        counter += 1
        brkflag = False

        # Start searching for vorticity values counter-clockwise from the index
        # points of the vorticity extrema
        for aa in range(5):

            if aa == searchord[0]:
                # Search to the right
                # Returns the first false index value
                flagind = q[yind, xind + 1] >= vthresh
                if flagind:
                    xind = xind + 1
                    brkflag = True
            elif aa == searchord[1]:
                # Search down
                flagind = q[yind - 1, xind] >= vthresh
                if flagind:
                    yind = yind - 1
                    brkflag = True
                    #searchord = [0,1,2,3]   # right, down, left, up
            elif aa == searchord[2]:
                # Search to the left
                flagind = q[yind, xind - 1] >= vthresh
                if flagind:
                    xind = xind - 1
                    brkflag = True
            elif aa == searchord[3]:
                # Search upwards
                flagind = q[yind + 1, xind] >= vthresh
                if flagind:
                    yind = yind + 1
                    brkflag = True
            else:
                # No suitable vorticity contour is found around the point being
                # considered.
                if verbose:
                    print("Isolated point found")
                exitflag = True
                bound = False

            # Debugging messages
            if verbose:
                print('counter ' + np.str(counter))
                print('xind ' + np.str(xind))
                print('yind ' + np.str(yind))
                print('aa ' + np.str(aa))
                print('flagind ' + np.str(flagind))
                print('q[yind,xind] ' + np.str(q[yind, xind]))

            if brkflag:
                # Change search order
                if aa == 0:
                    # Shift 90 degrees right
                    searchord = [
                        searchord[3], searchord[0], searchord[1], searchord[2]
                    ]
                elif aa == 2:
                    # Shift 90 degrees left
                    searchord = [
                        searchord[1], searchord[2], searchord[3], searchord[0]
                    ]
                elif aa == 3:
                    # Shift 180 degrees
                    searchord = [
                        searchord[2], searchord[3], searchord[0], searchord[1]
                    ]

                # Break loop
                break

        # Update the index dictionary
        if exitflag is False:

            # Update output list
            out1.append(xind)
            out2.append(yind)
            out3.append(x[yind, xind])
            out4.append(y[yind, xind])

            # Check for maximum number of iterations
            if counter > maxpnt:
                if verbose:
                    print('Maximum number of boundary points (' +
                          np.str(maxpnt) + ') have been exceeded')
                exitflag = True
                bound = False  # Bug fixed v0.1.1

            # Verify if the new indices correspond to the first boundary points
            elif xind == out1[1] and yind == out2[1]:
                if verbose:
                    print('Back to the origin')
                # Remove initial (center) point
                out1 = out1[1:]
                out2 = out2[1:]
                out3 = out3[1:]
                out4 = out4[1:]
                exitflag = True

    # Employ area and shape checks ---------------------------------------------
    if bound:

        # Compute area of the vortex
        out1 = np.array(out1)
        out2 = np.array(out2)
        out3 = np.array(out3)
        out4 = np.array(out4)
        # Shoelace formula
        area = 0.5 * (np.sum(out3[0:-1] * out4[1:]) -
                      np.sum(out3[1:] * out4[0:-1]))
        if area < minarea:
            if verbose:
                print('Vortex rejected on area criterion')
                print('  Vortex area = ' + np.str(area) +
                      'm2 and minimum area = ' + np.str(minarea) + 'm2\n')
            bound = False

    # Circumference test
    if bound:
        perimeter = np.sum(
            ((out3[1:] - out3[0:-1])**2 + (out4[1:] - out4[0:-1])**2)**0.5)
        rvort = perimeter / (2 * np.sqrt(np.pi * area))
        if rvort > rcirc:
            if verbose:
                print('Vortex rejected based on circularity')
                print('  Rvortex = ' + np.str(rvort) + ' and Rmax = ' +
                      np.str(rcirc) + '\n')
            bound = False

    # Compute centroid of the polygon (v0.1.1)
    if bound:
        cx = 1.0 / (6.0 * area) * np.sum(
            (out3[0:-1] + out3[1:]) *
            (out3[0:-1] * out4[1:] - out3[1:] * out4[0:-1]))
        cy = 1.0 / (6.0 * area) * np.sum(
            (out4[0:-1] + out4[1:]) *
            (out3[0:-1] * out4[1:] - out3[1:] * out4[0:-1]))

    # Wrap-up ------------------------------------------------------------------
    if bound:
        bound = {
            'xind': out1,
            'yind': out2,
            'x': out3,
            'y': out4,
            'area': area,
            'rvort': rvort,
            'cx': cx,
            'cy': cy
        }
    return bound
Example #32
#time = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]
#time = [1]

name = 'Truckdataminf'  # runs from 001 to 299

#code = ['001', '002', '003', '004', '005']
#code = ['001']

for c in range(1, 299 + 1):
    nameHere = name + '{:03d}'.format(c)
    for cp in range(1):
        log = 'open d3plot "/home/ndv/stud/data/Truck/' + nameHere + '/d3plot"' + '\n'
        log += 'selectpart on ' + comp[cp] + '/0' + '\n'
        for i in range(1, 60 + 1):
            log += 'output "/home/mabbasloo/Documents/carCrashData/S2000001/f' + '{:03d}'.format(
                c) + '_' + comp[cp] + '_' + np.str(i) + '.stl" ' + np.str(
                    i) + ' 7 0 0' + '\n'
        log += 'stop'
        file = open(
            '/home/mabbasloo/Documents/carCrashData/S2000001/Data.cfile', 'w')
        file.write(log)
        file.close()
        os.system(
            '/home/mabbasloo/Documents/lsprepost4.0_centos6/lspp4 Data.cfile')
        for i in range(1, 60 + 1):
            fname = '/home/mabbasloo/Documents/carCrashData/S2000001/f' + '{:03d}'.format(
                c) + '_' + comp[cp] + '_' + np.str(i)
            os.system('/home/mabbasloo/meshconv ' + fname + '.stl ' +
                      '-c obj -o ' + fname)
            #os.system('/home/mabbasloo/simplify ' + fname + '.obj ' + fname + '.obj ' + np.str(ratio[cp]))
            os.system('/home/mabbasloo/meshconv ' + fname + '.obj ' +
Example #33
 def __str__(self):
     return np.str(self.q)
Example #34
    W_gt = np.zeros((H, D2, D2))
    for i in xrange(D2):
        W_gt[i, i, :] = bar_value
        W_gt[D2 + i, :, i] = bar_value
    if neg_bars > 0.0:
        W_gt[sample(range(H), np.int(H * neg_bars))] *= -1
    W_gt = W_gt.reshape((H, D))
    W_gt += np.random.normal(size=(H, D), scale=0.5)

    # Prepare model...
    model = BSC_ET(D, H, Hprime, gamma, to_learn)
    mparams = {'W': W_gt, 'pi': pi_gt, 'sigma': sigma_gt, 'mu': mu_gt}
    mparams = comm.bcast(mparams)

    pprint("Generating Model Parameters:")
    pprint("pi = " + np.str(mparams['pi']) + "; sigma = " +
           np.str(mparams['sigma']))

    # Generate trainig data
    my_N = N // comm.size
    my_data = model.generate_data(mparams, my_N)
    dlog.append('y', my_data['y'][0:20])

    # Choose annealing schedule
    anneal = LinearAnnealing(anneal_steps)
    anneal['T'] = [(15, start_temp), (-10, end_temp)]
    anneal['Ncut_factor'] = [(0, 0.), (2. / 3, 1.)]
    anneal['anneal_prior'] = anneal_prior
    anneal['W_noise'] = [(0., W_noise_intensity), (0.9, W_noise_intensity),
                         (1., 0.)]
    anneal['pi_noise'] = [(0., pi_noise_intensity), (0.9, pi_noise_intensity),
Example #35
ncfile.createDimension('eta_rho', size=Mp)
ncfile.createDimension('eta_u', size=Mp)
ncfile.createDimension('eta_v', size=M)
ncfile.createDimension('s_rho', size=N)
ncfile.createDimension('s_w', size=Np)
ncfile.createDimension('tracer', size=2)
ncfile.createDimension('time', size=1)
ncfile.createDimension('one', size=1)

# creating GLOBAL ATTRIBUTES
setattr(ncfile, 'type', filetypestr)
setattr(ncfile, 'title', run.ini_info)
setattr(ncfile, 'out_file', run.run_name + filenamestr)
setattr(ncfile, 'grd_file', run.run_name + '_grd.nc')
now = dt.datetime.now()
setattr(ncfile, 'history', np.str(now))

# creating VARIABLES, ATTRIBUTES and ASSIGNING VALUES

# ---------------------------------------------------------------------------
ncfile.createVariable('spherical', 'c')
setattr(ncfile.variables['spherical'], 'long_name', 'grid type logical switch')
setattr(ncfile.variables['spherical'], 'flag_values', 'T, F')
setattr(ncfile.variables['spherical'], 'flag_meanings', 'spherical, cartesian')
ncfile.variables['spherical'][:] = spherical

# ---------------------------------------------------------------------------
ncfile.createVariable('Vtransform', 'd', dimensions=('one'))
setattr(ncfile.variables['Vtransform'], 'long_name',
        'vertical terrain-following transformation equation')
ncfile.variables['Vtransform'][:] = run.vtransform
Example #36
    # Test if unique code ucode is string or unicode, treat differently if so
    if isinstance(ucode[jai], str):
        l = ucode[jai]
        lorig = l
        if ' ' in l:
            l = l.replace(' ', '_')
    elif isinstance(ucode[jai], unicode):
        l = ucode[jai]
        lorig = l
        if ' ' in l:
            l = l.replace(' ', '_')
    else:
        l = ucode[jai]
        lorig = l
        l = np.str(np.int(l))
    # Output tiff name
    otiff1 = idir + '/rp5k130k90m_' + j + '_' + i + '_' + kk + '_ifl/cfinal/' + fluxdir + '/' + j + '_' + i + '_' + k + '_' + l + '_dflux.tif'

    # Get shapes
    cx = shpfile.loc[[s for s, n in enumerate(shpfile[field]) if n == lorig], ]
    # If at least one shape is returned, keep processing
    if cx.shape[0] > 0:
        # Get target column and geometry column
        cx = cx[[field, 'geometry']]
        # Dissolve if there are multiple shapes
        if cx.shape[0] > 1:
            cx = cx.dissolve(by=field, aggfunc='first')
            cx[field] = cx.index
        # Fix simple self intersections if necessary
        if cx.is_valid.bool() == False:
Example #37

# Template matching on an image

img = cv2.imread('opencv-logo.png')
target = cv2.imread('opencv-blue-logo.png')
cv2.imshow('input', img)
cv2.imshow('target', target)

methods = [cv2.TM_SQDIFF_NORMED, cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED]
th, tw = target.shape[:2]
for method in methods:
    result = cv2.matchTemplate(img, target, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

    # top-left corner of the match
    if method == cv2.TM_SQDIFF_NORMED:
        tl = min_loc
    else:
        tl = max_loc
    # bottom-right corner of the match
    dr = (tl[0] + tw, tl[1] + th)
    cv2.rectangle(img, tl, dr, (0, 0, 255), 2)
    cv2.imshow('match' + np.str(method), img)

cv2.waitKey(0)
cv2.destroyAllWindows()



Example #38
def uniqueness(coords, maxspeed, maxdt, verbose=False, maxiter=10000):
    '''
    
    Usage:
    indices = uniqueness(coords,maxspeed,maxdt,verbose,maxiter)
    
        Parameters:
        ----------
        coords     : List of dictionaries containing the vortex boundary and
                     center information
        maxspeed   : Maximum speed allowed for vortices [m/s]
        maxdt      : Maximum time between successive vortices [s]
        verbose    : Display output messages (defaults to False)
        maxiter    : Maximum number of iterations (defaults to 10000)
        
        Output:
        -------
        indices    : List of lists that contain temporal indices for unique 
                     vortices
                     
    '''

    # Extract time and coordinate values from the vorticity extrema point
    xe = []
    ye = []
    ot = []
    for aa in range(len(coords)):
        xe.append(coords[aa]['xe'])
        ye.append(coords[aa]['ye'])
        ot.append(coords[aa]['ocean_time'])

    # Loop forward in time tracking each vortex in the order found
    counter = 0  # Vortex counter variable
    iter = 0  # Emergency maximum iteration
    vortexflag = False  # Flag for considered vortices
    indices = []  # List of lists
    indexedvort = np.zeros_like(xe,
                                dtype=bool)  # Array of values to manipulate
    # counter variable
    tmpindices = [0]  # Indices for current vortex
    indexedvort[0] = True  # First one is indexed

    if verbose:
        print('\n')
        print('=' * 60)
        print('New vortex found')

    # Loop forward from the second vortex center
    while vortexflag is not True:

        # Parent loop counter
        iter += 1
        samevort = True
        if verbose:
            print('  Iteration number ' + np.str(iter))

        # Find vortex life and distance with respect to current evaluated
        # point. Already considered vortex values should be filtered out here.
        tmplife = ot[(counter + 1):] - ot[counter]
        tmpspeed = (((((xe[(counter + 1):] - xe[counter])**2) +
                      (ye[(counter + 1):] - ye[counter])**2)**0.5) / tmplife)

        # Loop forward in time and space to find a vortex that satisfies the
        # life and speed limits. If this fails the counter variable will
        # increase and a new list will be generated to find the next vortex.
        for aa in range(len(tmplife)):

            # Verify vortex age and distance
            if (tmplife[aa] < maxdt) and (tmpspeed[aa] < maxspeed):
                # If true then this is the same vortex and the index will
                # be added to the temporary list
                counter += aa + 1
                tmpindices.append(counter)
                indexedvort[counter] = True
                if verbose:
                    print('  counter = ' + np.str(counter))
                break
            elif aa == (len(tmplife) - 1):
                # This is a new vortex
                samevort = False

        # Check if current vortex reached end of file
        # Search for new vortex (current one has already been allocated)
        if counter == (len(xe) - 1):
            if verbose:
                print('End of file reached on current vortex')
            samevort = False

        # If the evaluated vortex is a new vortex then add the previous
        # vortex to the output list and reset the temporary list
        if samevort is not True:
            indices.append(list(tmpindices))
            counter = np.argmin(indexedvort)
            tmpindices = [counter]
            indexedvort[counter] = True

            if verbose:
                print('\n')
                print('=' * 60)
                print('New vortex found')
                print('  counter = ' + np.str(counter))

        # Check if all the vortices have been considered and allocate current
        # array
        if False not in indexedvort:
            if verbose:
                print('\n')
                print('=' * 60)
                print('All vortices have been indexed')
            vortexflag = True
            indices.append(list(tmpindices))
        # Check for iteration limits
        elif iter > maxiter:
            if verbose:
                print('\n')
                print('=' * 60)
                print('Warning: Maximum number of iterations reached')
            vortexflag = True

    # Print summary to screen --------------------------------------------------
    if verbose:
        print("A total of " + np.str(len(indices)) +
              " different vortices found")

    # End of uniqueness code
    return indices
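
# Hypothetical usage sketch (not from the source module): group three detected
# vortex centres into unique vortices. The dictionary keys are those read by
# the function above; values are numpy scalars, as in the real pipeline.
if __name__ == '__main__':
    demo_coords = [
        {'xe': np.float64(0.0), 'ye': np.float64(0.0),
         'ocean_time': np.float64(0.0)},
        {'xe': np.float64(10.0), 'ye': np.float64(5.0),
         'ocean_time': np.float64(60.0)},      # slow drift -> same vortex
        {'xe': np.float64(9000.0), 'ye': np.float64(9000.0),
         'ocean_time': np.float64(120.0)},     # too fast -> new vortex
    ]
    # maxspeed in m/s, maxdt in s; expected result: [[0, 1], [2]]
    print(uniqueness(demo_coords, maxspeed=1.0, maxdt=3600.0, verbose=False))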


# End of module
Example #39
0
euler_sz = (2*np.pi)*(np.pi)*(2*np.pi)

inc = 6.

"""Retrieve Euler angle set"""

euler, n_tot = euler_grid_center(inc, phi1max, phimax, phi2max)

"""Calculate X"""

st = time.time()

X = gsh.gsh_eval(euler, np.arange(N_L))

print "basis evaluation complete: %ss" % np.round(time.time()-st, 3)
print "size of X: %sgb" % np.str(X.nbytes/(1E9))

"""Perform the orthogonality check"""

inner_mat = np.zeros((N_L, N_L), dtype='complex128')

euler_frac = domain_sz/euler_sz
print "integration domains per euler space: %s" % str(1./euler_frac)

fzsz = 1./(euler_frac*8.*np.pi**2)
bsz = domain_sz/n_tot
print "bsz: %s" % bsz
print "n_tot: %s" % n_tot

for ii in xrange(N_L):
Example #40
0
def vortex_tracking_main(x, y, q, ot, vthresh, maxpnt, stencil, boundary,
                         minarea, rcirc, verbose):
    '''
    
    Usage:
    coords = vortex_tracking(x,y,q,ot,vthresh,stencil,boundary,minarea,
                             rcirc,verbose)
    
        Parameters
        ----------
        x,y      : 2D arrays of coordinates
        q        : Array of vorticity computed at x,y points
        ot       : Time vector
        vthresh  : Tracking threshold 
        maxpnt   : Maximum number of points in a given vortex
        stencil  : Initial area filtering stencil 
        boundary : Width of the boundary (indices)
        minarea  : Minimum area to accept a vortex
        rcirc    : Circularity criterion {C/[2*(pi*area)**0.5]}
        verbose  : Print messages to the screen
    
        Output:
        ------
        coords   : List of dictionaries with vortex boundaries
        
        Fields in coords:
        -----------------
        ocean_time : time stamp [s]
        xind       : x indices of vortex boundary
        yind       : y indices of vortex boundary
        x          : x location of vortex boundary [m]
        y          : y location of vortex boundary [m]
        area       : vortex area [m2]
        rvort      : Vortex circularity {Circumference/[2*(pi*area)**0.5]}
        qe         : Vorticity extrema [s-1]
        xeind      : Vorticity extrema index in x direction
        yeind      : Vorticity extrema index in y direction
        xe         : x location of vorticity extrema [m]
        ye         : y location of vorticity extrema [m]
        cx         : x location of vorticity centroid [m]
        cy         : y location of vorticity centroid [m]
        
        
    '''

    # Initialize variables
    coords = []

    # Loop over time
    for aa in range(len(ot)):

        # Identify vorticity extrema
        qi = q[aa, :, :]
        # Get 2d indices of values that exceed threshold
        yind, xind = np.where(qi > vthresh)
        xe = x[yind, xind]
        ye = y[yind, xind]
        qe = qi[yind, xind]

        # Sort from strongest to weakest
        sort_ind = np.flipud(np.argsort(qe))
        xe = xe[sort_ind]
        ye = ye[sort_ind]
        qe = qe[sort_ind]
        xind = xind[sort_ind]
        yind = yind[sort_ind]

        # Area filtering (remove adjacent points that exceed threshold based on
        # the stencil size)
        indtmp = np.ones_like(xind, dtype=bool)
        for bb in range(len(indtmp)):
            if indtmp[bb]:
                xtmp = np.abs(xind - xind[bb]) > stencil
                ytmp = np.abs(yind - yind[bb]) > stencil
                xytmp = np.logical_and(xtmp, ytmp)
                indtmp[(bb + 1):] = xytmp[(bb + 1):] * indtmp[(bb + 1):]
        # Remove adjacent points
        xe = xe[indtmp]
        ye = ye[indtmp]
        qe = qe[indtmp]
        xind = xind[indtmp]
        yind = yind[indtmp]

        # Remove points adjacent to the boundaries
        indtmp = np.logical_and(xind >= boundary, xind <
                                (qi.shape[1] - boundary))
        xe = xe[indtmp]
        ye = ye[indtmp]
        qe = qe[indtmp]
        xind = xind[indtmp]
        yind = yind[indtmp]
        indtmp = np.logical_and(yind >= boundary, yind <
                                (qi.shape[0] - boundary))
        xe = xe[indtmp]
        ye = ye[indtmp]
        qe = qe[indtmp]
        xind = xind[indtmp]
        yind = yind[indtmp]

        # If there is at least a point over threshold
        if len(qe) > 1:

            # Find the boundaries of the vortex
            # From largest to smallest intensity
            # Counter clockwise search will be employed

            # Looping over vortex points
            for bb in range(len(qe)):

                # Check if the evaluated vortex lies within the boundaries of a
                # previously computed vortex
                # TO DO SOON!!!

                # Get boundary of vortex
                # Returns False if the vortex is not found or does not meet the
                # selected criteria.
                bndtmp = get_boundary(x, y, qi, xind[bb], yind[bb], vthresh,
                                      maxpnt, minarea, rcirc, verbose)

                # Prepare output list
                if bndtmp is not False:

                    # Add fields to dictionary
                    bndtmp['ocean_time'] = ot[aa]
                    bndtmp['qe'] = qe[bb]
                    bndtmp['xeind'] = xind[bb]
                    bndtmp['yeind'] = yind[bb]
                    bndtmp['xe'] = xe[bb]
                    bndtmp['ye'] = ye[bb]

                    # Add to output matrix
                    coords.append(bndtmp)

        else:
            if verbose:
                print("  No vorticity values exceed the selected threshold " +
                      "of " + np.str(vthresh) + "s^{-1}")
                print("    after the area and boundary filters are applied.")
                print("    Field " + np.str(aa + 1) + " of " + np.str(len(ot)))

    # End of function
    return coords
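
# Hypothetical end-to-end sketch (illustration only): track vortices in a
# vorticity field q(t, y, x) and then group them in time with uniqueness()
# from the previous example. x, y, q and ot are assumed to already exist with
# the shapes described in the docstring above.
#
#   coords = vortex_tracking_main(x, y, q, ot, vthresh=1e-4, maxpnt=500,
#                                 stencil=5, boundary=3, minarea=100.0,
#                                 rcirc=1.5, verbose=True)
#   indices = uniqueness(coords, maxspeed=1.0, maxdt=3600.0)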
Example #41
0
def get_pred(sn, el, ns, set_id, step, compl):
    """read the file for euler angle, total strain and plastic strain fields"""

    f = h5py.File("ref_%s%s_s%s.hdf5" % (ns, set_id, step), 'r')

    print f.get('euler').shape

    euler = f.get('euler')[sn, ...]
    euler = euler.swapaxes(0, 1)

    print euler.shape

    et = np.zeros((el**3, 6))
    ep = np.zeros((el**3, 6))

    for ii in xrange(6):
        comp = compl[ii]
        tmp = f.get('r%s_epsilon_t' % comp)[sn, ...]
        et[:, ii] = tmp.reshape(el**3)

        tmp = f.get('r%s_epsilon_p' % comp)[sn, ...]
        ep[:, ii] = tmp.reshape(el**3)

    f.close()
    """find the deviatoric strain tensor"""
    isdev = np.all(np.isclose(np.sum(et[:, 0:3], axis=1), np.zeros(el**3)))
    print "is trace(et) == 0?: %s" % isdev

    et_ = np.zeros(et.shape)
    et_[:, 0:3] = (et[:, 0:3] -
                   (1. / 3.) * np.expand_dims(np.sum(et[:, 0:3], 1), 1))
    et_[:, 3:] = et[:, 3:]

    isdev = np.all(np.isclose(np.sum(et_[:, 0:3], axis=1), np.zeros(el**3)))
    print "is trace(et_) == 0?: %s" % isdev
    """find the norm of the tensors"""
    en = tensnorm(et_)

    print "sn: %s" % sn
    print "min(en): %s" % en.min()
    print "max(en): %s" % en.max()
    """normalize the deviatoric strain tensor"""
    et_n = et_ / np.expand_dims(en, 1)

    isnorm = np.all(np.isclose(tensnorm(et_n), np.ones(el**3)))
    print "is norm(et_n) == 0?: %s" % isnorm
    """write the normalized deviatioric total strain and plastic strains
    in matrix form"""
    et_m = tens2mat(et_n)

    epn = tensnorm(ep)
    # epn_max = np.argmax(epn)
    orig = epn
    # print "max(norm(ep)): %s" % epn[epn_max]
    # print "euler @ max(norm(ep)): %s" % str(euler[epn_max, ...])
    # print et[epn_max, ...]
    # print et_[epn_max, ...]
    # print et_n[epn_max, ...]
    """find the eigenvalues of the normalized tensor"""
    eigval_, g_p2s_ = LA.eigh(et_m)
    del et_m

    print "eigval_ example (before sort): %s" % str(eigval_[0, :])
    print "g_p2s_ example (before sort):"
    print g_p2s_[0, ...]
    """sort the eigenvalues/vectors by highest to lowest magnitude
    eigenvalue"""
    esort = np.argsort(np.abs(eigval_))[:, ::-1]

    eigval = np.zeros(eigval_.shape)
    g_p2s = np.zeros(g_p2s_.shape)

    for ii in xrange(el**3):
        eigval[ii, :] = eigval_[ii, esort[ii, :]]
        for jj in xrange(3):
            g_p2s[ii, jj, :] = g_p2s_[ii, jj, esort[ii, :]]

    print "eigval example (after sort): %s" % str(eigval[0, :])
    print "g_p2s example (after sort):"
    print g_p2s[0, ...]
    """find the deformation mode"""
    theta = np.arctan2(-2 * eigval[:, 0] - eigval[:, 2],
                       np.sqrt(3) * eigval[:, 2])
    theta += np.pi * (theta < 0)

    print "min(theta): %s" % np.str(theta.min() * 180. / np.pi)
    print "mean(theta): %s" % np.str(theta.mean() * 180. / np.pi)
    print "max(theta): %s" % np.str(theta.max() * 180. / np.pi)
    """find g_p2c = g_p2s*g_s2c"""
    g_s2c = ef.bunge2g(euler[:, 0], euler[:, 1], euler[:, 2])
    """this application of einsum is validated vs loop with np.dot()"""
    g_p2c = np.einsum('...ij,...jk', g_s2c, g_p2s)

    phi1, phi, phi2 = ef.g2bunge(g_p2c)

    X = np.vstack([phi1, phi, phi2]).T
    # X = np.array(ef.g2bunge(g_p2c)).T
    # X = np.array(ef.g2bunge(g_p2c.swapaxes(1, 2))).T
    # X = np.array(ef.g2bunge(g_p2s)).T
    # X = np.array(ef.g2bunge(g_p2s.swapaxes(1, 2))).T
    # X = np.array(ef.g2bunge(g_s2c)).T
    # X = np.array(ef.g2bunge(g_s2c.swapaxes(1, 2))).T

    del phi1, phi, phi2

    pred = rr.eval_func(theta, X, en).real

    print "min(orig): %s" % orig.min()
    print "min(pred): %s" % pred.min()
    print "max(orig): %s" % orig.max()
    print "max(pred): %s" % pred.max()

    return orig, pred
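
# Worked numerical check (added illustration, not from the source): for a
# single voxel whose deviatoric eigenvalues, sorted by magnitude, are
# [1.0, -0.5, -0.5], the deformation-mode angle theta evaluates to 60 degrees.
eigval_demo = np.array([[1.0, -0.5, -0.5]])
theta_demo = np.arctan2(-2 * eigval_demo[:, 0] - eigval_demo[:, 2],
                        np.sqrt(3) * eigval_demo[:, 2])
theta_demo += np.pi * (theta_demo < 0)
assert np.isclose(theta_demo[0] * 180. / np.pi, 60.0)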
Example #42
0
# script to plot the MCMC steps, do contours and plot the PDFs

import pylab as pl
import numpy as np

datan = '2'
data =  np.loadtxt('spec_mcmc_new_'+datan+'.out') 
print len(data[:,0]), 'steps:' + np.str(len(data[:,0])/60.0) 
x, y = data[:,0], data[:,1]

nd = len(x) 

xmax, xmin = 20, 18
ymax, ymin = 100, 0

#xmin, xmax, ymin, ymax = x.min(), x.max(), y.min(), y.max()

nbins = 50.0
xbsize = (xmax - xmin)/(nbins -1)
ybsize = (ymax - ymin)/(nbins -1)

xarr = np.linspace(xmin,xmax,nbins)
yarr,step = np.linspace(ymin,ymax,nbins,retstep=True)

print step,ybsize

dgrid = np.zeros([nbins,nbins])

for j in range(nd):
	ix = int((x[j] - xmin) / xbsize)
	iy = int((y[j] - ymin) / ybsize)
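# The loop is cut off in this excerpt (presumably it increments dgrid[ix, iy]).
# A minimal equivalent of the intended 2-D binning -- an assumption about the
# intent -- would be:
#
#   dgrid, xedges, yedges = np.histogram2d(
#       x, y, bins=int(nbins), range=[[xmin, xmax], [ymin, ymax]])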
Example #43
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--kernel',
                        type=str,
                        default='random_forest',
                        help='Kernel type to be used in the algorithm')
    parser.add_argument('--penalty',
                        type=float,
                        default=1.0,
                        help='Penalty parameter of the error term')
    parser.add_argument('--credentail_path_arg',
                        type=str,
                        default='credentail_path',
                        help='Google credentail path')
    parser.add_argument('--project_name_arg',
                        type=str,
                        default='project_name',
                        help='Google project name')
    parser.add_argument('--bucket_name_arg',
                        type=str,
                        default='bucket_name',
                        help='Bucket name')
    parser.add_argument('--file_path_arg',
                        type=str,
                        default='file_path',
                        help='File path')

    args = parser.parse_args()
    run.log('Kernel type', np.str(args.kernel))
    run.log('Penalty', np.float(args.penalty))

    # data = load_breast_cancer() # loading the dataset

    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = args.credentail_path_arg

    project_name = args.project_name_arg
    bucket_name = args.bucket_name_arg
    file_path = args.file_path_arg
    fs = gcsfs.GCSFileSystem(
        project=project_name,
        token=os.environ['GOOGLE_APPLICATION_CREDENTIALS'])
    fs.ls(bucket_name)

    with fs.open(file_path, 'rb') as f:
        data = pd.read_csv(f)

    X = data.iloc[:, :-1]  # build training and test data
    y = data.iloc[:, -1]
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        random_state=seed)
    kfold = model_selection.KFold(n_splits=5)
    scores = {}

    rfc_clf = RandomForestClassifier(max_depth=5,
                                     random_state=seed)  # random forest
    rfc_clf.fit(X_train, y_train)

    results = model_selection.cross_val_score(rfc_clf,
                                              X_test,
                                              y_test,
                                              cv=kfold)  # collect results
    scores[('Random Forest', 'train_score')] = results.mean()
    scores[('Random Forest', 'test_score')] = rfc_clf.score(X_test, y_test)
    print(scores)

    os.makedirs('outputs', exist_ok=True)  # save the model
    joblib.dump(rfc_clf, 'outputs/model.joblib')
Example #44
0
def main():
    flag_adv_vel = True  # read in advection velocity

    # path_data = '/Users/bettinameyer/Dropbox/ClimatePhysics/Code/Tracking/RadarData_Darwin/Radar_Tracking_Data'
    path_data = '/Users/bettinameyer/Dropbox/ClimatePhysics/Code/Tracking/RadarData_Darwin/Radar_Tracking_Data_test'

    # path_in = os.path.join(path_data, files_vel[0])
    # rootgrp = nc.Dataset(path_in, 'r')
    # var = rootgrp.variables['radar_estimated_rain_rate']
    # rootgrp.close()
    ''' (a) Advection Velocity Histogram'''
    # Data structure:
    # Variables:
    # - time: units = "day as %Y%m%d.%f"    (time = 6)
    # - lev: axis = "Z"; (vertical level)   (lev = 1)
    # - x, y: division of domain for advection vel computation in tracking algorithm
    #                                       (x = y = 2)
    # - var1(time, lev, y, x); var2(time, lev, y, x); var3(time, lev, y, x);
    #       >> dim(var1) = time * lev * y * x = 6 * 1 * 2 * 2
    #       >> var1 = (time, lev, y, x) = (6, 1, 2, 2)
    #
    # Generated Data:
    # dict_vel_norm_domain[date]:           dictionary >> contains for each date a (6,1)-array for the domain averaged velocity norm in 4-hourly intervals
    #

    date_arr = []  # array with all data

    # (i) read in netcdf-file
    if flag_adv_vel:
        files_vel = [
            name for name in os.listdir(path_data)
            if (name[4:13] == 'advection' and name[-3:] == '.nc')
        ]
        # files_vel = [name for name in os.listdir(path_data) if (name[4:24] == 'advection_field_it1_' and name[-3:] == '.nc')]
        n_files_vel = len(files_vel)
        print('# files vel: ' + np.str(n_files_vel))  # all files: 5250
        print(files_vel)
        print('')
        # read in test file
        path_in = os.path.join(path_data, files_vel[0])
        rootgrp = nc.Dataset(path_in, 'r')
        vel_x = rootgrp.variables['var1']
        n_time = vel_x.shape[0]
        n_lev = vel_x.shape[1]
        n_y = vel_x.shape[2]
        n_x = vel_x.shape[3]

        # for histogram
        vel_norm_coll = []
        vel_norm_domain_coll = []
        vel_norm_domain_daily_coll = []

        dict_vel_norm_domain = {}
        dict_vel_norm_domain_day = {}
        # print('dict:', type(dict_vel_norm_domain), dict_vel_norm_domain)

    # ''' (A) Collect data (general)'''
    # for path_in in glob.glob(os.path.join(path_data, '*.nc')):
    #     data_name = ntpath.basename(path_in)[:-3]
    #     date = data_name[-8:]
    #     ''' (i) read in advection velocity components & compute norm '''
    #     if data_name[4:13] == 'advection':
    #         pass

    # ''' (A) Test if velocity data different for all days'''
    # test_vel_data(files_vel, path_data, n_time, n_x, n_y)
    ''' (B) Collect velocity data '''
    if flag_adv_vel:
        for data_name in files_vel:
            ''' (i) read in advection velocity components & compute norm '''
            path_in = os.path.join(path_data, data_name)
            rootgrp = nc.Dataset(path_in, 'r')
            print('file: ', data_name, path_in)
            var = rootgrp.variables['var1']

            if var.shape[0] < 12:
                print('PROBLEM WITH VAR SHAPE: ' + str(var.shape))
                print('')
                continue
            print('var:      ', var.shape)

            vel_adv = np.ndarray(shape=(np.append(3, var.shape)))
            vel_adv[0, :] = var[:]
            var = rootgrp.variables['var2']
            vel_adv[1, :] = var[:]
            var = rootgrp.variables['var3']
            vel_adv[2, :] = var[:]
            rootgrp.close()
            ''' (ii) compute velocity norms '''
            vel_norm = np.linalg.norm(vel_adv,
                                      axis=0)  # vel_norm = (6, 1, 2, 2)

            # collect for all data >> histogram
            vel_norm_coll = np.append(vel_norm_coll, np.ravel(vel_norm))
            # average over domain & collect for all data >> histogram
            vel_norm_domain_coll = np.append(
                vel_norm_domain_coll,
                np.mean(np.mean(vel_norm[:, :, :, :], axis=3), axis=2))
            # average over domain and day & collect for all data >> histogram
            vel_norm_domain_daily_coll = np.append(vel_norm_domain_daily_coll,
                                                   np.mean(vel_norm))
            ''' (iii) save all dates with complete data in array '''
            date = data_name[-11:-3]
            date_arr = np.append(date_arr, date)

            # dictionary: contains for each date a (12,1)-array for the domain averaged velocity norm in 4-hourly intervals
            dict_vel_norm_domain[date] = np.mean(np.mean(vel_norm[:, :, :, :],
                                                         axis=3),
                                                 axis=2)
            dict_vel_norm_domain_day[date] = np.mean(vel_norm)

            print('')

    print('')
    print('vel norm:                   ', vel_norm.shape)
    print('vel norm coll:              ', vel_norm_coll.shape)
    print('vel norm domain coll:       ', vel_norm_domain_coll.shape)
    print('vel norm domain daily coll: ', vel_norm_domain_daily_coll.shape)
    print('')
    d = date_arr[0]
    print('dict vel norm domain:       len: ', len(dict_vel_norm_domain),
          'element shape: ', dict_vel_norm_domain[d].shape)
    print('dict vel norm daily domain: len: ', len(dict_vel_norm_domain_day),
          'element shape: ', dict_vel_norm_domain_day[d].shape)
    print('')

    # ''' (B) plotting '''
    # if flag_adv_vel:
    #     ''' (i) plot velocity histogram '''
    #     plot_adv_vel_hist(vel_norm_coll, vel_norm_domain_coll, vel_norm_domain_daily_coll, n_time, path_data)
    ''' (C) filtering'''
    print('')
    print('dates: ', date_arr)
    print('')
    # print(dict_vel_norm_domain)
    # print('')
    # print(dict_vel_norm_domain_day)
    # print('')

    max_v_norm = 5.  # threshold for 2-hourly mean advection velocity
    max_v_norm_daily = 2.5  # threshold for daily mean advection velocity
    dict_adv_small = {}
    dict_adv_small_daily = {}
    if flag_adv_vel:
        for d in date_arr:
            print('date', d)

            if dict_vel_norm_domain_day[d] > max_v_norm_daily:
                print('big (daily): ' + np.str(dict_vel_norm_domain_day[d]))
            else:
                print('small (daily): ' + np.str(dict_vel_norm_domain_day[d]))
                dict_adv_small_daily[d] = dict_vel_norm_domain_day[d]

            if np.any(dict_vel_norm_domain[d] > max_v_norm):
                print('big')
            else:
                print('small')
                dict_adv_small[d] = dict_vel_norm_domain[d]
        print('')
        print('small advection: ', dict_adv_small)
        print('')
        print('small advection daily: ', dict_adv_small_daily)

    return
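
# Small shape check (added illustration, not part of the original script):
# np.linalg.norm with axis=0 collapses the three velocity components, so the
# (3, n_time, n_lev, n_y, n_x) array assembled in step (ii) becomes a
# (n_time, n_lev, n_y, n_x) array of speed magnitudes.
demo_vel = np.ones((3, 6, 1, 2, 2))
assert np.linalg.norm(demo_vel, axis=0).shape == (6, 1, 2, 2)
# each entry equals sqrt(1**2 + 1**2 + 1**2) = sqrt(3) for unit components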
Example #45
0
def fit_hist(data_postselect_raw, numbins=100, plot_hist=True, logplot=True):
    numbins = numbins
    xmin = min(min(np.real(data_postselect_raw)),
               min(np.imag(data_postselect_raw)))
    xmax = max(max(np.real(data_postselect_raw)),
               max(np.imag(data_postselect_raw)))

    histi_re, bin_edges_re = np.histogram(np.real(data_postselect_raw),
                                          bins=numbins,
                                          density=True)
    histi_im, bin_edges_im = np.histogram(np.imag(data_postselect_raw),
                                          bins=numbins,
                                          density=True)
    hist2D, xedges, yedges = np.histogram2d(
        np.real(data_postselect_raw), np.imag(data_postselect_raw),
        [numbins - 1, numbins - 1])  #, range=[[xmin, xmax], [xmin, xmax]])

    x = xedges
    y = histi_re

    gaussroots = []
    Aroots = y.max()
    #dAroots=np.multiply(y.max(),0.2)
    while np.size(gaussroots) < 4:
        Aroots = Aroots / 2
        yroots = y - Aroots
        spline = scipy.interpolate.splrep(x, yroots)
        gaussroots = scipy.interpolate.sproot(spline)

    gaussroots = np.sort(gaussroots)
    print gaussroots
    t01 = (gaussroots[1] + gaussroots[0]) / 2
    t02 = (gaussroots[3] + gaussroots[2]) / 2
    listforindex01 = abs(xedges - t01)
    index01 = listforindex01.argmin()
    listforindex02 = abs(xedges - t02)
    index02 = listforindex02.argmin()
    A1 = max(y[0:index01])
    A2 = max(y[index02:-1])
    sigma1 = max((gaussroots[3] - gaussroots[2]) / 2,
                 (gaussroots[1] - gaussroots[0]) / 2)
    sigma2 = sigma1
    popt, pcov = scipy.optimize.curve_fit(gaussian_sum,
                                          x,
                                          y,
                                          (A1, sigma1, t01, A2, sigma2, t02),
                                          maxfev=1000000)
    thresholdVec = [
        np.abs(+math.erf((t - popt[2]) / (np.sqrt(2) * np.abs(popt[1]))) +
               math.erf((t - popt[5]) / (np.sqrt(2) * np.abs(popt[4]))))
        for t in xedges
    ]
    thresholdIdx = np.argmin(thresholdVec)
    threshold = xedges[thresholdIdx]
    #threshold=(popt[2]+popt[5])/2

    if plot_hist:
        fs = 14
        fsTicks = 14
        gaussfit = gaussian_sum(xedges, *popt)
        Pgth = 0
        for i in range(np.size(xedges) - 1):
            if xedges[i] < threshold:
                Pgth = Pgth + histi_re[i] * (xedges[1] - xedges[0])

        Scurve = np.zeros((numbins))
        for i in range(numbins - 1):
            Scurve[i + 1] = Scurve[i] + histi_re[i]
        Scurve = Scurve / Scurve[-1]

        Peg = 0.5 * (1 - math.erf(
            (threshold - popt[2]) / np.sqrt(2) / np.abs(popt[1])))
        Pge = 0.5 * (1 + math.erf(
            (threshold - popt[5]) / np.sqrt(2) / np.abs(popt[4])))

        data_string = r'${\rm P_{g|e}}=$' + str(round(Pge * 100, 3)) + r'%'
        data_string_0 = r'${\rm P_{e|g}}=$' + str(round(Peg * 100, 3)) + r'%'
        data_string2 = r'P$_{gth}$=' + np.str(np.round(Pgth, 3) * 100) + '%'
        data_string3 = r'1-P$_{gth}$=' + np.str(100 -
                                                np.round(Pgth, 3) * 100) + '%'
        data_string_01 = r'${\rm Threshold}=$' + str(round(threshold, 3))
        fig, ax = plt.subplots(2, 2, figsize=(10, 10))
        if logplot:
            ax[0][0].pcolor(xedges,
                            yedges,
                            np.transpose(np.log(1 + hist2D)),
                            cmap='afmhot')
        else:
            ax[0][0].pcolor(xedges,
                            yedges,
                            np.transpose(hist2D),
                            cmap='afmhot')
        ax[0][0].axis('equal')
        ax[0][0].axis('tight')
        ax[1][0].plot(xedges,
                      histi_re,
                      '.',
                      xedges,
                      gaussfit,
                      '--',
                      linewidth=2.0)
        ax[1][0].text(0.5,
                      0.4,
                      data_string,
                      bbox=dict(facecolor='red', alpha=0.5),
                      transform=ax[1][0].transAxes)
        ax[1][0].text(0.2,
                      0.4,
                      data_string_0,
                      bbox=dict(facecolor='red', alpha=0.5),
                      transform=ax[1][0].transAxes)
        ax[1][0].text(0.5,
                      0.2,
                      data_string_01,
                      bbox=dict(facecolor='red', alpha=0.5),
                      transform=ax[1][0].transAxes)
        ax[0][0].text(0.5,
                      0.7,
                      data_string2,
                      bbox=dict(facecolor='red', alpha=0.5),
                      transform=ax[0][0].transAxes)
        ax[0][0].text(0.5,
                      0.3,
                      data_string3,
                      bbox=dict(facecolor='red', alpha=0.5),
                      transform=ax[0][0].transAxes)
        ax[1][0].axis('tight')
        ax[1][0].set_ylim([histi_re.max(), 0])
        ax[0][1].plot(histi_im, yedges)
        ax[0][1].axis('tight')
        #ax[1][1].plot(xedges, Scurve)
        #ax[1][1].set_ylim([0,1])
        #ax[1][1].axis('tight')
        plt.show()
    return popt, threshold
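
# gaussian_sum is used by curve_fit above but its definition is not part of
# this excerpt. A minimal sketch consistent with the parameter order of the
# initial guess (A1, sigma1, t01, A2, sigma2, t02) -- an assumption, not the
# original definition:
def gaussian_sum(x, A1, sigma1, t01, A2, sigma2, t02):
    # sum of two Gaussians centred at t01 and t02
    return (A1 * np.exp(-(x - t01)**2 / (2.0 * sigma1**2)) +
            A2 * np.exp(-(x - t02)**2 / (2.0 * sigma2**2)))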
Example #46
0
def world_bank_wealth_account(cntry_iso,
                              ref_year,
                              variable_name="NW.PCA.TO",
                              no_land=True):
    """
    Download and unzip wealth accounting historical data (1995, 2000, 2005, 2010, 2014)
    from World Bank (https://datacatalog.worldbank.org/dataset/wealth-accounting).
    Return requested variable for a country (cntry_iso) and a year (ref_year).

    Inputs:
        cntry_iso (str): ISO3-code of country, i.e. "CHN" for China
        ref_year (int): reference year
                         - available in data: 1995, 2000, 2005, 2010, 2014
                         - other years between 1995 and 2014 are interpolated
                         - for years outside range, indicator is scaled
                             proportionally to GDP
        variable_name (str): select one variable, i.e.:
            'NW.PCA.TO': Produced capital stock of country
                         incl. manufactured or built assets such as machinery,
                         equipment, and physical structures
                         and value of built-up urban land (24% mark-up)
            'NW.PCA.PC': Produced capital stock per capita
                         incl. manufactured or built assets such as machinery,
                         equipment, and physical structures
                         and value of built-up urban land (24% mark-up)
            'NW.NCA.TO': Total natural capital of country. Natural capital
                        includes the valuation of fossil fuel energy (oil, gas,
                        hard and soft coal) and minerals (bauxite, copper, gold,
                        iron ore, lead, nickel, phosphate, silver, tin, and zinc),
                        agricultural land (cropland and pastureland),
                        forests (timber and some nontimber forest products), and
                        protected areas.
            'NW.TOW.TO': Total wealth of country.
            Note: Values are measured at market exchange rates in constant 2014 US dollars,
                        using a country-specific GDP deflator.
        no_land (boolean): If True, return produced capital without built-up land value
                        (applies to 'NW.PCA.*' only). Default = True.
    """
    try:
        fname = os.path.join(SYSTEM_DIR, FILE_WORLD_BANK_WEALTH_ACC)
        if not os.path.isfile(fname):
            fname = os.path.join(SYSTEM_DIR, 'Wealth-Accounts_CSV',
                                 FILE_WORLD_BANK_WEALTH_ACC)
        if not os.path.isfile(fname):
            if not os.path.isdir(
                    os.path.join(SYSTEM_DIR, 'Wealth-Accounts_CSV')):
                os.mkdir(os.path.join(SYSTEM_DIR, 'Wealth-Accounts_CSV'))
            file_down = download_file(WORLD_BANK_WEALTH_ACC)
            zip_ref = zipfile.ZipFile(file_down, 'r')
            zip_ref.extractall(os.path.join(SYSTEM_DIR, 'Wealth-Accounts_CSV'))
            zip_ref.close()
            os.remove(file_down)
            LOGGER.debug('Download and unzip complete. Unzipping %s',
                         str(fname))

        data_wealth = pd.read_csv(fname, sep=',', index_col=None, header=0)
    except:
        LOGGER.error('Downloading World Bank Wealth Accounting Data failed.')
        raise

    data_wealth = data_wealth[
        data_wealth['Country Code'].str.contains(cntry_iso)
        & data_wealth['Indicator Code'].str.contains(
            variable_name)].loc[:, '1995':'2014']
    years = list(map(int, list(data_wealth)))
    if data_wealth.size == 0 and 'NW.PCA.TO' in variable_name:  # if country is not found in data
        LOGGER.warning(
            'No data available for country. Using non-financial wealth instead'
        )
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        fac = wealth2gdp(cntry_iso)[1]
        return gdp_year, np.around((fac * gdp_val), 1), 0
    if ref_year in years:  # indicator for reference year is available directly
        result = data_wealth.loc[:, np.str(ref_year)].values[0]
    elif ref_year > np.min(years) and ref_year < np.max(years):  # interpolate
        result = np.interp(ref_year, years, data_wealth.values[0, :])
    elif ref_year < np.min(years):  # scale proportionally to GDP
        gdp_year, gdp0_val = gdp(cntry_iso, np.min(years))
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        result = data_wealth.values[0, 0] * gdp_val / gdp0_val
        ref_year = gdp_year
    else:
        gdp_year, gdp0_val = gdp(cntry_iso, np.max(years))
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        result = data_wealth.values[0, -1] * gdp_val / gdp0_val
        ref_year = gdp_year
    if 'NW.PCA.' in variable_name and no_land:
        # remove value of built-up land from produced capital
        result = result / 1.24
    return ref_year, np.around(result, 1), 1
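
# Hypothetical usage (illustration only; actual numbers depend on the
# downloaded World Bank data): produced capital of China for a reference year
# after 2014 is scaled proportionally to GDP, per the docstring above.
#
#   year, capital, found = world_bank_wealth_account('CHN', 2016,
#                                                    variable_name='NW.PCA.TO',
#                                                    no_land=True)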
Example #47
0
	fileName = goFitsFile.split('/')[-1]
	## if file not found, skip. A GO file was written, but no data was taken
	try:
		dataHdu = fits.open('/lustre/flag/' + projectSession + '/BF/' + fileName[:-5] + 'A.fits')
	except IOError:
		continue
	hdu = fits.open(goFitsFile)
	procNameList.append(hdu[0].header['PROCNAME'])
	objList.append(hdu[0].header['OBJECT'])
	obsTimeList.append(fileName[:-5])
	intList.append(dataHdu[0].header['REQSTI'])
	modeList.append(dataHdu[0].header['MODENAME'])
	scanList.append(hdu[0].header['SCAN'])
	blockNameList.append(hdu[0].header['BLOCK'])
	seqSizeList.append(np.str(hdu[0].header['PROCSIZE']))
	seqNumList.append(np.str(hdu[0].header['PROCSEQN']))

## create dictionary that will write out as table
data = {'Time':obsTimeList, 
	'ProcedureName': procNameList, 
	'Object': objList,
	'ScanNumber':scanList,
	'ScheduleBlock': blockNameList,
	'SequenceNumber': seqNumList,
	'TotalSequenceNumber': seqSizeList, 
	'IntLength': intList,
	'Mode': modeList
}

ascii.write(data, output = projectSession + '_LOG.txt', names=['Time', 'ProcedureName', 'Object', 'ScanNumber', 'ScheduleBlock', 'SequenceNumber', 'TotalSequenceNumber', 'IntLength', 'Mode'])
Example #48
0
File: VCA.py Project: nic-kill/Turbulence
refVel = hdu[0].header['CRVAL3']/1000. ## units of km/s
refVelPix = hdu[0].header['CRPIX3']
## compute initial velocity
initVel = refVel - refVelPix*velRes
lastVel = initVel + velRes * specSize

## get pixel size 
pixRes = np.abs(hdu[0].header['CDELT1'])

# get beam resolution (major axis)
angRes = hdu[0].header['BMAJ']

## compute the velocity resolutions based on user provided bins
totVel = np.abs(lastVel - initVel)
deltaV = np.abs(lastVel - initVel) / (N_VCA - 1)
print('The resolution of the spectral axis will be down-sampled from %.2f [km/s] to %.2f [km/s] in %s intervals of %.2f [km/s]' % (np.abs(velRes), totVel, np.str(N_VCA), deltaV))
channelWidthArr = np.linspace(np.abs(velRes), totVel, N_VCA) 

## re-read input fits as a spectral-cube object
hdu.close()
origCube = SpectralCube.read(fitsName)

## specify global Gaussian factors
fwhm_factor = np.sqrt(8 * np.log(2))

## create lists to store the weighted average slope values
finalSlopes = []
finalErrors = []

cnt = 0 
## Now, loop over each velocity resolution to down-sample the velocity axis and compute the SPS at each velocity resolution 
Example #49
0
alla = {}
variance_svrg_data={}
variance_sgd_data={}
importance_weights_data={}
rewards_snapshot_data={}
rewards_subiter_data={}
n_sub_iter_data={}
ar_data = {}
all_policy_param_data = {}
parallel_sampler.initialize(4)
for k in range(5):
    if (load_policy):
#        snap_policy.set_param_values(np.loadtxt('policy.txt'), trainable=True)
#        policy.set_param_values(np.loadtxt('policy.txt'), trainable=True)
        snap_policy.set_param_values(np.loadtxt('pcb' + np.str(k+1) + '.txt'), trainable=True)
        policy.set_param_values(np.loadtxt('pcb' + np.str(k+1) + '.txt'), trainable=True)
    else:
        policy.set_param_values(snap_policy.get_param_values(trainable=True), trainable=True) 
    avg_return = np.zeros(s_tot)
    #np.savetxt("policy_novar.txt",snap_policy.get_param_values(trainable=True))
    n_sub_iter=[]
    rewards_sub_iter=[]
    rewards_snapshot=[]
    importance_weights=[]
    variance_svrg = []
    variance_sgd = []
    all_rew = []
    all_policy_param = []
    j=0
    while j<s_tot-N:
Example #50
0
def main(args):
    # create the outputs folder
    os.makedirs('outputs', exist_ok=True)

    #     datasett = pd.read_csv('Mydata.txt', sep=",", header=None)
    #     print(datasett.head())

    #     datam = preprocessing.normalize(datatasett)

    #     datasett.shape
    #     datasett.head()
    #     datasett.describe()

    #     X = dataset.iloc[:, [2,3,4,5,6]].values
    #     y = dataset.iloc[:, 8].values

    #     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

    #     regressor = LinearRegression()
    #     regressor.fit(X_train, y_train)

    #     print(regressor.intercept_)

    #     print(regressor.coef_)

    #     y_pred = regressor.predict(X_test)

    #     df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
    #     df
    #     print(df.head())

    #     print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
    #     print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
    #     print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
    #     print("Training set {:.2f}".format(regressor.score(X_train, y_train)))
    #     print("Test set {:.2f}".format(regressor.score(X_test, y_test)))
    #     # Log arguments
    run.log('Kernel type', np.str(args.kernel))
    run.log('Penalty', np.float(args.penalty))

    # Load iris dataset
    X, y = datasets.load_iris(return_X_y=True)

    #dividing X,y into train and test data
    x_train, x_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=223)
    data = {
        'train': {
            'X': x_train,
            'y': y_train
        },
        'test': {
            'X': x_test,
            'y': y_test
        }
    }

    # train a SVM classifier
    svm_model = SVC(kernel=args.kernel, C=args.penalty,
                    gamma='scale').fit(data['train']['X'], data['train']['y'])
    svm_predictions = svm_model.predict(data['test']['X'])

    # accuracy for X_test
    accuracy = svm_model.score(data['test']['X'], data['test']['y'])
    print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))
    run.log('Accuracy', np.float(accuracy))

    # precision for X_test
    precision = precision_score(svm_predictions,
                                data["test"]["y"],
                                average='weighted')
    print('Precision of SVM classifier on test set: {:.2f}'.format(precision))
    run.log('precision', precision)

    # recall for X_test
    recall = recall_score(svm_predictions,
                          data["test"]["y"],
                          average='weighted')
    print('Recall of SVM classifier on test set: {:.2f}'.format(recall))
    run.log('recall', recall)

    # f1-score for X_test
    f1 = f1_score(svm_predictions, data["test"]["y"], average='weighted')
    print('F1-Score of SVM classifier on test set: {:.2f}'.format(f1))
    run.log('f1-score', f1)

    # create a confusion matrix
    labels = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
    labels_numbers = [0, 1, 2]
    cm = confusion_matrix(y_test, svm_predictions, labels_numbers)
    log_confusion_matrix(cm, labels)

    #     files saved in the "outputs" folder are automatically uploaded into run history
    joblib.dump(svm_model, os.path.join('outputs', args.modelname))
    run.log('Model Name', np.str(args.modelname))
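
# This excerpt uses args.kernel, args.penalty and args.modelname without
# showing how they are parsed; a minimal sketch of the assumed argument
# parsing (argument names inferred from the attributes used above, so they
# are hypothetical):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--kernel', type=str, default='linear')
#   parser.add_argument('--penalty', type=float, default=1.0)
#   parser.add_argument('--modelname', type=str, default='model.joblib')
#   main(parser.parse_args())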
Example #51
0
        rgb_files = np.array(sorted(glob.glob(images)))
        print("Loaded files: ", len(files))

        if use_random_idx:
            val_idx = np.random.choice(np.arange(0, len(files), 1), size=int(num_val), replace=False)
            print("Chosen Files \n", val_idx)
            files = files[val_idx]
        else:
            num_val = len(files)

        data = {}
        iteration = 0

        #=====================

        json_addr = json_path + scene + json_name + 'val_' + np.str(len(files)) + '.json'
        print("json_addr: ", json_addr)
        for idx, file in enumerate(files):

            str_num = file.split(data_path + folder_to_save)[1]
            img_number = str_num.split(image_ext)[0]
            label_addr = file

            print("label_addr: ", label_addr)
            print('Image: {}/{}'.format(iteration, len(files)))

            rgb_img = np.array(Image.open(rgb_files[idx]))
            label_img = Image.open(label_addr)
            object_ids = np.unique(np.array(label_img))
            print("GT Affordances:", object_ids)
Example #52
0
def custom_axis_formater_inset(custom_title, custom_x_label, custom_y_label,
                               xmin, xmax, ymin, ymax, xprec, yprec):

    # get axes and tick from plot
    ax = plt.gca()
    # set the number of major and minor bins for x,y axes
    # prune='lower' --> remove lowest tick label from x axis
    xmajorLocator = MaxNLocator(6, prune='lower')
    xmajorFormatter = FormatStrFormatter('%.' + np.str(xprec) + 'f')
    xminorLocator = MaxNLocator(12)

    ymajorLocator = MaxNLocator(6)
    ymajorFormatter = FormatStrFormatter('%.' + np.str(yprec) + 'f')
    yminorLocator = MaxNLocator(12)

    # format major and minor ticks width, length, direction
    ax.tick_params(which='both', width=1, direction='in', labelsize=20)
    ax.tick_params(which='major', length=6)
    ax.tick_params(which='minor', length=4)

    # set axes thickness
    ax.spines['top'].set_linewidth(1.5)
    ax.spines['bottom'].set_linewidth(1.5)
    ax.spines['right'].set_linewidth(1.5)
    ax.spines['left'].set_linewidth(1.5)

    ax.xaxis.set_major_locator(xmajorLocator)
    ax.yaxis.set_major_locator(ymajorLocator)

    ax.xaxis.set_major_formatter(xmajorFormatter)
    ax.yaxis.set_major_formatter(ymajorFormatter)

    # for the minor ticks, use no labels; default NullFormatter
    ax.xaxis.set_minor_locator(xminorLocator)
    ax.yaxis.set_minor_locator(yminorLocator)

    # grid and axes are drawn below the data plot
    ax.set_axisbelow(True)

    # convert x axis units to radians
    #ax.convert_xunits(radians)

    # add x,y grids to plot area
    ax.xaxis.grid(True,
                  zorder=0,
                  color='gainsboro',
                  linestyle='-',
                  linewidth=1)
    ax.yaxis.grid(True,
                  zorder=0,
                  color='gainsboro',
                  linestyle='-',
                  linewidth=1)

    # set axis labels
    #ax.set_xlabel(custom_x_label, fontsize=20)
    #ax.set_ylabel(custom_y_label, fontsize=20)

    # set plot title
    #ax.set_title(custom_title, loc='right', fontsize=12)

    return
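
# Hypothetical usage sketch (assumes the module-level pyplot / MaxNLocator /
# FormatStrFormatter imports that this excerpt relies on):
#
#   plt.plot(np.linspace(0.0, 1.0, 50), np.linspace(0.0, 1.0, 50)**2)
#   custom_axis_formater_inset('inset', 'x (nm)', 'y (a.u.)',
#                              0.0, 1.0, 0.0, 1.0, 2, 1)
#   plt.show()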
Example #53
0
                dataset, config, image_idx, use_mini_mask=False)
            visualize.display_instances(image,
                                        bbox,
                                        mask,
                                        class_ids,
                                        dataset.class_names,
                                        ax=ax[i // int(np.sqrt(limit)),
                                              i % int(np.sqrt(limit))],
                                        captions=captions[class_ids].tolist())
            # log("molded_image", image)
            # log("mask", mask)
            # log("class_ids", class_ids)
            ### print("captions", np.array(dataset.class_names)[class_ids].tolist())
        plt.savefig(os.getcwd() + save_to_folder +
                    "gt_affordance_labels/gt_affordance_labels_" +
                    np.str(idx_samples) + ".png",
                    bbox_inches='tight')

    ##################################
    ###  Image Size Stats
    ##################################
    print('\n --------------- Image Size ---------------')

    image_shape = np.array([s['shape'] for s in stats])
    image_color = np.array([s['color'] for s in stats])
    print("Height  mean: {:.2f}  median: {:.2f}  min: {:.2f}  max: {:.2f}".
          format(np.mean(image_shape[:, 0]), np.median(image_shape[:, 0]),
                 np.min(image_shape[:, 0]), np.max(image_shape[:, 0])))
    print("Width   mean: {:.2f}  median: {:.2f}  min: {:.2f}  max: {:.2f}".
          format(np.mean(image_shape[:, 1]), np.median(image_shape[:, 1]),
                 np.min(image_shape[:, 1]), np.max(image_shape[:, 1])))
Example #54
0
                label=label_mdl)
tx2, = plt.plot(FM_thickness_range,
                FM_thickness_range * slope_mdl + intercept_mdl,
                'k-',
                mfc='lightgray',
                markersize=6,
                label=label_mdl)
# display the legend for the defined labels
plt.legend([tx1, tx2], [label_mdl, 'fit'],
           loc='upper left',
           fontsize=14,
           frameon=True)
plt.figtext(0.05,
            0.92,
            r'$MDL:$ ' +
            np.str(np.round(-1.0 * intercept_mdl / slope_mdl, 3)) + r'$\;nm$' +
            '	a: ' + np.str(slope_mdl) + '	b: ' + np.str(intercept_mdl) +
            '\n' + r'$R^2:$' + np.str(r_value_mdl * r_value_mdl) + '	S: ' +
            np.str(std_err_mdl),
            size=14)

plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)

custom_axis_formater(plot_title, axis_label_nm, axis_label_msd, xmin, xmax,
                     ymin, ymax, xprec, yprec)
#-----------------------------------------------------------------------------------------------------

fig1 = plt.figure(figsize=(9, 9), dpi=72)
fig1.canvas.set_window_title(version_name)
spec1 = gridspec.GridSpec(ncols=1, nrows=1)
Example #55
0
     print "Unix time: %f" % RB.get_times(header)[1][0]

     if make_highres_plot is True:# and k==1:
          print "Making high res plot"
          arr_highres = RB.reorg_array(header, data)#, rbtime=625)
          nto = len(arr_highres)
          arr_highres = arr_highres[:nto//RB.nperpacket*RB.nperpacket]
#          arr_highres = arr_highres[:nto//25*25]

          arr_highres = np.abs(arr_highres.reshape(-1, RB.nperpacket, npol, nfreq))**2
#          arr_highres = np.abs(arr_highres.reshape(-1, 25, npol, nfreq))**2

          arr_highres = arr_highres.sum(1)
#          arr_highres = arr_highres.reshape(-1, 4, 512, 2).mean(-1)#          arr_highres1d = r
          np.save('dd' + np.str(k), arr_highres)
#          rbf.plot_waterfall(arr_highres.sum(1), outfile + np.str(k) + '.png')
          continue
#          rbf.plot_1d(arr_highres1d, outfile + '1d.png')


     times_o = RB.get_times(header, False)

#     print "RA: %d %f" % (times_o[0], eph.transit_RA(times_o[0]))

     # In case packets straddle multiple files, don't "correlate_and_fill"
     # until you have 3 files
     if accumulate == True:

          header_acc.append(header)
          data_acc.append(data)
def SEIR_model_publish_w_risk(metro_pop,
                              school_calendar,
                              beta0,
                              phi,
                              sigma,
                              gamma,
                              eta,
                              mu,
                              omega,
                              tau,
                              nu,
                              pi,
                              rho,
                              n_age,
                              n_risk,
                              total_time,
                              interval_per_day,
                              shift_week,
                              time_begin,
                              time_begin_sim,
                              initial_i,
                              sd_date,
                              sd_level,
                              trigger_type,
                              close_trigger,
                              reopen_trigger,
                              monitor_lag,
                              report_rate,
                              deterministic=True,
                              print_vals=True,
                              extra_params=None):
    """
    :param metro_pop: np.array of shape (n_age, n_risk)
    :param school_calendar: np.array of shape(), school calendar from data
    :param beta0: np.array of shape (n_age, ), baseline beta
    :param phi: dict of 4 np.array of shape (n_age, n_age),
        contact matrix of all, school, work, home
    :param sigma: np.array of shape (n_age, ), rate of E to I
    :param gamma: np.array of shape (3, n_age), rate of I to R
    :param eta: old: np.array of shape (n_age, ), rate from I^y to I^H
    :param mu: np.array of shape (n_age, ), rate from I^H to D
    :param omega: np.array of shape (5, n_age), relative infectiousness of I / P
    :param tau: np.array of shape (n_age, ), symptomatic rate of I
    :param nu: np.array of shape (n_risk, n_age), case fatality rate in I^H
    :param pi: np.array of shape (n_risk, n_age), Pr[I^Y to I^H]
    :param rho: np.array of shape (2, n_age), rate P^A/P^I -> I^A/I^Y
    :param n_age: int, number of age groups
    :param n_risk: int, number of risk groups
    :param total_time: int, total length of simulation in (Days)
    :param interval_per_day: int, number of intervals within a day
    :param shift_week: int, shift week !!
    :param time_begin: datetime, time begin
    :param time_begin_sim: int, time to begin simulation
    :param initial_i: np.array of shape(n_age, n_risk), I0
    :param sd_date: list of 2 int, time to start and end social distancing
    :param sd_level: float, % reduction in non-household contacts
    :param trigger_type: str, {'cml', 'current', 'new'}
    :param close_trigger: str, format: type_population_number;
        example: number_all_5 or ratio_school_1 or date__20200315
    :param reopen_trigger: str, format: type_population_number,
        example: monitor_all_75 (75% reduction), no_na_12 (12 weeks)
    :param monitor_lag: int, time lag between surveillance and real time in days
    :param report_rate: float, proportion Y can seen
    :param deterministic: boolean, whether to remove poisson stochasticity
    :param extra_params: dictionary of extra parameters for subgroup if not None
    :return: compt_s, compt_e, compt_ia, compt_ih, compt_ih, compt_r, compt_d,
        compt_e2compt_iy
    """

    date_begin = dt.datetime.strptime(np.str(time_begin_sim), '%Y%m%d') + \
                 dt.timedelta(weeks=shift_week)
    sd_begin_date = dt.datetime.strptime(np.str(sd_date[0]), '%Y%m%d')
    sd_end_date = dt.datetime.strptime(np.str(sd_date[1]), '%Y%m%d')
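    # time_begin_sim and the sd_date entries are integer dates in YYYYMMDD
    # form (e.g. 20200315, as in the close_trigger docstring example), hence
    # the conversion through np.str() before strptime; shift_week moves the
    # simulation start by whole weeks.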
    sim_begin_idx = (date_begin - time_begin).days
    school_calendar = school_calendar[sim_begin_idx:]

    # Contact matrix for 5 or more age groups, adjusted to time-step
    #if subgroup in ['Grocery', 'Construction', 'Teachers']: # phi = matrices

    phi_all = phi['phi_all'] / interval_per_day
    phi_school = phi['phi_school'] / interval_per_day
    phi_work = phi['phi_work'] / interval_per_day
    phi_home = phi['phi_home'] / interval_per_day
    # phi_other = phi_all - phi_school - phi_work - phi_home
    phi_other = phi['phi_other'] / interval_per_day

    # Get extra parameters if any
    if extra_params is not None:
        # Subgroup name and parameter names/values
        subgroup = list(extra_params.keys())[0]
        extra_params_details = extra_params[subgroup]

        # Get extra parameters names and values
        extra_params_names = extra_params_details[0]
        extra_params_vals = list(extra_params_details[1])

        if print_vals:
            print('Subgroup:', subgroup)
            print('Subgroup parameters', extra_params_names, extra_params_vals)

        if subgroup == 'Grocery':
            # Grocery store specific contacts and non g_store other contacts
            phi_g_store = phi['phi_g_store'] / interval_per_day
            phi_other_non_gs = phi_other - phi_g_store

            # Work contacts split for grocery workers: work on weekends
            phi_work_GW = phi_work.copy() * 0
            phi_work_GW[-1, :] = phi_work[-1, :]
            phi_work[-1, :] = phi_work[-1, :] * 0

            # Contact reduction at grocery store for shoppers due to SD
            g_shopper_sd_idx = extra_params_names.index('g_shopper_sd')
            g_shopper_sd = extra_params_vals[g_shopper_sd_idx]

            # Contact reduction at grocery store for workers due to SD
            g_worker_sd_idx = extra_params_names.index('g_worker_sd')
            g_worker_sd = extra_params_vals[g_worker_sd_idx]

        elif subgroup == 'Construction':
            # Social distancing on construction sites
            delta_CW_idx = extra_params_names.index('delta_CW')
            delta_CW = extra_params_vals[delta_CW_idx]

            # Proportion of construction workers allowed to work
            prop_CW_idx = extra_params_names.index('prop_CW')
            prop_CW = extra_params_vals[prop_CW_idx]

            # Work contacts split for construction workers
            phi_work_CW = phi_work.copy() * 0
            phi_work_CW[-1, -1] = phi_work[-1, -1]
            phi_work[-1, -1] = 0

        elif subgroup == 'Teachers':
            # get parameters - same as beta config

            # proportion of people in school (t, s, v)
            prop_school_sd_idx = extra_params_names.index('prop_school_sd')
            prop_school_sd = extra_params_vals[prop_school_sd_idx]

            # susceptibility
            suscep_param_idx = extra_params_names.index('suscep_param')
            suscep_param = extra_params_vals[suscep_param_idx]

            # infectiousness
            infect_param_idx = extra_params_names.index('infect_param')
            infect_param = extra_params_vals[infect_param_idx]

    if print_vals:
        print('Contact matrices\n\
        All: {}\nSchool: {}\nWork: {}\nHome: {}\nOther places: {}'\
            .format(phi_all * interval_per_day, phi_school * interval_per_day,
                    phi_work * interval_per_day, phi_home * interval_per_day,
                    phi_other * interval_per_day))

    # Rate from symptom onset to hospitalized
    eta = eta / interval_per_day
    if print_vals:
        print('eta', eta)
        print('Duration from symptom onset to hospitalized', 1 / eta / \
            interval_per_day)

    # Symptomatic rate
    if print_vals:
        print('Asymptomatic rate', 1 - tau)

    # Rate from hospitalized to death
    mu = mu / interval_per_day
    if print_vals:
        print('mu', mu)
        print('Duration from hospitalized to death', 1 / mu / interval_per_day)

    # Relative infectiousness for the Ia, Iy, Ih, Pa, Py compartments
    omega_a, omega_y, omega_h, omega_pa, omega_py = omega  # CHANGED
    if print_vals:
        print('Relative infectiousness for Ia, Iy, Ih, Pa, Py is {0} {1} {2} {3} {4}'\
            .format(*omega))

    # Incubation period
    sigma = sigma / interval_per_day
    if print_vals:
        print('sigma', sigma)
        print('Incubation period is {}'.format(1 / sigma / interval_per_day))

    # Recovery rate
    gamma_a, gamma_y, gamma_h = gamma / interval_per_day
    if print_vals:
        print('gamma', gamma_a, gamma_y, gamma_h)
        print('Infectious period for Ia, Iy, Ih is {0} {1} {2}'\
            .format(1 / gamma_a.mean() / interval_per_day,
                    1 / gamma_y.mean() / interval_per_day,
                    1 / gamma_h.mean() / interval_per_day))

    # Rate from pre-symptomatic to symptomatic / asymptomatic
    rho_a, rho_y = rho / interval_per_day  # NEW
    if print_vals:
        print('rho', rho_a, rho_y)
        print('Pre-(a)symptomatic period for Pa, Py, is {0} {1}'\
            .format(1 / rho_a.mean() / interval_per_day,
                    1 / rho_y.mean() / interval_per_day))

    # Case Fatality Rate
    nu_l, nu_h = nu
    if print_vals:
        print('Hosp fatality rate, low risk: {0}. high risk: {1}'.format(*nu))

    # Probability symptomatic go to hospital
    pi_l, pi_h = pi
    if print_vals:
        print('Probability of symptomatic individuals go to hospital', pi)

    # Compartments, axes = (time, age, risk)
    compt_s = np.zeros(shape=(total_time * interval_per_day, n_age, n_risk))
    compt_e, compt_pa, compt_py = compt_s.copy(), compt_s.copy(), compt_s.copy(
    )
    compt_ia, compt_iy = compt_s.copy(), compt_s.copy()
    compt_ih, compt_r, compt_d = compt_s.copy(), compt_s.copy(), compt_s.copy()

    # Transitions
    compt_e2compt_p, compt_e2compt_py = compt_s.copy(), compt_s.copy()
    compt_p2compt_i = compt_s.copy()  # sum of pa2ia and py2iy
    compt_pa2compt_ia, compt_py2compt_iy = compt_s.copy(), compt_s.copy()
    compt_iy2compt_ih, compt_h2compt_d = compt_s.copy(), compt_s.copy()

    # Set initial value for S compartment
    compt_s[0] = metro_pop - initial_i
    compt_py[0] = initial_i

    # Placeholders for school closure/reopening tracking
    school_closed = False
    school_reopened = False
    school_close_date = 'NA'
    school_reopen_date = 'NA'

    # Iterate over intervals
    print("sd_all_contacts for loop")
    for t in range(1, total_time * interval_per_day):

        days_from_t0 = np.floor((t + 0.1) / interval_per_day)
        t_date = date_begin + dt.timedelta(days=days_from_t0)

        # Use appropriate contact matrix
        # Use different phi values on different days of the week
        if sd_begin_date <= t_date < sd_end_date:
            contact_reduction = sd_level
            sd_active = 1.
        else:
            contact_reduction = 0.
            sd_active = 0.

        # Different computations for subgroups
        if extra_params is not None:
            # applying params to reduce contacts
            # contact_reduction - in sd_list, reduces contacts
            if subgroup == 'Grocery':
                if sd_active > 0:
                    GShopper_mult = 1 - g_shopper_sd
                    GWorker_mult = 1 - g_worker_sd
                else:
                    GShopper_mult = 1.
                    GWorker_mult = 1.

                phi_weekday = (1 - contact_reduction) * \
                    (phi_home + phi_work + phi_school + phi_other_non_gs) + \
                    phi_work_GW * GWorker_mult + phi_g_store * GShopper_mult
                phi_weekend = (1 - contact_reduction) * (phi_home + \
                    phi_other_non_gs) + phi_work_GW * GWorker_mult + \
                    phi_g_store * GShopper_mult

            elif subgroup == 'Construction':
                # Contacts only adjusted when social distancing in place
                if sd_active > 0:
                    CW_multiplier = delta_CW * prop_CW
                else:
                    CW_multiplier = 1

                # Construction workers' work contacts are not reduced in the
                # same way when social distancing is in place
                phi_weekday = (1 - contact_reduction) * \
                    (phi_home + phi_work + phi_school + phi_other) + \
                    phi_work_CW  * CW_multiplier
                phi_weekend = (1 - contact_reduction) * (phi_home + phi_other)

            elif subgroup == 'Teachers':
                # Schools get their own contact reduction (prop_school_sd)
                # when social distancing is active
                if sd_active > 0:
                    school_contact_reduction = prop_school_sd
                else:
                    school_contact_reduction = 0

                phi_weekday = (1 - contact_reduction) * \
                    (phi_home + phi_work + phi_other) + \
                    phi_school * (1 - school_contact_reduction)
                phi_weekend = (1 - contact_reduction) * (phi_home + phi_other)

        else:
            # No subgroup
            phi_weekday = (1 - contact_reduction) * phi_all
            phi_weekend = (1 - contact_reduction) * (phi_all - phi_school - \
                phi_work)

        phi_weekday_holiday = phi_weekend
        phi_weekday_long_break = phi_weekday - (1 - contact_reduction) * \
            phi_school
        if subgroup == 'Teachers':
            phi_weekday_long_break = phi_weekday - \
                (1 - school_contact_reduction) * phi_school

        phi_open = [
            phi_weekday, phi_weekend, phi_weekday_holiday,
            phi_weekday_long_break
        ]
        phi_close = [
            phi_weekday - (1 - contact_reduction) * phi_school, phi_weekend,
            phi_weekday_holiday, phi_weekday_long_break
        ]

        # 1-weekday, 2-weekend, 3-weekday holiday, 4-weekday long break
        calendar_code = int(school_calendar[int(days_from_t0)])
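        # Schools count as open before any closure (neither flag set) and again
        # after reopening (both flags set); in between the closed matrices apply.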
        if school_closed == school_reopened:
            phi = phi_open[calendar_code - 1]
        else:
            phi = phi_close[calendar_code - 1]

        temp_s = np.zeros(shape=(n_age, n_risk))
        temp_e = np.zeros(shape=(n_age, n_risk))
        temp_e2py = np.zeros(shape=(n_age, n_risk))
        temp_e2p = np.zeros(shape=(n_age, n_risk))
        temp_pa = np.zeros(shape=(n_age, n_risk))
        temp_py = np.zeros(shape=(n_age, n_risk))
        temp_pa2ia = np.zeros(shape=(n_age, n_risk))
        temp_py2iy = np.zeros(shape=(n_age, n_risk))
        temp_p2i = np.zeros(shape=(n_age, n_risk))
        temp_ia = np.zeros(shape=(n_age, n_risk))
        temp_iy = np.zeros(shape=(n_age, n_risk))
        temp_ih = np.zeros(shape=(n_age, n_risk))
        temp_r = np.zeros(shape=(n_age, n_risk))
        temp_d = np.zeros(shape=(n_age, n_risk))
        temp_iy2ih = np.zeros(shape=(n_age, n_risk))
        temp_h2d = np.zeros(shape=(n_age, n_risk))

        ## within nodes
        # for each age group
        for a in range(n_age):

            # for each risk group
            for r in range(n_risk):

                rate_s2e = 0.

                if r == 0:  # r == 0 is the low-risk group, r == 1 the high-risk group
                    temp_nu = nu_l
                    temp_pi = pi_l
                else:
                    temp_nu = nu_h
                    temp_pi = pi_h

                # Calculate infection force (F)
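                # In full, for susceptibles of age a and risk r:
                #   rate_s2e = suscep_param[a] * S[t-1, a, r]
                #     * sum_{a2, r2} infect_param[a2] * beta0[a2] * phi[a, a2]
                #       * (omega_a*Ia + omega_y*Iy + omega_pa*Pa + omega_py*Py)[t-1, a2, r2]
                #       / N[a2]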
                for a2 in range(n_age):
                    for r2 in range(n_risk):
                        # Weight each infectious compartment by its
                        # age-specific relative infectiousness (omega_*[a2])
                        rate_s2e += suscep_param[a] * infect_param[a2] * beta0[a2] * phi[a, a2] * \
                            compt_s[t - 1, a, r] * \
                            (omega_a[a2] * compt_ia[t - 1, a2, r2] + \
                            omega_y[a2] * compt_iy[t - 1, a2, r2] + \
                            omega_pa[a2] * compt_pa[t - 1, a2, r2] + \
                            omega_py[a2] * compt_py[t - 1, a2, r2]) / \
                            np.sum(metro_pop[a2])

                if np.isnan(rate_s2e):
                    rate_s2e = 0

                # Rate change of each compartment
                # (besides S -> E calculated above)
                rate_e2p = sigma[a] * compt_e[t - 1, a, r]
                rate_pa2ia = rho_a[a] * compt_pa[t - 1, a, r]
                rate_py2iy = rho_y[a] * compt_py[t - 1, a, r]
                rate_ia2r = gamma_a[a] * compt_ia[t - 1, a, r]
                rate_iy2r = (1 - temp_pi[a]) * gamma_y[a] * \
                    compt_iy[t - 1, a, r]
                rate_ih2r = (1 - temp_nu[a]) * gamma_h[a] * \
                    compt_ih[t - 1, a, r]
                rate_iy2ih = temp_pi[a] * eta[a] * compt_iy[t - 1, a, r]
                rate_ih2d = temp_nu[a] * mu[a] * compt_ih[t - 1, a, r]

                # Stochastic rates
                if not deterministic:
                    rate_s2e = np.random.poisson(rate_s2e)
                if np.isinf(rate_s2e):
                    rate_s2e = 0

                if not deterministic:
                    rate_e2p = np.random.poisson(rate_e2p)
                if np.isinf(rate_e2p):
                    rate_e2p = 0

                if not deterministic:
                    rate_pa2ia = np.random.poisson(rate_pa2ia)
                if np.isinf(rate_pa2ia):
                    rate_pa2ia = 0

                if not deterministic:
                    rate_py2iy = np.random.poisson(rate_py2iy)
                if np.isinf(rate_py2iy):
                    rate_py2iy = 0  # NEW

                if not deterministic:
                    rate_ia2r = np.random.poisson(rate_ia2r)
                if np.isinf(rate_ia2r):
                    rate_ia2r = 0

                if not deterministic:
                    rate_iy2r = np.random.poisson(rate_iy2r)
                if np.isinf(rate_iy2r):
                    rate_iy2r = 0

                if not deterministic:
                    rate_ih2r = np.random.poisson(rate_ih2r)
                if np.isinf(rate_ih2r):
                    rate_ih2r = 0

                if not deterministic:
                    rate_iy2ih = np.random.poisson(rate_iy2ih)
                if np.isinf(rate_iy2ih):
                    rate_iy2ih = 0

                if not (deterministic):
                    rate_ih2d = np.random.poisson(rate_ih2d)
                if np.isinf(rate_ih2d):
                    rate_ih2d = 0

                # In the below block, calculate values + deltas of each category
                # in SEIR, for each age-risk category, at this timepoint
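                # Summary of the discrete-time updates applied below, per age
                # group a and risk group r (rates are per simulation interval):
                #   dS  = -rate_s2e
                #   dE  =  rate_s2e  - rate_e2p
                #   dPa = (1 - tau) * rate_e2p - rate_pa2ia
                #   dPy =  tau * rate_e2p      - rate_py2iy
                #   dIa =  rate_pa2ia - rate_ia2r
                #   dIy =  rate_py2iy - rate_iy2r - rate_iy2ih
                #   dIh =  rate_iy2ih - rate_ih2r - rate_ih2d
                #   dR  =  rate_ia2r + rate_iy2r + rate_ih2r
                #   dD  =  rate_ih2d
                # Negative compartment values are clamped to zero and the
                # outgoing rates rebalanced accordingly.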

                d_s = -rate_s2e
                temp_s[a, r] = compt_s[t - 1, a, r] + d_s
                if temp_s[a, r] < 0:
                    rate_s2e = compt_s[t - 1, a, r]
                    temp_s[a, r] = 0

                d_e = rate_s2e - rate_e2p
                temp_e[a, r] = compt_e[t - 1, a, r] + d_e
                if temp_e[a, r] < 0:
                    rate_e2p = compt_e[t - 1, a, r] + rate_s2e
                    temp_e[a, r] = 0

                temp_e2p[a, r] = rate_e2p
                temp_e2py[a, r] = tau[a] * rate_e2p
                if temp_e2py[a, r] < 0:
                    rate_e2p = 0
                    temp_e2p[a, r] = 0
                    temp_e2py[a, r] = 0

                d_pa = (1 - tau[a]) * rate_e2p - rate_pa2ia
                temp_pa[a, r] = compt_pa[t - 1, a, r] + d_pa
                temp_pa2ia[a, r] = rate_pa2ia
                if temp_pa[a, r] < 0:
                    rate_pa2ia = compt_pa[t - 1, a,
                                          r] + (1 - tau[a]) * rate_e2p
                    temp_pa[a, r] = 0
                    temp_pa2ia[a, r] = rate_pa2ia

                d_py = tau[a] * rate_e2p - rate_py2iy
                temp_py[a, r] = compt_py[t - 1, a, r] + d_py
                temp_py2iy[a, r] = rate_py2iy
                if temp_py[a, r] < 0:
                    rate_py2iy = compt_py[t - 1, a, r] + tau[a] * rate_e2p
                    temp_py[a, r] = 0
                    temp_py2iy[a, r] = rate_py2iy

                d_ia = rate_pa2ia - rate_ia2r
                temp_ia[a, r] = compt_ia[t - 1, a, r] + d_ia
                if temp_ia[a, r] < 0:
                    rate_ia2r = compt_ia[t - 1, a, r] + rate_pa2ia
                    temp_ia[a, r] = 0

                d_iy = rate_py2iy - rate_iy2r - rate_iy2ih
                temp_iy[a, r] = compt_iy[t - 1, a, r] + d_iy
                if temp_iy[a, r] < 0:
                    rate_iy2r = (compt_iy[t - 1, a, r] + rate_py2iy) * \
                        rate_iy2r / (rate_iy2r + rate_iy2ih)
                    rate_iy2ih = compt_iy[t - 1, a, r] + rate_py2iy - rate_iy2r
                    temp_iy[a, r] = 0

                temp_iy2ih[a, r] = rate_iy2ih
                if temp_iy2ih[a, r] < 0:
                    temp_iy2ih[a, r] = 0

                d_ih = rate_iy2ih - rate_ih2r - rate_ih2d
                temp_ih[a, r] = compt_ih[t - 1, a, r] + d_ih
                if temp_ih[a, r] < 0:
                    rate_ih2r = (compt_ih[t - 1, a, r] + rate_iy2ih) * \
                        rate_ih2r / (rate_ih2r + rate_ih2d)
                    rate_ih2d = compt_ih[t - 1, a, r] + rate_iy2ih - rate_ih2r
                    temp_ih[a, r] = 0

                d_r = rate_ia2r + rate_iy2r + rate_ih2r
                temp_r[a, r] = compt_r[t - 1, a, r] + d_r

                d_d = rate_ih2d
                temp_h2d[a, r] = rate_ih2d
                temp_d[a, r] = compt_d[t - 1, a, r] + d_d

        # We are now done calculating compartment values for each
        # age-risk category
        # Copy this vector array as a slice on time axis
        compt_s[t] = temp_s
        compt_e[t] = temp_e
        compt_pa[t] = temp_pa
        compt_py[t] = temp_py
        compt_ia[t] = temp_ia
        compt_iy[t] = temp_iy
        compt_ih[t] = temp_ih
        compt_r[t] = temp_r
        compt_d[t] = temp_d

        compt_e2compt_p[t] = temp_e2p
        compt_e2compt_py[t] = temp_e2py
        compt_pa2compt_ia[t] = temp_pa2ia
        compt_py2compt_iy[t] = temp_py2iy
        compt_p2compt_i[t] = temp_pa2ia + temp_py2iy
        compt_iy2compt_ih[t] = temp_iy2ih
        compt_h2compt_d[t] = temp_h2d

        # Check if school closure is triggered
        t_surveillance = np.maximum(t - monitor_lag * interval_per_day, 0)
        # Current number of infected
        current_iy = compt_iy[t_surveillance]
        new_iy = compt_py2compt_iy[t_surveillance]  # NEW
        cml_iy = np.sum(compt_py2compt_iy[:(t_surveillance + 1)], axis=0)
        trigger_type_dict = {
            'cml': cml_iy,
            'current': current_iy,
            'new': new_iy
        }
        trigger_iy = trigger_type_dict[trigger_type.lower()]

        if not school_closed:
            school_closed = school_closure.school_close(
                close_trigger, t_date, trigger_iy, metro_pop)
            if school_closed:
                school_close_time = t
                school_close_date = t_date
                school_close_iy = trigger_iy
        else:
            if not school_reopened:
                school_reopened = school_closure.school_reopen(
                    reopen_trigger, school_close_iy, trigger_iy,
                    school_close_time, t, t_date, interval_per_day)
                if school_reopened:
                    school_reopen_date = t_date

    return compt_s, compt_e, compt_pa, compt_py, compt_ia, compt_iy, compt_ih, \
           compt_r, compt_d, compt_e2compt_py,compt_e2compt_p, \
           compt_pa2compt_ia, compt_py2compt_iy, compt_p2compt_i, \
           compt_iy2compt_ih, compt_h2compt_d, school_close_date, \
           school_reopen_date
예제 #57
0
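# Build the latitude vector by marching north from -36 deg until it passes
# -20 deg, with a step of dl * cos(lat) degrees (dl and lonr are defined
# earlier in the original script), presumably so grid cells stay roughly
# square in physical distance.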
i = 0
latr = np.array([-36])
while latr[i] <= -20:
    i = i + 1
    tmp = latr[i - 1] + dl * np.cos(latr[i - 1] * np.pi / 180)
    latr = np.hstack([latr, tmp])

Lonr, Latr = np.meshgrid(lonr, latr)
Lonu, Lonv, Lonp = rho2uvp(Lonr)
Latu, Latv, Latp = rho2uvp(Latr)

M, L = Latp.shape

print(' \n' + '==> ' + '  COMPUTING METRICS  ...\n' + ' ')

print(' \n' + '==> ' + '  LLm = ' + np.str(L - 1) + ' ...\n' + ' ')
print(' \n' + '==> ' + '  MMm = ' + np.str(M - 1) + ' ...\n' + ' ')

# !!!!!!!!!!!!!!!!!!!!!
### CODE SOMETHING HERE TO WRITE THIS INFORMATION IN THE METADATA FILE
# !!!!!!!!!!!!!!!!!!!!!

pm, pn, dndx, dmde = get_metrics(Latu, Lonu, Latv, Lonv)
xr = 0 * pm
yr = xr.copy()

for i in np.arange(0, L):
    xr[:, i + 1] = xr[:, i] + 2 / (pm[:, i + 1] + pm[:, i])

for j in np.arange(0, M):
    yr[j + 1, :] = yr[j, :] + 2 / (pn[j + 1, :] + pn[j, :])
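# rho2uvp is imported or defined earlier in the original script and is not
# shown here. A minimal sketch of what such a helper typically does on an
# Arakawa C-grid (two-point averages of the rho-point field onto u, v and
# psi points) is given below; this is an assumption, not the pyroms source.
def rho2uvp_sketch(rfield):
    # rfield: 2D array on rho points, shape (Mp, Lp)
    ufield = 0.5 * (rfield[:, :-1] + rfield[:, 1:])   # average in x -> u points
    vfield = 0.5 * (rfield[:-1, :] + rfield[1:, :])   # average in y -> v points
    pfield = 0.5 * (ufield[:-1, :] + ufield[1:, :])   # average again -> psi points
    return ufield, vfield, pfield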
예제 #58
0
import numpy as np

def compute_EW(lam,flx,wrest,lmts,flx_err,plot=False,**kwargs):
    #------------------------------------------------------------------------------------------
    #   Function to compute the equivalent width within a given velocity limits lmts=[vmin,vmax]
    #           [Only good for high resolution spectra]
    #  Caveats:- Not automated, must not include other absorption troughs within the velocity range.
    # 
    #   Input:- 
    #           lam         :- Observed Wavelength vector (units of Angstrom)
    #           flx         :- flux vector (same length as wavelength vector, preferably continuum normalized)
    #           wrest       :- rest frame wavelength of the line [used to make velocity cuts]
    #           lmts        :- [vmin,vmax], the velocity window within which equivalent width is computed.
    #           flx_err     :- error spectrum [same length as the flux vector]
    #
    #   OPTIONAL :-
    #           f0=f0       :- fvalue of the transition 
    #           zabs=zabs   :- absorber redshift
    #           plot        :- plot keyword; default plot=False gives no plots.
    #                          Any other value plots the corresponding spectrum
    #                          and the apparent optical depth of absorption.
    #
    #
    #
    # Output:-  In a Python dictionary format
    #           output['ew_tot']      :- rest frame equivalent width of the absorption system [Angstrom]
    #           output['err_ew_tot']  :- error on rest frame equivalent width
    #           output['col']         :- AOD column density
    #           output['colerr']      :- 1 sigma error on AOD column density 
    #           output['n']           :- AOD column density as a function of velocity
    #           output['Tau_a']       :- AOD as a function of velocity
    #           output['med_vel']     :- Median Optical Depth weighted velocity within lmts
    #
    #
    #   Written :- Rongmon Bordoloi                             2nd November 2016
    #-  I translated this from my matlab code compute_EW.m, which in turn is from Chris Thom's eqwrange.pro. 
    #   This was tested with COS-Halos/Dwarfs data. 
    #   Edit:  RB July 5 2017. Output is a dictionary. Edited minor dictionary arrangement
    #          RB July 25 2019. Added med_vel
    #------------------------------------------------------------------------------------------
    defnorm=1.0;
    spl=2.9979e5;  #speed of light
    if 'zabs' in kwargs:
        zabs=kwargs['zabs']
    else:
        zabs=0.

    if 'sat_limit' in kwargs:
        sat_limit=kwargs['sat_limit']
    else:
        sat_limit=0.10 #  Limit for saturation (COS specific). Set to same as fluxcut for now. WHAT SHOULD THIS BE???
    vel = (lam-wrest*(1.0 + zabs))*spl/(wrest*(1.0 + zabs));
    lambda_r=lam/(1.+zabs);

    

    norm=defnorm

    norm_flx=flx/norm;
    flx_err=flx_err/norm;
    sq=np.isnan(norm_flx);
    tmp_flx=flx_err[sq]
    norm_flx[sq]=tmp_flx
    # Clip the spectrum: if the flux is below the saturation limit, replace it
    # with the error value (to avoid infinite optical depth).
    q=np.where(norm_flx<=sat_limit);
    tmp_flx=flx_err[q]
    norm_flx[q]=tmp_flx
    q=np.where(norm_flx<=0.);
    tmp_flx=flx_err[q]+0.01
    norm_flx[q]=tmp_flx;


    del_lam_j=np.diff(lambda_r);
    del_lam_j=np.append([del_lam_j[0]],del_lam_j);


    pix = np.where( (vel >= lmts[0]) & (vel <= lmts[1]));
    Dj=1.-norm_flx

    # Equivalent Width Per Pixel
    ew=del_lam_j[pix]*Dj[pix];


    sig_dj_sq=(flx_err)**2.;
    err_ew=del_lam_j[pix]*np.sqrt(sig_dj_sq[pix]);
    err_ew_tot=np.sqrt(np.sum(err_ew**2.));
    ew_tot=np.sum(ew);
    print('W_lambda = ' + np.str('%.3f' % ew_tot) + ' +/- ' + np.str('%.3f' % err_ew_tot)  +'  \AA   over [' + np.str('%.1f' % np.round(lmts[0]))+' to ' +np.str('%.1f' % np.round(lmts[1])) + ']  km/s')
    output={}
    output["ew_tot"]=ew_tot
    output["err_ew_tot"]=err_ew_tot


    if 'f0' in kwargs:
        f0=kwargs['f0']
        #compute apparent optical depth
        Tau_a =np.log(1./norm_flx);
        
        # compute the median optical-depth-weighted velocity
        Tau50=np.cumsum(Tau_a[pix])/np.max(Tau_a[pix])
        vel50=np.interp(0.5,Tau50,vel[pix])




        # REMEMBER WE ARE SWITCHING TO VELOCITY HERE
        del_vel_j=np.diff(vel);
        del_vel_j=np.append([del_vel_j[0]],del_vel_j)
        
        # Column density per pixel as a function of velocity
        nv = Tau_a/((2.654e-15)*f0*lambda_r);# in units cm^-2 / (km s^-1), SS91 
        n = nv* del_vel_j# column density per bin obtained by multiplying differential Nv by bin width 
        tauerr = flx_err/norm_flx;
        nerr = (tauerr/((2.654e-15)*f0*lambda_r))*del_vel_j; 
        col = np.sum(n[pix]);
        colerr = np.sum((nerr[pix])**2.)**0.5; 
        print('Direct N = ' + np.str('%.3f' % np.log10(col))  +' +/- ' + np.str('%.3f' % (np.log10(col+colerr) - np.log10(col))) + ' cm^-2')
        output["col"]=col
        output["colerr"]=colerr
        output["Tau_a"]=Tau_a
        output["med_vel"]=vel50
        




    # If plot keyword is  set start plotting
    if plot is not False:
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax1=fig.add_subplot(211)
        ax1.step(vel,norm_flx)
        ax1.step(vel,flx_err,color='r')
        #plt.xlim([lmts[0]-2500,lmts[1]+2500])
        plt.xlim([-600,600])
        plt.ylim([-0.02,1.8])
        ax1.plot([-2500,2500],[0,0],'k:')
        ax1.plot([-2500,2500],[1,1],'k:')       
        plt.plot([lmts[0],lmts[0]],[1.5,1.5],'r+',markersize=15)        
        plt.plot([lmts[1],lmts[1]],[1.5,1.5],'r+',markersize=15)    
        plt.title(r' $W_{rest}$= ' + np.str('%.3f' % ew_tot) + r' $\pm$ ' + np.str('%.3f' % err_ew_tot) + r' $\AA$')
        ax1.set_xlabel('vel [km/s]')
    
        ax2=fig.add_subplot(212)
        if 'f0' in kwargs:
            # n (column density per pixel) only exists when f0 was supplied
            ax2.step(vel,n)
        ax2.set_xlabel('vel [km/s]')
        ax2.plot([-2500,2500],[0,0],'k:')
        #plt.xlim([lmts[0]-2500,lmts[1]+2500])
        plt.xlim([-600,600])
        plt.show()

    
    return output
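# A minimal usage sketch of compute_EW on synthetic data. The rest wavelength,
# redshift, f-value and the Gaussian trough below are illustrative assumptions,
# not values from the source.
wrest_demo = 1215.67                                  # rest wavelength [Angstrom]
zabs_demo = 0.1                                       # assumed absorber redshift
lam_demo = np.linspace(1330.0, 1345.0, 4000)          # observed wavelengths [Angstrom]
vel_demo = (lam_demo - wrest_demo * (1 + zabs_demo)) * 2.9979e5 / (wrest_demo * (1 + zabs_demo))
flx_demo = 1.0 - 0.6 * np.exp(-0.5 * (vel_demo / 40.0) ** 2)   # one absorption trough
err_demo = np.full_like(flx_demo, 0.02)

out = compute_EW(lam_demo, flx_demo, wrest_demo, [-200., 200.], err_demo,
                 plot=False, zabs=zabs_demo, f0=0.4164)
print(out['ew_tot'], out['err_ew_tot'], out['col'], out['med_vel'])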
예제 #59
0
plt.xlabel('Date')
plt.legend()
plt.show()

# Predictions for the next 38 days (day indices 299 to 336)
future_days = np.arange(299, 337, 1)
future_days = future_days.reshape(-1, 1)
future_pred = []
for i in future_days:
    m = reg.predict(sc_x.transform(np.array([i])))
    future = np.around(sc_y.inverse_transform(m))
    future_pred.append(future)

future_days = np.arange(1, np.size(future_days) + 1, 1)

# Excel File
SVR_IndiaDC_pred = pd.DataFrame({
    'Days since 11/23': future_days,
    'Cases Prediction': future_pred
})
SVR_December_DC = SVR_IndiaDC_pred.to_excel(
    "IndiaSupportVectorRegressionDecemberCases.xlsx",
    sheet_name='December Daily Cases Prediction')

print('------------SVR EVALUATION-------------')
print('RMSE Score: ' + np.str(rmse))
print('RMSLE Score: ' + np.str(rmsle))
print('R2 Score: ' + np.str(R2))
print(SVRfit_India_DC)
print(SVR_IndiaDC_pred)
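# The snippet above assumes reg, sc_x and sc_y were fitted earlier in the
# original script. A minimal sketch of that setup with scikit-learn (an SVR
# on day index vs. daily case counts, both standardized) is shown below; the
# data and hyperparameters are illustrative assumptions, not the original code.
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

days = np.arange(1, 300).reshape(-1, 1)                        # day indices since start
cases = np.random.default_rng(0).poisson(5000, size=days.shape[0]).astype(float)

sc_x = StandardScaler().fit(days)
sc_y = StandardScaler().fit(cases.reshape(-1, 1))

reg = SVR(kernel='rbf', C=10.0, gamma='scale')
reg.fit(sc_x.transform(days), sc_y.transform(cases.reshape(-1, 1)).ravel())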
예제 #60
0
import sys
import commands

import numpy as np
import pyroms
import pyroms_toolbox

from remap_clm import remap_clm
from remap_clm_uv import remap_clm_uv

lst_year = sys.argv[1:]

data_dir = '/archive/u1/uaf/kate/HYCOM/Svalbard/Monthly_avg/'
dst_dir = './clm/'

lst_file = []

for year in lst_year:
    year = np.str(year)
    #    lst = commands.getoutput('ls ' + data_dir + 'SODA_2.1.6_' + year + '_0*')
    lst = commands.getoutput('ls ' + data_dir + '*' + year + '*')
    lst = lst.split()
    lst_file = lst_file + lst

print 'Build CLM file from the following file list:'
print lst_file
print ' '

src_grd = pyroms_toolbox.Grid_HYCOM.get_nc_Grid_HYCOM(
    '/archive/u1/uaf/kate/HYCOM/Svalbard/HYCOM_GLBa0.08_North_grid2.nc')
dst_grd = pyroms.grid.get_ROMS_grid('ARCTIC2')

for file in lst_file:
    # remap