Example no. 1
    def _init_code(self):
        self.velocity = float(self.parameters["velocity"])
        self.age = int(self.parameters["age"])
        normalise = bool(self.parameters["normalise"])

        # Time grid and age. If needed, the age is rounded down to the nearest Myr
        time_grid = np.arange(self.age)

        # Values from Buat et al. (2008) table 2
        paper_velocities = np.array([80., 150., 220., 290., 360.])
        paper_as = np.array([6.62, 8.74, 10.01, 10.81, 11.35])
        paper_bs = np.array([0.41, 0.98, 1.25, 1.35, 1.37])
        paper_cs = np.array([0.36, -0.20, -0.55, -0.74, -0.85])

        # Interpolation of a, b, c corresponding to the velocity.
        a = np.interp(self.velocity, paper_velocities, paper_as)
        b = np.interp(self.velocity, paper_velocities, paper_bs)
        c = np.interp(self.velocity, paper_velocities, paper_cs)

        # Main SFR
        t = (time_grid+1) / 1000  # The time is in Gyr in the formulae
        self.sfr = 10.**(a + b * np.log10(t) + c * t**.5) / 1.e9

        # Compute the galaxy mass and normalise the SFH to 1 solar mass
        # produced if asked to.
        self.sfr_integrated = np.sum(self.sfr) * 1e6
        if normalise:
            self.sfr /= self.sfr_integrated
            self.sfr_integrated = 1.
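
As a quick, self-contained check of the coefficient lookup above (using the Buat et al. 2008 values quoted in the code), note that np.interp clamps to the endpoint values outside the tabulated velocity range rather than extrapolating:

import numpy as np

paper_velocities = np.array([80., 150., 220., 290., 360.])
paper_as = np.array([6.62, 8.74, 10.01, 10.81, 11.35])

print(np.interp(185., paper_velocities, paper_as))  # 9.375, halfway between 8.74 and 10.01
print(np.interp(50., paper_velocities, paper_as))   # 6.62, clamped to the left endpoint
print(np.interp(400., paper_velocities, paper_as))  # 11.35, clamped to the right endpoint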
	def align(self, dataX, dataXY, reverse=False):
		if reverse:
			self.aligned = list(reversed(np.interp(list(reversed(dataX)), list(reversed(dataXY[0])), list(reversed(dataXY[1])))))
		else:
			self.aligned = list(np.interp(dataX, dataXY[0], dataXY[1]))
		
		return self.aligned
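
np.interp assumes its xp argument is monotonically increasing, which is why align() reverses descending inputs before interpolating. A minimal sketch with hypothetical data showing that the two cases give the same result:

import numpy as np

dataX = [1.0, 2.0, 3.0]                        # query points
asc = ([0.0, 2.0, 4.0], [0.0, 20.0, 40.0])     # ascending reference curve
desc = ([4.0, 2.0, 0.0], [40.0, 20.0, 0.0])    # same curve, descending order

print(np.interp(dataX, asc[0], asc[1]))                # [10. 20. 30.]
print(np.interp(dataX, desc[0][::-1], desc[1][::-1]))  # [10. 20. 30.] after reversing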
Example no. 3
    def test_mask_LUT(self):
        """
        The masked image has a masked ring around 1.5 deg with value -10.
        Without mask the pixels should be at -10; with mask they are at 0.
        """
        x1 = self.ai.xrpd_LUT(self.data, 1000)
#        print self.ai._lut_integrator.lut_checksum
        x2 = self.ai.xrpd_LUT(self.data, 1000, mask=self.mask)
#        print self.ai._lut_integrator.lut_checksum
        x3 = self.ai.xrpd_LUT(self.data, 1000, mask=numpy.zeros(shape=self.mask.shape, dtype="uint8"), dummy= -20.0, delta_dummy=19.5)
#        print self.ai._lut_integrator.lut_checksum
        res1 = numpy.interp(1.5, *x1)
        res2 = numpy.interp(1.5, *x2)
        res3 = numpy.interp(1.5, *x3)
        if logger.getEffectiveLevel() == logging.DEBUG:
            pylab.plot(*x1, label="nomask")
            pylab.plot(*x2, label="mask")
            pylab.plot(*x3, label="dummy")
            pylab.legend()
            pylab.show()
            input()

        self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
        self.assertAlmostEqual(res2, 0., 4, msg="With mask the bad pixels are actually at 0 (got %.4f)" % res2)
        self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
Example no. 4
    def __init__(self, R_in, z_in, t_in, psi_axis, psi_sep, psi_in, R_out, z_out, t_out):

        print('3d interp')

        self.error = 0

# Check input dimensions, R_out z_out must be flat

        if len(R_out) != len(z_out):
            print('R and z must have the same dimensions')
            self.error = 1
            return
        if np.array(R_out).ndim > 1:
            print('R_out must be flat')
            self.error = 2
            return
        if np.array(z_out).ndim > 1:
            print('z_out must be flat')
            self.error = 3
            return
        if len(t_in) != len(psi_axis):
            print('Inconsistent time axis for psi_axis')
            self.error = 4
            return
        if len(t_in) != len(psi_sep):
            print('Inconsistent time axis for psi_sep')
            self.error = 5
            return
        nt_psi, nz_psi, nR_psi = psi_in.shape
        if len(t_in) != nt_psi:
            print('Inconsistent time axis for psi_in')
            self.error = 6
            return
        if len(R_in) != nR_psi:
            print('Inconsistent R axis for psi_in')
            self.error = 7
            return
        if len(z_in) != nz_psi:
            print('Inconsistent z axis for psi_in')
            self.error = 8
            return

        nRz = len(R_out)
        nt = len(t_out)
        self.psi_red = np.zeros((nt, nRz))

# Trilinear interpolation

        int3d = RegularGridInterpolator((t_in, z_in, R_in), psi_in, \
                                        method='linear', fill_value=None) 
        self.psi_norm = np.zeros((nt, nRz))
        self.psi_axis = np.interp(t_out, t_in, psi_axis)
        self.psi_sep  = np.interp(t_out, t_in, psi_sep)

        for jt in range(nt):
            for jRz in range(nRz):
                self.psi_red[jt, jRz] = int3d([t_out[jt], z_out[jRz], R_out[jRz]])
            self.psi_norm[jt] = (self.psi_red[jt] - self.psi_axis[jt])/ \
                                (self.psi_sep[jt] - self.psi_axis[jt])
        self.rho_pol = np.sqrt(self.psi_norm)
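
A minimal, self-contained sketch (with hypothetical grids) of the same trilinear lookup: RegularGridInterpolator also accepts an (N, 3) array of query points, so the per-point inner loop above could be collapsed into one call per time slice.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

t_in = np.linspace(0.0, 1.0, 5)          # hypothetical time grid
z_in = np.linspace(-1.0, 1.0, 7)         # hypothetical z grid
R_in = np.linspace(1.0, 2.0, 9)          # hypothetical R grid
psi_in = np.random.rand(len(t_in), len(z_in), len(R_in))

int3d = RegularGridInterpolator((t_in, z_in, R_in), psi_in,
                                method='linear', fill_value=None)

t_out = np.array([0.25, 0.75])
z_out = np.array([-0.5, 0.0, 0.5])
R_out = np.array([1.2, 1.5, 1.8])

psi_red = np.empty((len(t_out), len(R_out)))
for jt, t in enumerate(t_out):
    pts = np.column_stack((np.full_like(R_out, t), z_out, R_out))
    psi_red[jt] = int3d(pts)             # one call per time slice instead of per point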
Example no. 5
 def sample_line_segment_mm_s(start_xy_mm, end_xy_mm, dt_s, mW=None, max_mm=5.0):
     """ Given a line segment in mm space, map it to galvo space.
         To make the line straight in mm space, samples may be added to 
         more-closely approximate a straight line.
         Returns: An array of shape nx3 (if mW is None) or nx4 (if mW is not None)
                     of points and time deltas in mm and seconds,
                     excluding start_xy_mm and including end_xy_mm,
                     possibly with extra samples along the way.
     """
     import FLP
     from numpy.linalg import norm
     dist_mm = norm(np.asarray(end_xy_mm) - start_xy_mm)
     if dist_mm <= max_mm:
         if mW is None:
             return np.array((tuple(end_xy_mm) + (dt_s,),)) # Just the end sample.
         else:
             return np.array((tuple(end_xy_mm) + (dt_s, mW),)) # Just the end sample.
     samples_s = np.linspace(0, dt_s, int(np.ceil(dist_mm / max_mm)) + 1)
     timeRange_s = (0, dt_s)
     if mW is None:
         return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                              np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                              np.diff(samples_s)])
     else:
         return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                              np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                              np.diff(samples_s),
                              mW * np.ones_like(samples_s[1:])])
Example no. 6
def Subtracted(minute_time, smooth_time, minute_bx, smooth_Srx, minute_by, smooth_Sry):
	"""Subtracts the smoothed solar regular curves from the minute binned data.

		Parameters
		-----------
		minute_time = array of minute binned time floats
		smooth_time = array of solar regular time floats
		minute_bx, minute_by = arrays of minute binned horizontal magnetic floats
		smooth_Srx, smooth_Sry = arrays of smoothed solar regular curves

		Returns
		-----------
		subtracted_bx, subtracted_by = arrays of subtracted magnetic floats

		-----------------------------------------------------------------
	"""

	subtracted_bx, subtracted_by = [], []
	for index, value in enumerate(minute_time):
		x = np.interp(value, smooth_time, smooth_Srx)
		y = np.interp(value, smooth_time, smooth_Sry)
		subtracted_bx.append(minute_bx[index]-x)
		subtracted_by.append(minute_by[index]-y)

	return subtracted_bx, subtracted_by
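
Since np.interp accepts an array of query points, the per-sample loop above can be collapsed into two vectorized calls. A sketch assuming the inputs are 1-D numpy arrays and smooth_time is increasing:

import numpy as np

def subtracted_vectorized(minute_time, smooth_time, minute_bx, smooth_Srx,
                          minute_by, smooth_Sry):
    """Vectorized equivalent of Subtracted() for numpy-array inputs."""
    subtracted_bx = minute_bx - np.interp(minute_time, smooth_time, smooth_Srx)
    subtracted_by = minute_by - np.interp(minute_time, smooth_time, smooth_Sry)
    return subtracted_bx, subtracted_by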
Example no. 7
    def render(self, model, params, frame):

        # Scalar animation parameter, based on height and distance
        d = model.edgeCenters[:,2] + 0.5 * model.edgeDistances
        numpy.multiply(d, 1/self.height, d)

        # Add global offset for Z scrolling over time
        numpy.add(d, params.time * self.speed, d)

        # Add an offset that depends on which tree we're in
        numpy.add(d, numpy.choose(model.edgeTree, self.offsets), d)

        # Periodic animation, stored in our color table. Linearly interpolate.
        numpy.fmod(d, self.period, d)
        color = numpy.empty((model.numLEDs, 3))
        color[:,0] = numpy.interp(d, self.colorX, self.colorY[:,0])
        color[:,1] = numpy.interp(d, self.colorX, self.colorY[:,1])
        color[:,2] = numpy.interp(d, self.colorX, self.colorY[:,2])

        # Random flickering noise
        noise = numpy.random.rand(model.numLEDs).reshape(-1, 1)
        numpy.multiply(noise, 0.25, noise)
        numpy.add(noise, 0.75, noise)

        numpy.multiply(color, noise, color)
        numpy.add(frame, color, frame)
Example no. 8
    def _getParamsFromIMT(self, imt):
        """
        Helper function to return (possibly interpolated) conversion
        parameters for a given IMT.
        """

        if imt == PGA():
            sigma = self.pars['sigma'][0]
            c0 = self.pars['c0smooth'][0]
            r1 = self.pars['r1smooth'][0]
            m1 = self.pars['m1smooth'][0]
            m2 = self.pars['m2smooth'][0]
        elif imt == PGV():
            sigma = self.pars['sigma'][1]
            c0 = self.pars['c0smooth'][1]
            r1 = self.pars['r1smooth'][1]
            m1 = self.pars['m1smooth'][1]
            m2 = self.pars['m2smooth'][1]
        elif 'SA' in imt:
            imt_per = imt.period
            pa = self.pars['per'][2:]
            sigma = np.interp(imt_per, pa, self.pars['sigma'][2:])
            c0 = np.interp(imt_per, pa, self.pars['c0smooth'][2:])
            r1 = np.interp(imt_per, pa, self.pars['r1smooth'][2:])
            m1 = np.interp(imt_per, pa, self.pars['m1smooth'][2:])
            m2 = np.interp(imt_per, pa, self.pars['m2smooth'][2:])
        else:
            raise ValueError("Unknown IMT: %s" % str(imt))

        return (sigma, c0, r1, m1, m2)
    def __interpolate_path(self, location1, location2, frames):
        """
        Generate intermediate points between the given start and stop locations.
        :param location1: dict with "lat" and "lng" keys for the start point
        :param location2: dict with "lat" and "lng" keys for the end point
        :param frames: number of points to generate
        :return: list of {"lat": ..., "lng": ...} dictionaries
        """

        x = [location1["lat"], location2["lat"]]
        y = [location1["lng"], location2["lng"]]
        if x[0] - x[1] == 0:
            yvals = numpy.linspace(y[0], y[1], frames)
            xvals = numpy.interp(yvals, y, x)
        else:
            xvals = numpy.linspace(x[0], x[1], frames)
            yvals = numpy.interp(xvals, x, y)

        # create geo location point with each point
        # as dictionary containing lat and lng values
        locations = []
        for lat, lng in zip(xvals, yvals):
            point = {"lat": lat, "lng": lng}
            locations.append(point)

        return locations
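
One caveat about the branches above: np.interp assumes its xp argument is increasing, so a path whose latitude (or longitude) decreases feeds it a descending two-point sequence. A minimal, self-contained sketch (hypothetical coordinates) that avoids the issue by interpolating both axes against the always-increasing frame index:

import numpy

location1 = {"lat": 47.0, "lng": 8.5}    # hypothetical start (larger values)
location2 = {"lat": 46.0, "lng": 7.0}    # hypothetical end
frames = 5

f = numpy.arange(frames)                 # frame index, always increasing
xvals = numpy.interp(f, [0, frames - 1], [location1["lat"], location2["lat"]])
yvals = numpy.interp(f, [0, frames - 1], [location1["lng"], location2["lng"]])
locations = [{"lat": lat, "lng": lng} for lat, lng in zip(xvals, yvals)]
print(locations)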
Example no. 10
def total_pollution_cost(nodes,osm,factor): #takes in a list of nodes, returns the total pollution cost of the way
    x,y= [osm[node].lat for node in nodes],[osm[node].lon for node in nodes]

    #print x,y
#    f = interp1d(x, y)
#    f2 = interp1d(x, y, kind='cubic')
    length = int(getlengthofway(x,y)*factor) #with this calculation, each evenly-spaced point is around 50 meters from the next
    M = max(1,length) #make the number of interpolation points proportional to the length of the way; interpolating every way with the same number of points regardless of length was slow
    t = np.linspace(0, len(x), M) #creates M points from 0-len(x)
    x = np.interp(t, np.arange(len(x)), x)
    y = np.interp(t, np.arange(len(y)), y)
    tol = 0.0004 #problem with this weighting algorithm is that single-node streets can be disproportionately weighted compared to streets with many nodes...
    i, idx = 0, [0]
    j=0
    while i < len(x):
        total_dist = 0
        for j in range(i+1, len(x)):
            total_dist += sqrt((x[j]-x[j-1])**2 + (y[j]-y[j-1])**2)
            if total_dist > tol:
                idx.append(j)
                break
        i = j+1
    xn = x[idx]
    yn = y[idx]
    pollution_levels = load_func(xn,yn)
    result=0
    for i in pollution_levels: #this assumes the evenly spaced points across all ways are actually equally spaced
        result+=i
    return result
Example no. 11
    def test_split_at(self, plot=False):

        gait_data = GaitData(self.data_frame)
        gait_data.grf_landmarks('Right Vertical GRF',
                                   'Left Vertical GRF',
                                   threshold=self.threshold)

        side = 'right'
        series = 'Right Vertical GRF'

        gait_cycles = gait_data.split_at(side)

        for i, cycle in gait_cycles.iteritems():
            start_heelstrike_time = gait_data.strikes[side][i]
            end_heelstrike_time = gait_data.strikes[side][i + 1]
            hs_to_hs = gait_data.data[series][start_heelstrike_time:end_heelstrike_time]
            num_samples = len(cycle[series])
            new_time = np.linspace(0.0, end_heelstrike_time,
                                   num=num_samples + 1)
            old_time = np.linspace(0.0, end_heelstrike_time, num=num_samples)
            new_values = np.interp(new_time, old_time, hs_to_hs.values)
            testing.assert_allclose(cycle[series], new_values[:-1])

        if plot is True:
            gait_data.plot_gait_cycles(series, 'Left Vertical GRF')

        gait_cycles = gait_data.split_at(side, 'stance')

        for i, cycle in gait_cycles.iteritems():
            start_heelstrike_time = gait_data.strikes[side][i]
            end_toeoff_time = gait_data.offs[side][i + 1]
            hs_to_toeoff = gait_data.data[series][start_heelstrike_time:end_toeoff_time]
            num_samples = len(cycle[series])
            new_time = np.linspace(0.0, end_toeoff_time,
                                   num=num_samples + 1)
            old_time = np.linspace(0.0, end_toeoff_time, num=num_samples)
            new_values = np.interp(new_time, old_time, hs_to_toeoff.values)
            testing.assert_allclose(cycle[series], new_values[:-1])

        if plot is True:
            gait_data.plot_gait_cycles(series, 'Left Vertical GRF')

        gait_cycles = gait_data.split_at(side, 'swing')

        for i, cycle in gait_cycles.iteritems():
            start_toeoff_time = gait_data.offs[side][i]
            end_heelstrike_time = gait_data.strikes[side][i]
            toeoff_to_heelstrike = gait_data.data[series][start_toeoff_time:end_heelstrike_time]
            num_samples = len(cycle[series])
            new_time = np.linspace(0.0, end_heelstrike_time,
                                   num=num_samples + 1)
            old_time = np.linspace(0.0, end_heelstrike_time, num=num_samples)
            new_values = np.interp(new_time, old_time,
                                   toeoff_to_heelstrike.values)
            testing.assert_allclose(cycle[series], new_values[:-1])

        if plot is True:
            gait_data.plot_gait_cycles(series, 'Left Vertical GRF')
            import matplotlib.pyplot as plt
            plt.show()
Example no. 12
def slice_jackknife(z, zmin=0.02, zmax=0.5, cube_cmpc_depth=150, dz=0.001):
	z1 = z[(z >= zmin) & (z <= zmax)]
	pdf = np.histogram(z1, bins=np.linspace(zmin, zmax, int((zmax - zmin)/dz)))
	fp = pdf[0] / float(pdf[0].sum())
	cdf = np.cumsum(fp)
	zm = pdf[1][:-1]

	nbin = 10
	fracs = np.linspace(0., 1., nbin + 1)
	ze = np.interp(fracs, cdf, zm)
	min_depth = np.diff(cmpc(ze) * 0.7).min()
	while np.float32(min_depth) <= cube_cmpc_depth:
		nbin -= 1
		fracs = np.linspace(0., 1., nbin + 1)
		ze1 = np.interp(fracs, cdf, zm)
		min_depth = np.diff(cmpc(ze1) * 0.7).min()
		ze = ze1
		#print('nbin = %i'%nbin)
		#print(cmpc(ze) * 0.7)
		#print(np.diff(cmpc(ze) * 0.7))
		#print('\n')
	new_z_edge = np.concatenate(([zmin], ze[1:-1], [zmax]))
	print('nbin = %i'%(len(new_z_edge) - 1))
	print('z-edges:')
	print(new_z_edge)
	print('cMpc diffs:')
	print(np.diff(cmpc(new_z_edge) * 0.7))
	return new_z_edge
Example no. 13
def gen_map(vals, n=nbins, hue=False):
	# saturation
	fvals = vals.flatten()
	yh, xh, patches = plt.hist(fvals, bins=n, range=(0,1), density=False, cumulative=False, histtype='step')
	if hue:
		# apply window
		M = 9
		win = np.kaiser(M, 3.0)
		yh = np.insert(yh, 0, np.zeros(M//2))
		yh = np.append(yh, np.zeros(M//2))
		yh = rolling_window(yh.T, M)
		yh = np.dot(yh, win)
	yh /= sum(yh)
	if hue:
		# adapted norm
		#yh = np.minimum(yh, hcut)
		yh[yh<=hcut] = 0
		yh /= sum(yh)
	yh = np.cumsum(yh)

	xhi = np.linspace(0,1,256)
	yhi = np.interp(xhi, yh, xh[1:])
	yhinv = np.interp(xhi, xh[1:], yh)
	#plt.plot(xhi, yhi)
	return (yhi, yhinv)
def interp_D(zseek, ascale, D0, D1):
	"""
	Linearly interpolate (with numpy) the growth function D0 and its
	derivative D1, tabulated at scale factors ascale, onto the requested
	redshifts zseek, returning the tuple (Dseek, logDseek).

	args:
		zseek  :

		ascale :

		D0     :
		
		D1     :

	returns:
		tuple (Dseek, logDseek), where Dseek is the interpolated value
		of D0 at zseek and logDseek is the interpolated value of D1 at
		zseek.

	status:
		Written by Suman Bhattacharya, Dec 2013
	"""

	aseek    = 1/(1+zseek)
	Dseek    = np.interp(aseek, ascale, D0, left = np.nan , right = np.nan)
	logDseek = np.interp(aseek, ascale, D1, left = np.nan , right = np.nan)

	return (Dseek, logDseek)
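
A small usage sketch with a hypothetical growth-function table: the left=np.nan/right=np.nan arguments mark requests outside the tabulated scale-factor range as invalid instead of silently clamping.

import numpy as np

ascale = np.linspace(0.1, 1.0, 10)       # hypothetical scale-factor grid
D0 = ascale.copy()                       # toy growth function, D(a) = a
D1 = np.ones_like(ascale)                # toy derivative

zseek = np.array([0.0, 1.0, 20.0])       # a = 1.0, 0.5 and ~0.048 (out of range)
Dseek, logDseek = interp_D(zseek, ascale, D0, D1)
print(Dseek)                             # [1.  0.5 nan], last entry is out of range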
Example no. 15
    def __getattr__(self,name):
        if name not in _IMPS | _WAKES | {'w','s'}:
            return [getattr(x,name) for x in self]

        w = _np.unique(_np.concatenate([getattr(x,'w') for x in self]))
        if name == 'w': return w
        if name in _IMPS:
            temp = _np.zeros(w.shape,dtype=complex)
            for el in self:
                attr = getattr(el,name)
                if attr is None or len(attr) == 0: continue
                temp += 1j*_np.interp(w,el.w,attr.imag,left=0.0,right=0.0)*el.quantity*_BETA[name](el)
                temp +=    _np.interp(w,el.w,attr.real,left=0.0,right=0.0)*el.quantity*_BETA[name](el)
            return temp

        s = _np.unique(_np.concatenate([getattr(x,'s') for x in self]))
        if name == 's': return s
        if name in _WAKES:
            temp = _np.zeros(s.shape,dtype=float)
            for el in self:
                attr = getattr(el,name)
                if attr is None or len(attr) == 0: continue
                temp += _np.interp(s,el.s,attr,left=0.0,right=0.0)*el.quantity*_BETA[name](el)
            return temp
        raise AttributeError("'"+self.__class__.__name__+ "' object has no attribute '"+name+"'" )
Example no. 16
def test2():
    """ test momentum and mass conservation in 3d """
    import pylab as pl
    r,p,rho,u,r_s,p_s,rho_s,u_s,shock_speed = \
        sedov(t=0.05, E0=5.0, rho0=5.0, g=5.0/3.0,n=10000)

    dt = 1e-5
    r2,p2,rho2,u2 = sedov(t=0.05+dt, E0=5.0, rho0=5.0, g=5.0/3.0, n=9000)[:4]

    # align the results
    from numpy import interp, gradient
    p2 = interp(r,r2,p2)
    rho2 = interp(r,r2,rho2)
    u2 = interp(r,r2,u2)

    # mass conservation
    pl.plot(r, -gradient(rho*u*r*r)/(r*r*gradient(r)), 'b', label=r'$\frac{1}{r^2}\frac{\partial}{\partial r} \rho u r^2$')
    pl.plot(r, (rho2-rho)/dt, 'k', label=r'$\frac{\partial \rho}{\partial t}$')

    # momentum conservation
    pl.plot(r, -gradient(p)/gradient(r), 'g',label=r'$-\frac{\partial p}{\partial r}$')
    pl.plot(r, rho*((u2-u)/dt+u*gradient(u)/gradient(r)), 'r',label=r'$\rho \left( \frac{\partial u}{\partial t} + u\frac{\partial u}{\partial r} \right)$')

    pl.legend(loc='lower left')
    pl.show()        
Example no. 17
def get_temperatures_at(t,get_load_temp=False):
    global epochs
    global ncs
    global _filecache
    
    if np.isscalar(t):
        start_time = t
        end_time = t
    else:
        start_time = t.min()
        end_time = t.max()

    idx = bisect.bisect_right(epochs,start_time)
    idx = idx - 1
    if idx < 0:
        idx = 0
    ncname = ncs[idx]
    if ncname in _filecache and (time.time()-end_time) > 1*3600: #if we're looking for data from more than 1 hour ago, look in the cache
        times,temps,load = _filecache[ncname]
    else:
        times,temps,load = get_temperature_from_nc(ncname)
        _filecache[ncname] = (times,temps,load)
        
    if end_time > times[-1]:
        print("Warning: requested times may span more than one log file, so results may not be as intended")
        print("log file is: %s, last requested time is %s" % (ncname, time.ctime(end_time)))

    load_temperature = np.interp(t,times,load)
    package_temperature = np.interp(t,times,temps)
    return package_temperature,np.nan,load_temperature,np.nan
Example no. 18
    def test_single_path_ak135(self):
        """
        Test the raypath for a single phase. This time for model AK135.
        """
        filename = os.path.join(
            DATA, "taup_path_-o_stdout_-h_10_-ph_P_-deg_35_-mod_ak135")
        expected = np.genfromtxt(filename, comments='>')

        m = TauPyModel(model="ak135")
        arrivals = m.get_ray_paths(source_depth_in_km=10.0,
                                   distance_in_degree=35.0, phase_list=["P"])
        self.assertEqual(len(arrivals), 1)

        # Interpolate both paths to 100 samples and make sure they are
        # approximately equal.
        sample_points = np.linspace(0, 35, 100)

        interpolated_expected = np.interp(
            sample_points,
            expected[:, 0],
            expected[:, 1])

        interpolated_actual = np.interp(
            sample_points,
            np.round(np.degrees(arrivals[0].path['dist']), 2),
            np.round(6371 - arrivals[0].path['depth'], 2))

        self.assertTrue(np.allclose(interpolated_actual,
                                    interpolated_expected, rtol=1E-4, atol=0))
Example no. 19
    def scattering_factors(self, energy=None, wavelength=None):
        """
        X-ray scattering factors f', f''.

        :Parameters:
            *energy* : float or vector | keV
                X-ray energy.

        :Returns:
            *scattering_factors* : (float, float)
                Values outside the range return NaN.

        Values are found from linear interpolation within the Henke Xray
        scattering factors database at the Lawrence Berkeley Laboratory
        Center for X-ray Optics.
        """
        xsf = self.sftable
        if xsf is None:
            return None, None

        if wavelength is not None:
            energy = xray_energy(wavelength)
        if energy is None:
            raise TypeError('X-ray scattering factors need wavelength or energy')

        scalar = numpy.isscalar(energy)
        if scalar:
            energy = numpy.array([energy])
        f1 = numpy.interp(energy, xsf[0], xsf[1], left=nan, right=nan)
        f2 = numpy.interp(energy, xsf[0], xsf[2], left=nan, right=nan)
        if scalar:
            f1, f2 = f1[0], f2[0]
        return f1, f2
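
The left=nan/right=nan arguments are what make out-of-range energies come back as NaN, as the docstring states. A minimal, self-contained illustration with a hypothetical two-column table:

import numpy
from numpy import nan

energy_keV = numpy.array([0.01, 0.1, 1.0, 10.0])   # hypothetical energy grid
f1_table = numpy.array([1.0, 2.0, 3.0, 4.0])       # hypothetical f' values

print(numpy.interp(0.5, energy_keV, f1_table, left=nan, right=nan))    # ~2.44
print(numpy.interp(100.0, energy_keV, f1_table, left=nan, right=nan))  # nan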
Example no. 20
def saveMapAsOSM(contours):
    output = open('output.osm','w+')
    output.write("<osm>")

    for i,contour in enumerate(contours):
        for j,point in enumerate(contour):
            id = getUniqueId(i,j)
            lon = np.interp(point[0][0], [0,w],map_bounds[0])
            lat = np.interp(point[0][1], [0,h],map_bounds[1])
            output.write("<node id='{id}' visible='true' user='******' lat='{lat}' lon='{lon}'/>".format(id=id,lat=lat,lon=lon))

    for i,contour in enumerate(contours):
        output.write("<way id='{id}' visible='true' user='******'>".format(id=i))
        for j,point in enumerate(contour):
            id = getUniqueId(i,j)
            output.write("<nd ref='{id}'/>".format(id=id))
        output.write("<tag k='highway' v='residential'/>")
        output.write("<tag k='is_in:city' v='Cluj-Napoca'/>")
        output.write("<tag k='name' v='{street} St.'/>".format(street=random.choice(names)))
        output.write("</way>")

    output.write("<bounds minlat='{minlat}' minlon='{minlon}' maxlat='{maxlat}' maxlon='{maxlon}'/>".format(minlat=map_bounds[1][0],minlon=map_bounds[0][0],maxlat=map_bounds[1][1],maxlon=map_bounds[0][1]))
    output.write("</osm>")
    output.close()
    print('saved to file')
Example no. 21
def kpss_crit(stat, trend='c'):
    """
    Linear interpolation for KPSS p-values and critical values

    Parameters
    ----------
    stat : float
        The KPSS test statistic.
    trend : str, {'c','ct'}
        The trend used when computing the KPSS statistic

    Returns
    -------
    pvalue : float
        The interpolated p-value
    crit_val : array
        Three element array containing the 10%, 5% and 1% critical values,
        in order

    Notes
    -----
    The p-values are linear interpolated from the quantiles of the simulated
    KPSS test statistic distribution using 100,000,000 replications and 2000
    data points.
    """
    table = kpss_critical_values[trend]
    y = table[:, 0]
    x = table[:, 1]
    # kpss.py contains quantiles multiplied by 100
    pvalue = interp(stat, x, y) / 100.0
    cv = [1.0, 5.0, 10.0]
    crit_value = interp(cv, y[::-1], x[::-1])

    return pvalue, crit_value
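
The same lookup pattern as a self-contained sketch with a small hypothetical table (the real kpss_critical_values table is not reproduced here): the p-value is interpolated from (quantile, statistic) pairs, and the critical values come from the reversed interpolation.

import numpy as np

# Hypothetical (quantile %, statistic) pairs; quantiles decrease as the
# statistic increases, so the statistic column is already sorted ascending.
table = np.array([[99.0, 0.10],
                  [95.0, 0.15],
                  [90.0, 0.35],
                  [10.0, 0.55],
                  [5.0, 0.75],
                  [1.0, 1.00]])
y, x = table[:, 0], table[:, 1]

stat = 0.5
pvalue = np.interp(stat, x, y) / 100.0                       # x is increasing
crit_values = np.interp([1.0, 5.0, 10.0], y[::-1], x[::-1])  # reverse so y increases
print(pvalue, crit_values)                                   # 0.3 [1.   0.75 0.55]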
Example no. 22
def total_tau_profile_func(wave_to_fit,h1_col,h1_b,h1_vel,d2h=1.5e-5):

    """
    Given a wavelength array and parameters (H column density, b value, and 
    velocity centroid), computes the Voigt profile of HI and DI Lyman-alpha
    and returns the combined absorption profile.

    """

    ##### ISM absorbers #####

    ## HI ##
   
    hwave_all,htau_all=tau_profile(h1_col,h1_vel,h1_b,'h1')
    tauh1=np.interp(wave_to_fit,hwave_all,htau_all)
    clean_htau_all = np.where(np.exp(-htau_all) > 1.0)
    htau_all[clean_htau_all] = 0.0

    ## DI ##

    d1_col = np.log10( (10.**h1_col)*d2h )

    dwave_all,dtau_all=tau_profile(d1_col,h1_vel,h1_b,'d1')
    taud1=np.interp(wave_to_fit,dwave_all,dtau_all)
    clean_dtau_all = np.where(np.exp(-dtau_all) > 1.0)
    dtau_all[clean_dtau_all] = 0.0


    ## Adding the optical depths and creating the observed profile ##

    tot_tau = tauh1 + taud1
    tot_ism = np.exp(-tot_tau)

    return tot_ism
    def __call__(self,P,Q,A,dt):
        '''
        P, Q, A must be lists in the form [P1 (mother), P2 (LD) .. (, P3 (RD))]
        in the same order as the bifurcation/connection was initialized
        '''
        if not (len(A) == len(Q) == len(P) == self.number):
            raise Error(" Wrong length of input variables")
        # solution vector
        x = []
                
        for i in range(self.number):
            
            pos = self.positions[i]
            self.P[i] = P[i][pos]
            self.Q[i] = Q[i][pos]
            self.A[i] = A[i][pos]
            
            zi = self.z[i][pos] + self.vz[i]  * self.c_func[i](A[i],P[i],pos) * dt
            
            self.du[i] = np.array([np.interp(zi,self.z[i],P[i]),np.interp(zi,self.z[i],Q[i])])-np.array([self.P[i],self.Q[i]])  
                        
            self.systemEquations[i].updateLARL(P[i],Q[i],A[i],idArray=[pos],bool='L') #
            
            self.domega[i] =  np.dot(self.systemEquations[i].L[pos][pos+1],self.du[i])

            x.extend([self.P[i],self.Q[i]])
               
        sol = fsolve(self.fsolveFunction,x)
        sol = sol.tolist()
        for i in range(self.number):
            sol.insert(i*3+2,self.A[i]) ## change to pSolution
        
        return sol
Example no. 24
def chi_path(path, r, sig2, energy_shift, s02, N):
    # this gives errors at small k (<0.1), but it doesn't matter
    # as the k-window should always be zero in this region
    # also uses real momentum for dw factor
    delta_k = 0.05
    vrcorr = energy_shift

    xkpmin = xk2xkp(path["xk"][0], vrcorr)
    n = int(xkpmin / delta_k)
    if xkpmin > 0.0:
        n += 1
    xkmin = n * delta_k

    npts = len(path["xk"])

    xkout = numpy.arange(xkmin, 20.0 + delta_k, delta_k)
    xk0 = xkp2xk(xkout, vrcorr)

    f0 = numpy.interp(xk0, path["xk"], path["afeff"])
    lambda0 = numpy.interp(xk0, path["xk"], path["xlam"])
    delta0 = numpy.interp(xk0, path["xk"], path["cdelta"] + path["phfeff"])
    redfac0 = numpy.interp(xk0, path["xk"], path["redfac"])
    rep0 = numpy.interp(xk0, path["xk"], path["rep"])
    p0 = rep0 + 1j / lambda0
    dr = r - path["reff"]

    chi = numpy.zeros(len(xk0), dtype=complex)

    chi[1:] = redfac0[1:] * s02 * N * f0[1:] / (xk0[1:] * (path["reff"] + dr) ** 2.0)
    chi[1:] *= numpy.exp(-2 * path["reff"] / lambda0[1:])
    chi[1:] *= numpy.exp(-2 * (p0[1:] ** 2.0) * sig2)
    chi[1:] *= numpy.exp(1j * (2 * p0[1:] * dr - 4 * p0[1:] * sig2 / path["reff"]))
    chi[1:] *= numpy.exp(1j * (2 * xk0[1:] * path["reff"] + delta0[1:]))

    return xkout, numpy.imag(chi)
Example no. 25
File: mesh.py Project: wk1984/gimli
def transform2DMeshTo3D(mesh, x, y, z=None):
    """
    Transform a 2D mesh into 3D coordinates using a point list (e.g. from GPS)

    Parameters
    ----------
    mesh: GIMLi::Mesh
    x,y: array of x/y positions along 2d profile
    z: optional height to add (topographic correction if the mesh was computed assuming a flat earth)

    See Also
    --------

    References
    ----------
    """

    # get mesh node positions
    mt, mz = pg.x( mesh.positions() ), pg.y( mesh.positions() ) # mesh tape and z
    # compute length of reference points along tape
    pt = np.hstack( (0., np.cumsum( np.sqrt( np.diff( x )**2 + np.diff( y )**2 ) ) ) )
    #  interpolate node positions from tape to x/y using tape positions
    mx = np.interp( mt, pt, x )
    my = np.interp( mt, pt, y )
    # compute z offset by interpolating z
    if z is None:
        oz = np.zeros( len(mt) )
    else:
        oz = np.interp( mt, pt, z )

    # set the positions in the mesh
    for i, node in enumerate( mesh.nodes() ):
        node.setPos( pg.RVector3( mx[i], my[i], mz[i]+oz[i] ) )
Example no. 26
def bifurcation_s21(params,f):
    """
    Swenson paper:
        Equation: y = yo + A/(1+4*y**2)
    """
    A = (params['A_mag'].value *
         np.exp(1j * params['A_phase'].value))
    f_0 = params['f_0'].value
    Q = params['Q'].value
    Q_e = (params['Q_e_real'].value +
           1j * params['Q_e_imag'].value)
           
    a = params['a'].value
    
    if np.isscalar(f):
        fmodel = np.linspace(f*0.9999,f*1.0001,1000)
        scalar = True
    else:
        fmodel = f
        scalar = False
    y_0 = ((fmodel - f_0)/f_0)*Q
    y =  (y_0/3. + 
            (y_0**2/9 - 1/12)/cbrt(a/8 + y_0/12 + np.sqrt((y_0**3/27 + y_0/12 + a/8)**2 - (y_0**2/9 - 1/12)**3) + y_0**3/27) + 
            cbrt(a/8 + y_0/12 + np.sqrt((y_0**3/27 + y_0/12 + a/8)**2 - (y_0**2/9 - 1/12)**3) + y_0**3/27))
    x = y/Q
    s21 = A*(1 - (Q/Q_e)/(1+2j*Q*x))
    msk = np.isfinite(s21)
    if scalar or not np.all(msk):
        s21_interp_real = np.interp(f,fmodel[msk],s21[msk].real)
        s21_interp_imag = np.interp(f,fmodel[msk],s21[msk].imag)
        s21new = s21_interp_real+1j*s21_interp_imag
    
    else:
        s21new = s21
    return s21new*cable_delay(params,f)
Example no. 27
 def test_zero_dimensional_interpolation_point(self):
     x = np.linspace(0, 1, 5)
     y = np.linspace(0, 1, 5)
     x0 = np.array(.3)
     assert_almost_equal(np.interp(x0, x, y), x0)
     x0 = np.array(.3, dtype=object)
     assert_almost_equal(np.interp(x0, x, y), .3)
def short_field(interface):

    # unpack data
    results_field   = interface.results.takeoff_field_length
    results_fuel    = interface.results.fuel_for_missions
    available_tofl  = interface.analyses.missions.short_field.mission.airport.available_tofl
 
    tofl_vec        = results_field.takeoff_field_length
    weight_vec_tofl = results_field.takeoff_weights
    
    range_vec       = results_fuel.distances
    weight_vec_fuel = results_fuel.weights
    fuel_vec        = results_fuel.fuels
        
    # evaluate maximum allowable takeoff weight from a given airfield
    tow_short_field = np.interp(available_tofl,tofl_vec,weight_vec_tofl)

    # determine maximum range/fuel based in tow short_field
    range_short_field = np.interp(tow_short_field,weight_vec_fuel,range_vec)
    fuel_short_field  = np.interp(tow_short_field,weight_vec_fuel,fuel_vec)

    # pack 
    results = Data()
    results.tag            = 'short_field'
    results.takeoff_weight = tow_short_field
    results.range          = range_short_field
    results.fuel           = fuel_short_field

    return results
Example no. 29
def periodic_integrate(ts, peak_list, offset=0., period=1.):
    #TODO: should be a peak finder, not an integrator?
    movwin = lambda a, l:  np.lib.stride_tricks.as_strided(a, \
                 (a.shape[0] - l + 1, l), a.itemsize * np.ones(2))
    new_peak_list = []
    for hints in peak_list:
        t0, t1 = hints['t0'], hints['t1']

        # time the first whole "period" starts
        tpi = offset + period * ((t0 - offset) // period + 1)
        if tpi > t1:
            # the entire peak is within one "period"
            new_peak_list.append([t0, t1, hints])
            continue
        tp = np.hstack([[t0], np.arange(tpi, t1, period)])
        if tp[-1] != t1:
            # add the last point to the list
            tp = np.hstack([tp, [t1]])
        for tp0, tp1 in movwin(tp, 2):
            new_hints = {'pf': hints.get('pf', '')}
            if 'y0' in hints and 'y1' in hints:
                # calculate the new baseline for this peak
                xs, ys = [t0, t1], [hints['y0'], hints['y1']]
                new_hints['y0'] = np.interp(tp0, xs, ys)
                new_hints['y1'] = np.interp(tp1, xs, ys)
            new_peak_list.append([tp0, tp1, new_hints])

    peaks = simple_integrate(ts, new_peak_list)
    for p in peaks:
        p.info['p-create'] = p.info['p-create'].split(',')[0] + \
                ',periodic_integrate'
    return peaks
Example no. 30
 def sanitize_data(self):
     """Fill the series via interpolation"""
     validx = None; validy = None
     countx = None; county = None  
     if self.x is not None:
         validx = np.sum(np.isfinite(self.x))
         countx = float(self.x.size)
     else: 
         raise ValueError("The x-axis is not populated, calculate values before you interpolate.")
     if self.y is not None:
         validy = np.sum(np.isfinite(self.y))
         county = float(self.y.size)
     else: 
         raise ValueError("The y-axis is not populated, calculate values before you interpolate.")
     
     if min([validx/countx,validy/county]) < self.VALID_REQ: 
         warnings.warn(
           "Poor data quality, there are not enough valid entries for x ({0:f}/{1:f}) or y ({2:f}/{3:f}).".format(validx,countx,validy,county),
           UserWarning)
     # TODO: use filter and cubic splines!
     #filter = np.logical_and(np.isfinite(self.x),np.isfinite(self.y))
     if validy > validx:
         y = self.y[np.isfinite(self.y)]
         self.x = np.interp(y, self.y, self.x)
         self.y = y
     else:
         x = self.x[np.isfinite(self.x)] 
         self.y = np.interp(x, self.x, self.y)
         self.x = x
Example no. 31
dataSignal = formattedData[:, 2]

timestep = 1.0 / sampleFreq
splits = 1

# Calculating SpO2
IR = data[:, 2]
RED = data[:, 1]
IR = butterworthLowpassFilter(IR, 2, sampleFreq, 6)[2500:]
RED = butterworthLowpassFilter(RED, 2, sampleFreq, 6)[2500:]
xValues = [_ for _ in range(len(IR))]
redMaximums, redMinimums = findMaxMins(RED)
irMaximums, irMinimums = findMaxMins(IR)
redYValues = [RED[i] for i in redMinimums]
redYInterp = np.interp(xValues, redMinimums, redYValues)
irYValues = [IR[i] for i in irMinimums]
irYInterp = np.interp(xValues, irMinimums, irYValues)
acIr = [(IR[i] - irYInterp[i]) for i in irMaximums]
dcIr = [irYInterp[i] for i in irMaximums]
acRed = [(RED[i] - redYInterp[i]) for i in irMaximums]
dcRed = [redYInterp[i] for i in irMaximums]
spo2ans = []

for i in range(len(acRed)):
    ratioAverage = (acRed[i] * dcIr[i]) / (acIr[i] * dcRed[i])
    if ratioAverage > 1:
        continue
    spo2 = (-45.060 * ratioAverage * ratioAverage) + (30.354 *
                                                      ratioAverage) + 94.845
    if spo2 > 0:
Example no. 32
def train(hyp, opt, device, tb_writer=None, wandb=None):
    logger.info(f'Hyperparameters {hyp}')
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names'])  # number classes, names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        if hyp.get('anchors'):
            ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create
        exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # Logging
    if wandb and wandb.run is None:
        opt.hyp = hyp  # add hyperparameters
        wandb_run = wandb.init(config=opt, resume="allow",
                               project='YOLOv3' if opt.project == 'runs/train' else Path(opt.project).stem,
                               name=save_dir.stem,
                               id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
    loggers = {'wandb': wandb}  # loggers dict

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # Results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        ema.updates = start_epoch * nb // accumulate  # set EMA updates
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
                                       rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                Thread(target=plot_labels, args=(labels, save_dir, loggers), daemon=True).start()
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    logger.info('Image sizes %g train, %g test\n'
                'Using %g dataloader workers\nLogging results to %s\n'
                'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), model)  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
                elif plots and ni == 3 and wandb:
                    wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            if ema:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 plots=plots and final_epoch,
                                                 log_imgs=opt.log_imgs if wandb else 0)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb:
                    wandb.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict(),
                            'wandb_id': wandb_run.id if wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Strip optimizers
        for f in [last, best]:
            if f.exists():  # is *.pt
                strip_optimizer(f)  # strip optimizer
                os.system('gsutil cp %s gs://%s/weights' % (f, opt.bucket)) if opt.bucket else None  # upload

        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb:
                files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
                wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
                                       if (save_dir / f).exists()]})
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))

        # Test best.pt
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            results, _, _ = test.test(opt.data,
                                      batch_size=total_batch_size,
                                      imgsz=imgsz_test,
                                      model=attempt_load(best if best.exists() else last, device).half(),
                                      single_cls=opt.single_cls,
                                      dataloader=testloader,
                                      save_dir=save_dir,
                                      save_json=True,  # use pycocotools
                                      plots=False)

    else:
        dist.destroy_process_group()

    wandb.run.finish() if wandb and wandb.run else None
    torch.cuda.empty_cache()
    return results
Example no. 33
def single_pulses(n_files, n_pulses, polycos, bestprof, n_bins=1220):
    ''' This function converts the output files from waterfall.py into
    a single csv file with all the single pulses (row = one pulse).
    Also plots the folded pulse to check.

    Args:
    n_files: (int) number of input files counting from 0
    n_pulses: (int) how many single pulses you want
    polycos: polycos used to compute the instantaneous period (via findP)
    bestprof: (str) path to the .bestprof file, used to read MJD0
    n_bins: (int) how many points per single pulse. default: 1220
    
    Returns: nothing
    
    '''

    # times array
    file_name_1 = 'times_{}.csv'
    df_list_1 = []
    for i in range(0, n_files + 1):
        df_list_1.append(pd.read_csv(file_name_1.format(i), header=None))
        times = (pd.concat(df_list_1).to_numpy()).T.flatten()

    # intensity array
    file_name_2 = 'original_{}.csv'
    df_list_2 = []
    for i in range(0, n_files + 1):
        df_list_2.append(pd.read_csv(file_name_2.format(i), header=None))
        originals = (pd.concat(df_list_2).to_numpy()).T.flatten()

    # find the MJD0
    MJD0 = np.genfromtxt(bestprof,
                         comments="none",
                         dtype=np.float128,
                         skip_header=3,
                         max_rows=1,
                         usecols=(3))

    # create a new vector of times to match the period:
    new_times = np.zeros(
        n_bins * n_pulses
    )  # vector with (corrected) time in seconds since the start of the observation
    new_times[0] = times[0]

    MJD_times = np.zeros(
        n_pulses
    )  # array with the MJD at the beginning of each individual pulse

    for n in range(0, n_pulses):
        temp = new_times[
            n *
            n_bins]  # time (in seconds since the beginning of the observation) at the beginning of each pulse
        period = findP(polycos, MJD0, temp)  # we find the instantaneous period
        new_dt = period / n_bins  # we divide the pulse into bins

        #       print(str(n) + ' ' + str(n*n_bins) + ' ' + str(new_times[n * n_bins]) + ' ' + str(period*1000.))

        for i in range(0, n_bins):
            if n * n_bins + i + 1 == n_bins * n_pulses: continue
            new_times[n * n_bins + i +
                      1] = new_times[n * n_bins +
                                     i] + new_dt  # we save the corrected time

        MJD_times[n] = MJD0 + temp * 1. / (24. * 60. * 60.)

    # Interpolation
    new_data = (np.interp(new_times, times, originals)).reshape(
        (n_pulses, n_bins))

    # Write table
    obs_data = bestprof[bestprof.find('_A') +
                        1:bestprof.find('.pfd')]  # get the antenna and date
    output_csv = "sp_" + obs_data + ".csv"  # name of the output .csv

    np.savetxt(output_csv, new_data, delimiter=',')  # save as ascii file
    np.save(output_csv.replace(".csv", ".npy"),
            new_data)  # save as binary file

    np.savetxt("sp_MJD_" + obs_data + ".csv", MJD_times,
               delimiter=',')  # save the single pulses MJDs
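A note on the resampling above: its core is building a per-pulse time grid from the instantaneous period and mapping the recorded intensities onto it with a single np.interp call. Below is a minimal sketch of that pattern on synthetic data (a constant period stands in for the polycos-derived one; every value is made up):

import numpy as np

n_pulses, n_bins, period = 5, 128, 0.714           # toy values
times = np.arange(0.0, n_pulses * period, 1e-3)    # recorded sample times (1 ms cadence)
signal = np.exp(-((times % period) - 0.1)**2 / 1e-3)  # one Gaussian pulse per period

# Corrected time grid: n_bins points per pulse, spaced by period / n_bins.
new_times = np.arange(n_pulses * n_bins) * (period / n_bins)

# np.interp resamples the intensities onto the new grid; each row is one pulse.
single = np.interp(new_times, times, signal).reshape(n_pulses, n_bins)
print(single.shape)  # (5, 128)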
Esempio n. 34
0
# ax = fig.add_subplot(gs3[0,1])
# pO   = ax.plot(u/Umax,z/H,'-or',label=r"OpenFOAM")
# pw   = ax.plot(uw,zuw,'xb',label=r"Wilcox (1998)")
# pwlr = ax.plot(uwlr,zuwlr,'+g',label=r"Wilcox Low-Re (2006)")
# pDNS = ax.plot(uDNS,zuDNS,'^k',label=r"DNS (1999)")
# xlabel(r'$u/u_{max}$',fontsize=20)
# #ylabel(r'z / h')
# handles, labels = ax.get_legend_handles_labels()
# ax.legend(handles,labels,numpoints=1,loc='best',fontsize=17)
# ax.axis([0, 1.05, 0, 1.02])

# ax = fig.add_subplot(gs3[0,2])
# pO   = ax.plot(k/utau**2,z/H,'-or',label="OpenFOAM")
# pw   = ax.plot(kw,zkw,'xb',label="Wilcox (1998)")
# pwlr = ax.plot(kwlr,zkwlr,'+g',label="Wilcox Low-Re (20??)")
# pDNS = ax.plot(kDNS,zkDNS,'^k',label="DNS (19??)")
# xlabel(r'$k/u_{\tau}^2$',fontsize=20)
# #ylabel(r'z / h')
# ax.axis([0, 5, 0, 1.02])
# show()
# #=============================================================================

u_interp = np.interp(zuw, z[:] / H, U[0, :] / Umax)
rms_u = rms(u_interp - uw)
assert (rms_u <= 0.04)

k_interp = np.interp(zkw, z[:] / H, k[:] / utau**2)
rms_k = rms(k_interp - kw)
assert (rms_k <= 0.16)
r2 = np.clip(a, a_min, a_max)  # Clip (limit) the values in an array.
r3 = np.sqrt(x)  # Return the positive square-root of an array, element-wise.
r4 = np.square(x)  # Return the element-wise square of the input.
r5 = np.absolute(x)  # Calculate the absolute value element-wise.
r6 = np.fabs(x)  # Compute the absolute values element-wise.
r7 = np.sign(x)  # Returns an element-wise indication of the sign of a number.
r8 = np.maximum(x1, x2)  # Element-wise maximum of array elements.
r9 = np.minimum(x1, x2)  # Element-wise minimum of array elements.
r10 = np.fmax(x1, x2)  # Element-wise maximum of array elements.
r11 = np.fmin(x1, x2)  # Element-wise minimum of array elements.
r12 = np.nan_to_num(x)  # Replace nan with zero and inf with finite numbers.
r13 = np.real_if_close(
    a
)  # If complex input returns a real array if complex parts are close to zero.
# Type error
r14b = np.interp(x, xp, fp)  #One-dimensional linear interpolation.

a = [2.1, 3.4, 5.6]
a_min = 1
a_max = 3
v = 3.4
x = [3.5, 3.6, 3.7, 3.8]
xp = 5.6
fp = 5.8
x1 = 3.6
x2 = 3.7

r14 = np.convolve(
    a, v
)  # Returns the discrete, linear convolution of two one-dimensional sequences.
r15 = np.clip(a, a_min, a_max)  # Clip (limit) the values in an array.
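The "# Type error" flagged above comes from passing scalars as xp and fp: np.interp expects xp to be a 1-D, increasing sequence of sample positions and fp a matching sequence of values, while the query x may be a scalar or an array. A small sketch of the valid call shapes (the numbers are illustrative only):

import numpy as np

xp = [1.0, 2.0, 3.0]        # 1-D, increasing sample positions
fp = [10.0, 20.0, 30.0]     # corresponding values, same length as xp

print(np.interp(2.5, xp, fp))              # scalar query  -> 25.0
print(np.interp([0.5, 1.5, 3.5], xp, fp))  # array query   -> [10. 15. 30.] (clamped at the ends)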
Esempio n. 36
0
def run(drug_channel):
    drug,channel = drug_channel
    print "\n\n{} + {}\n\n".format(drug,channel)
    
    num_expts, experiment_numbers, experiments = dr.load_crumb_data(drug,channel)
    if (0 < args.num_expts < num_expts):
        num_expts = args.num_expts
        
    drug, channel, output_dir, chain_dir, figs_dir, chain_file = dr.hierarchical_output_dirs_and_chain_file(drug,channel,num_expts)
    
    hill_cdf_file, pic50_cdf_file = dr.hierarchical_posterior_predictive_cdf_files(drug,channel,num_expts)
    
    hill_cdf = np.loadtxt(hill_cdf_file)
    pic50_cdf = np.loadtxt(pic50_cdf_file)
    
    num_samples = 2000
    
    unif_hill_samples = npr.rand(num_samples)
    unif_pic50_samples = npr.rand(num_samples)
    
    hill_samples = np.interp(unif_hill_samples, hill_cdf[:,1], hill_cdf[:,0])
    pic50_samples = np.interp(unif_pic50_samples, pic50_cdf[:,1], pic50_cdf[:,0])
    
    
    
    
    fig = plt.figure(figsize=(11,7))
    
    
    ax1 = fig.add_subplot(231)
    ax1.grid()
    xmin = -4
    xmax = 3
    concs = np.logspace(xmin,xmax,101)
    ax1.set_xscale('log')
    ax1.set_ylim(0,100)
    ax1.set_xlabel(r'{} concentration ($\mu$M)'.format(drug))
    ax1.set_ylabel(r'% {} block'.format(channel))
    ax1.set_title('A. Hierarchical predicted\nfuture experiments')
    ax1.set_xlim(10**xmin,10**xmax)
    
    for expt in experiment_numbers:
        ax1.scatter(experiments[expt][:,0],experiments[expt][:,1],label='Expt {}'.format(expt+1),color=colors[expt],s=100,zorder=10)
    
    for i, conc in enumerate(args.concs):
        ax1.axvline(conc,color=colors[3+i],lw=2,label=r"{} $\mu$M".format(conc),alpha=0.8)
    for i in xrange(num_samples):
        ax1.plot(concs,dr.dose_response_model(concs,hill_samples[i],dr.pic50_to_ic50(pic50_samples[i])),color='black',alpha=0.01)
    ax1.legend(loc=2,fontsize=10)
    
    num_hist_samples = 100000
    
    unif_hill_samples = npr.rand(num_hist_samples)
    unif_pic50_samples = npr.rand(num_hist_samples)
    
    hill_samples = np.interp(unif_hill_samples, hill_cdf[:,1], hill_cdf[:,0])
    pic50_samples = np.interp(unif_pic50_samples, pic50_cdf[:,1], pic50_cdf[:,0])
    
    ax2 = fig.add_subplot(234)
    ax2.set_xlim(0,100)
    ax2.set_xlabel(r'% {} block'.format(channel))
    ax2.set_ylabel(r'Probability density')
    ax2.grid()
    for i, conc in enumerate(args.concs):
        ax2.hist(dr.dose_response_model(conc,hill_samples,dr.pic50_to_ic50(pic50_samples)),bins=50,normed=True,color=colors[3+i],alpha=0.8,edgecolor='none',label=r"{} $\mu$M {}".format(conc,drug))
    
    ax2.set_title('D. Hierarchical predicted\nfuture experiments')
    ax2.legend(loc=2,fontsize=10)
        
    ax3 = fig.add_subplot(232,sharey=ax1)
    ax3.grid()
    xmin = -4
    xmax = 3
    concs = np.logspace(xmin,xmax,101)
    ax3.set_xscale('log')
    ax3.set_ylim(0,100)
    ax3.set_xlabel(r'{} concentration ($\mu$M)'.format(drug))
    ax3.set_title('B. Hierarchical inferred\nunderlying effects')
    ax3.set_xlim(10**xmin,10**xmax)
    
    for expt in experiment_numbers:
        ax3.scatter(experiments[expt][:,0],experiments[expt][:,1],label='Expt {}'.format(expt+1),color=colors[expt],s=100,zorder=10)
    
    chain = np.loadtxt(chain_file)
    end = chain.shape[0]
    burn = end/4
    
    num_samples = 1000
    alpha_indices = npr.randint(burn,end,num_samples)
    alpha_samples = chain[alpha_indices,0]
    mu_samples = chain[alpha_indices,2]
    for i, conc in enumerate(args.concs):
        ax3.axvline(conc,color=colors[3+i],lw=2,label=r"{} $\mu$M".format(conc),alpha=0.8)
    for i in xrange(num_samples):
        ax3.plot(concs,dr.dose_response_model(concs,alpha_samples[i],dr.pic50_to_ic50(mu_samples[i])),color='black',alpha=0.01)
    ax3.legend(loc=2,fontsize=10)
    ax4 = fig.add_subplot(235,sharey=ax2)
    ax4.set_xlim(0,100)
    ax4.set_xlabel(r'% {} block'.format(channel))
    ax4.grid()
    
    num_hist_samples = 100000
    hist_indices = npr.randint(burn,end,num_hist_samples)
    alphas = chain[hist_indices,0]
    mus = chain[hist_indices,2]
    
    for i, conc in enumerate(args.concs):
        ax4.hist(dr.dose_response_model(conc,alphas,dr.pic50_to_ic50(mus)),bins=50,normed=True,color=colors[3+i],alpha=0.8,edgecolor='none',label=r"{} $\mu$M {}".format(conc,drug))
    ax4.set_title('E. Hierarchical inferred\nunderlying effects')
    
    plt.setp(ax3.get_yticklabels(), visible=False)
    plt.setp(ax4.get_yticklabels(), visible=False)
    
    
    # now plot non-hierarchical
    
    num_params = 3
    drug,channel,chain_file,figs_dir = dr.nonhierarchical_chain_file_and_figs_dir(drug, channel, args.fix_hill)
    chain = np.loadtxt(chain_file,usecols=range(num_params-1)) # not interested in log-target values right now
    end = chain.shape[0]
    burn = end/4

    num_samples = 1000
    sample_indices = npr.randint(burn,end,num_samples)
    samples = chain[sample_indices,:]
    
    
    ax5 = fig.add_subplot(233,sharey=ax1)
    ax5.grid()
    plt.setp(ax5.get_yticklabels(), visible=False)
    xmin = -4
    xmax = 4
    concs = np.logspace(xmin,xmax,101)
    ax5.set_xscale('log')
    ax5.set_ylim(0,100)
    ax5.set_xlim(10**xmin,10**xmax)
    ax5.set_xlabel(r'{} concentration ($\mu$M)'.format(drug))
    ax5.set_title('C. Single-level inferred\neffects')
    ax5.legend(fontsize=10)
    
    for expt in experiment_numbers:
        if expt==1:
            ax5.scatter(experiments[expt][:,0],experiments[expt][:,1],color='orange',s=100,label='All expts',zorder=10)
        else:
            ax5.scatter(experiments[expt][:,0],experiments[expt][:,1],color='orange',s=100,zorder=10)
    
    for i, conc in enumerate(args.concs):
        ax5.axvline(conc,color=colors[3+i],alpha=0.8,lw=2,label=r"{} $\mu$M".format(conc))
    for i in xrange(num_samples):
        ax5.plot(concs,dr.dose_response_model(concs,samples[i,0],dr.pic50_to_ic50(samples[i,1])),color='black',alpha=0.01)
    ax5.legend(loc=2,fontsize=10)
    
    num_hist_samples = 50000
    sample_indices = npr.randint(burn,end,num_hist_samples)
    samples = chain[sample_indices,:]
    ax6 = fig.add_subplot(236,sharey=ax2)
    ax6.set_xlim(0,100)
    ax6.set_xlabel(r'% {} block'.format(channel))
    plt.setp(ax6.get_yticklabels(), visible=False)
    ax6.grid()
    for i, conc in enumerate(args.concs):
        ax6.hist(dr.dose_response_model(conc,samples[:,0],dr.pic50_to_ic50(samples[:,1])),bins=50,normed=True,alpha=0.8,color=colors[3+i],edgecolor='none',label=r"{} $\mu$M {}".format(conc,drug))
    ax6.set_title('F. Single-level inferred\neffects')

    ax2.legend(loc=2,fontsize=10)
    
    
    
    plot_dir = dr.all_predictions_dir(drug,channel)
    
    fig.tight_layout()
    fig.savefig(plot_dir+'{}_{}_all_predictions.png'.format(drug,channel))
    fig.savefig(plot_dir+'{}_{}_all_predictions.pdf'.format(drug,channel)) # uncomment to save as pdf, or change extension to whatever you want
    

    plt.close()
    
    print "Figures saved in", plot_dir
Esempio n. 37
0
def train(
    hyp,  # path/to/hyp.yaml or hyp dictionary
    opt,
    device,
):
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers

    # Directories
    w = save_dir / 'weights'  # weights dir
    w.mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp) as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(
        colorstr('hyperparameters: ') + ', '.join(f'{k}={v}'
                                                  for k, v in hyp.items()))

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Config
    plots = not evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + RANK)
    with torch_distributed_zero_first(RANK):
        data_dict = check_dataset(data)  # check
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if single_cls and len(
        data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(
        names
    ) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
    # is_coco = data.endswith('coco.yaml') and nc == 80  # COCO dataset
    is_coco = data.endswith('top3.yaml') and nc == 5  # custom 5-class dataset treated as the COCO case
    # Loggers
    if RANK in [-1, 0]:
        loggers = Loggers(save_dir, weights, opt, hyp, data_dict,
                          LOGGER).start()  # loggers dict
        if loggers.wandb and resume:
            weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict

    # Model
    pretrained = weights.endswith('.pt')
    pretrained = False  # override: never load pretrained weights, always build the model from cfg
    if pretrained:
        with torch_distributed_zero_first(RANK):
            weights = attempt_download(
                weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(cfg or ckpt['model'].yaml,
                      ch=3,
                      nc=nc,
                      anchors=hyp.get('anchors')).to(device)  # create
        exclude = [
            'anchor'
        ] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict(
        )  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(),
                              exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(
            f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}'
        )  # report
    else:
        model = Model(cfg, ch=3, nc=nc,
                      anchors=hyp.get('anchors')).to(device)  # create

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print(f'freezing {k}')
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size),
                     1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    g0, g1, g2 = [], [], []  # optimizer parameter groups
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
            g2.append(v.bias)
        if isinstance(v, nn.BatchNorm2d):  # BatchNorm weight (no decay)
            g0.append(v.weight)
        elif hasattr(v, 'weight') and isinstance(
                v.weight, nn.Parameter):  # weight (with decay)
            g1.append(v.weight)

    if opt.adam:
        optimizer = Adam(g0, lr=hyp['lr0'],
                         betas=(hyp['momentum'],
                                0.999))  # adjust beta1 to momentum
    else:
        optimizer = SGD(g0,
                        lr=hyp['lr0'],
                        momentum=hyp['momentum'],
                        nesterov=True)

    optimizer.add_param_group({
        'params': g1,
        'weight_decay': hyp['weight_decay']
    })  # add g1 with weight_decay
    optimizer.add_param_group({'params': g2})  # add g2 (biases)
    LOGGER.info(
        f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
        f"{len(g0)} weight (no decay), {len(g1)} weight (with decay), {len(g2)} bias")
    del g0, g1, g2

    # Scheduler
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp[
            'lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(
        optimizer,
        lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if RANK in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if resume:
            assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
        if epochs < start_epoch:
            LOGGER.info(
                f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs."
            )
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, csd

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[
        -1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz = check_img_size(opt.imgsz, gs,
                           floor=gs * 2)  # verify imgsz is gs-multiple

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        logging.warning(
            'DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n'
            'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.'
        )
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_path,
                                              imgsz,
                                              batch_size // WORLD_SIZE,
                                              gs,
                                              single_cls,
                                              hyp=hyp,
                                              augment=True,
                                              cache=opt.cache_images,
                                              rect=opt.rect,
                                              rank=RANK,
                                              workers=workers,
                                              image_weights=opt.image_weights,
                                              quad=opt.quad,
                                              prefix=colorstr('train: '))
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(train_loader)  # number of batches
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in [-1, 0]:
        val_loader = create_dataloader(val_path,
                                       imgsz,
                                       batch_size // WORLD_SIZE * 2,
                                       gs,
                                       single_cls,
                                       hyp=hyp,
                                       cache=opt.cache_images and not noval,
                                       rect=True,
                                       rank=-1,
                                       workers=workers,
                                       pad=0.5,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            labels = np.concatenate(dataset.labels, 0)
            # c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir, loggers)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset,
                              model=model,
                              thr=hyp['anchor_t'],
                              imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and RANK != -1:
        model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640)**2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(
        dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb),
             1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0
               )  # P, R, mAP@0.5, mAP@0.5:0.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(
            start_epoch, epochs
    ):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if RANK in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (
                    1 - maps)**2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels,
                                             nc=nc,
                                             class_weights=cw)  # image weights
                dataset.indices = random.choices(
                    range(dataset.n), weights=iw,
                    k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if RANK != -1:
                indices = (torch.tensor(dataset.indices)
                           if RANK == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if RANK != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(
            ('\n' + '%10s' * 7) %
            ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
        if RANK in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (
                imgs, targets, paths, _
        ) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float(
            ) / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(
                    1,
                    np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [
                        hyp['warmup_bias_lr'] if j == 2 else 0.0,
                        x['initial_lr'] * lf(epoch)
                    ])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(
                            ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5,
                                      imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]
                          ]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs,
                                                     size=ns,
                                                     mode='bilinear',
                                                     align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(
                    pred, targets.to(device))  # loss scaled by batch_size
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni - last_opt_step >= accumulate:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1
                                                    )  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%10s' * 2 + '%10.4g' * 5) %
                                     (f'{epoch}/{epochs - 1}', mem, *mloss,
                                      targets.shape[0], imgs.shape[-1]))
                loggers.on_train_batch_end(ni, model, imgs, targets, paths,
                                           plots)

            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in [-1, 0]:
            # mAP
            loggers.on_train_epoch_end(epoch)
            ema.update_attr(model,
                            include=[
                                'yaml', 'nc', 'hyp', 'names', 'stride',
                                'class_weights'
                            ])
            final_epoch = epoch + 1 == epochs
            if not noval or final_epoch:  # Calculate mAP
                results, maps, _ = val.run(data_dict,
                                           batch_size=batch_size //
                                           WORLD_SIZE * 2,
                                           imgsz=imgsz,
                                           model=ema.ema,
                                           single_cls=single_cls,
                                           dataloader=val_loader,
                                           save_dir=save_dir,
                                           save_json=is_coco and final_epoch,
                                           verbose=nc < 50 and final_epoch,
                                           plots=plots and final_epoch,
                                           loggers=loggers,
                                           compute_loss=compute_loss)

            # Update best mAP
            fi = fitness(np.array(results).reshape(
                1, -1))  # weighted combination of [P, R, mAP@0.5, mAP@0.5:0.95]
            if fi > best_fitness:
                best_fitness = fi
            loggers.on_train_val_end(mloss, results, lr, epoch, best_fitness,
                                     fi)

            # Save model
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {
                    'epoch':
                    epoch,
                    'best_fitness':
                    best_fitness,
                    'model':
                    deepcopy(de_parallel(model)).half(),
                    'ema':
                    deepcopy(ema.ema).half(),
                    'updates':
                    ema.updates,
                    'optimizer':
                    optimizer.state_dict(),
                    'wandb_id':
                    loggers.wandb.wandb_run.id if loggers.wandb else None
                }

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
                loggers.on_model_save(last, epoch, final_epoch, best_fitness,
                                      fi)

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in [-1, 0]:
        LOGGER.info(
            f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n'
        )
        if not evolve:
            if is_coco:  # COCO dataset
                for m in [last, best
                          ] if best.exists() else [last]:  # speed, mAP tests
                    results, _, _ = val.run(
                        data_dict,
                        batch_size=batch_size // WORLD_SIZE * 2,
                        imgsz=imgsz,
                        model=attempt_load(m, device).half(),
                        iou_thres=
                        0.7,  # NMS IoU threshold for best pycocotools results
                        single_cls=single_cls,
                        dataloader=val_loader,
                        save_dir=save_dir,
                        save_json=False,
                        plots=False)
            # Strip optimizers
            for f in last, best:
                if f.exists():
                    strip_optimizer(f)  # strip optimizers
        loggers.on_train_end(last, best, plots)

    torch.cuda.empty_cache()
    return results
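In the warmup block above, np.interp acts as a cheap piecewise-linear scheduler: for the first nw iterations the accumulation count, each group's learning rate and the momentum are ramped between two endpoints as a function of the batch counter ni. A stripped-down sketch of that ramp, detached from the training loop (the hyperparameter values below are made up):

import numpy as np

nw = 1000                                   # assumed number of warmup iterations
xi = [0, nw]                                # x-coordinates of the ramp endpoints
warmup_bias_lr, lr0 = 0.1, 0.01
warmup_momentum, momentum = 0.8, 0.937

for ni in (0, 250, 500, 1000):
    # bias lr falls from warmup_bias_lr to lr0; other groups rise from 0.0 to lr0
    bias_lr = np.interp(ni, xi, [warmup_bias_lr, lr0])
    other_lr = np.interp(ni, xi, [0.0, lr0])
    mom = np.interp(ni, xi, [warmup_momentum, momentum])
    print(f'ni={ni:4d}  bias_lr={bias_lr:.4f}  lr={other_lr:.4f}  momentum={mom:.4f}')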
Esempio n. 38
0
    def _s2_aerosol(self, ):

        self.s2_logger.propagate = False
        self.s2_logger.info('Start to retrieve atmospheric parameters.')
        self.s2 = read_s2(self.s2_toa_dir, self.s2_tile, self.year, self.month,
                          self.day, self.s2_u_bands)
        self.s2_logger.info('Reading in TOA reflectance.')
        selected_img = self.s2.get_s2_toa()
        self.s2_file_dir = self.s2.s2_file_dir
        self.s2.get_s2_cloud()
        self.s2_logger.info('Loading emulators.')
        self._load_xa_xb_xc_emus()
        self.s2_logger.info(
            'Find corresponding pixels between S2 and MODIS tiles')
        tiles = Find_corresponding_pixels(self.s2.s2_file_dir + '/B04.jp2',
                                          destination_res=500)
        if len(tiles.keys()) > 1:
            self.s2_logger.info('This sentinel 2 tile covers %d MODIS tiles.' %
                                len(tiles.keys()))
        self.mcd43_files = []
        boas, boa_qas, brdf_stds, Hxs, Hys = [], [], [], [], []
        self.s2_logger.info(
            'Getting the angles and simulated surface reflectance.')
        for key in tiles.keys():
            self.s2_logger.info('Getting BOA from MODIS tile: %s.' % key)
            mcd43_file = glob(self.mcd43_tmp %
                              (self.mcd43_dir, self.year, self.doy, key))[0]
            self.mcd43_files.append(mcd43_file)
            self.H_inds, self.L_inds = tiles[key]
            Lx, Ly = self.L_inds
            Hx, Hy = self.H_inds
            Hxs.append(Hx)
            Hys.append(Hy)
            self.s2.get_s2_angles(self.reconstruct_s2_angle)

            self.s2_angles = np.zeros((4, 6, len(Hx)))
            for j, band in enumerate(self.s2_u_bands[:-2]):
                self.s2_angles[[0,2],j,:] = (self.s2.angles['vza'][band])[Hx, Hy], \
                                            (self.s2.angles['vaa'][band])[Hx, Hy]

                self.s2_angles[[1,3],j,:] = self.s2.angles['sza'][Hx, Hy], \
                                                          self.s2.angles['saa'][Hx, Hy]

            # fill bad angle values by interpolating over the valid pixels
            for i in range(4):
                mask = ~np.isfinite(self.s2_angles[i])
                if mask.sum() > 0:
                    self.s2_angles[i][mask] = np.interp(np.flatnonzero(mask), \
                     np.flatnonzero(~mask), \
                                                                      self.s2_angles[i][~mask]) # simple interpolation
            vza, sza = self.s2_angles[:2]
            vaa, saa = self.s2_angles[2:]
            raa = vaa - saa
            # get the simulated surface reflectance
            s2_boa, s2_boa_qa, brdf_std = get_brdf_six(mcd43_file, angles=[vza, sza, raa],\
                                                              bands=(3,4,1,2,6,7), Linds= [Lx, Ly])
            boas.append(s2_boa)
            boa_qas.append(s2_boa_qa)
            brdf_stds.append(brdf_std)
        self.s2_boa = np.hstack(boas)
        self.s2_boa_qa = np.hstack(boa_qas)
        self.brdf_stds = np.hstack(brdf_stds)
        self.s2_logger.info('Applying spectral transform.')
        self.s2_boa = self.s2_boa*np.array(self.s2_spectral_transform)[0,:-1][...,None] + \
                                  np.array(self.s2_spectral_transform)[1,:-1][...,None]
        self.Hx = np.hstack(Hxs)
        self.Hy = np.hstack(Hys)
        del sza
        del vza
        del saa
        del vaa
        del raa
        del mask
        del boas
        del boa_qas
        del brdf_stds
        del Hxs
        del Hys
        shape = (self.num_blocks, self.s2.angles['sza'].shape[0] / self.num_blocks, \
                 self.num_blocks, self.s2.angles['sza'].shape[1] / self.num_blocks)
        self.sza = self.s2.angles['sza'].reshape(shape).mean(axis=(3, 1))
        self.saa = self.s2.angles['saa'].reshape(shape).mean(axis=(3, 1))
        self.vza = []
        self.vaa = []
        for band in self.s2_u_bands[:-2]:
            self.vza.append(
                self.s2.angles['vza'][band].reshape(shape).mean(axis=(3, 1)))
            self.vaa.append(
                self.s2.angles['vaa'][band].reshape(shape).mean(axis=(3, 1)))
        self.vza = np.array(self.vza)
        self.vaa = np.array(self.vaa)
        self.raa = self.saa[None, ...] - self.vaa
        self.s2_logger.info('Getting elevation.')
        example_file = self.s2.s2_file_dir + '/B04.jp2'
        ele_data = reproject_data(self.global_dem,
                                  example_file,
                                  outputType=gdal.GDT_Float32).data
        mask = ~np.isfinite(ele_data)
        ele_data = np.ma.array(ele_data, mask=mask) / 1000.
        self.elevation = ele_data.reshape((self.num_blocks, ele_data.shape[0] / self.num_blocks, \
                                           self.num_blocks, ele_data.shape[1] / self.num_blocks)).mean(axis=(3,1))

        self.s2_logger.info('Getting priors from ECMWF forecasts.')
        sen_time_str = json.load(
            open(self.s2.s2_file_dir + '/tileInfo.json', 'r'))['timestamp']
        self.sen_time = datetime.datetime.strptime(sen_time_str,
                                                   u'%Y-%m-%dT%H:%M:%S.%fZ')
        aot, tcwv, tco3 = np.array(self._read_cams(example_file)).reshape((3, self.num_blocks, \
                                   self.block_size, self.num_blocks, self.block_size)).mean(axis=(4, 2))
        self.aot = aot  #* (1-0.14) # validation of +14% biase
        self.tco3 = tco3 * 46.698  #* (1 - 0.05)
        tcwv = tcwv / 10.
        self.tco3_unc = np.ones(self.tco3.shape) * 0.2
        self.aot_unc = np.ones(self.aot.shape) * 0.5

        self.s2_logger.info(
            'Trying to get the tcwv from the emulation of sen2cor look up table.'
        )
        try:
            self._get_tcwv(selected_img, self.s2.angles['vza'],
                           self.s2.angles['vaa'], self.s2.angles['sza'],
                           self.s2.angles['saa'], ele_data)
        except:
            self.s2_logger.warning(
                'Getting tcwv from the emulation of sen2cor look up table failed, ECMWF data used.'
            )
            self.tcwv = tcwv
            self.tcwv_unc = np.ones(self.tcwv.shape) * 0.2

        self.s2_logger.info('Trying to get the aot from ddv method.')
        try:
            solved = self._get_ddv_aot(selected_img)
            if solved[0] < 0:
                self.s2_logger.warning(
                    'DDV failed and only cams data used for the prior.')
            else:
                self.s2_logger.info(
                    'DDV solved aot is %.02f, and it will be used as the mean value of cams prediction.'
                    % solved[0])
                self.aot += (solved[0] - self.aot.mean())
        except:
            self.s2_logger.warning('Getting aot from ddv failed.')
        self.s2_logger.info('Applying PSF model.')
        if self.s2_psf is None:
            xstd, ystd, ang, xs, ys = self._get_psf(selected_img)
        else:
            xstd, ystd, ang, xs, ys = self.s2_psf
        # apply psf shifts without going out of the image extend
        shifted_mask = np.logical_and.reduce(
            ((self.Hx + int(xs) >= 0), (self.Hx + int(xs) < self.full_res[0]),
             (self.Hy + int(ys) >= 0), (self.Hy + int(ys) < self.full_res[0])))

        self.Hx, self.Hy = self.Hx[shifted_mask] + int(
            xs), self.Hy[shifted_mask] + int(ys)
        #self.Lx, self.Ly = self.Lx[shifted_mask], self.Ly[shifted_mask]
        self.s2_boa = self.s2_boa[:, shifted_mask]
        self.s2_boa_qa = self.s2_boa_qa[:, shifted_mask]
        self.brdf_stds = self.brdf_stds[:, shifted_mask]

        self.s2_logger.info('Getting the convolved TOA reflectance.')
        self.valid_pixs = sum(
            shifted_mask)  # count how many pixels are still within the s2 tile
        ker_size = 2 * int(round(max(1.96 * xstd, 1.96 * ystd)))
        self.bad_pixs = np.zeros(self.valid_pixs).astype(bool)
        imgs = []
        for i, band in enumerate(self.s2_u_bands[:-2]):
            if selected_img[band].shape != self.full_res:
                imgs.append(
                    self.repeat_extend(selected_img[band],
                                       shape=self.full_res))
            else:
                imgs.append(selected_img[band])

        border_mask = np.zeros(self.full_res).astype(bool)
        border_mask[[0, -1], :] = True
        border_mask[:, [0, -1]] = True
        self.bad_pixs = cloud_dilation(self.s2.cloud | border_mask,
                                       iteration=ker_size / 2)[self.Hx,
                                                               self.Hy]
        del selected_img
        del self.s2.selected_img
        del self.s2.angles['vza']
        del self.s2.angles['vaa']
        del self.s2.angles['sza']
        del self.s2.angles['saa']
        del self.s2.sza
        del self.s2.saa
        del self.s2
        ker = self.gaussian(xstd, ystd, ang)
        f = lambda img: signal.fftconvolve(img, ker, mode='same')[self.Hx, self
                                                                  .Hy] * 0.0001
        half = parmap(f, imgs[:3])
        self.s2_toa = np.array(half + parmap(f, imgs[3:]))
        del imgs
        # get the valid value masks
        qua_mask = np.all(self.s2_boa_qa <= self.qa_thresh, axis=0)
        boa_mask = np.all(~self.s2_boa.mask,axis = 0 ) &\
                          np.all(self.s2_boa > 0, axis = 0) &\
                          np.all(self.s2_boa < 1, axis = 0)
        toa_mask =       (~self.bad_pixs) &\
                          np.all(self.s2_toa > 0, axis = 0) &\
                          np.all(self.s2_toa < 1, axis = 0)
        self.s2_mask = boa_mask & toa_mask & qua_mask
        self.Hx = self.Hx[self.s2_mask]
        self.Hy = self.Hy[self.s2_mask]
        self.s2_toa = self.s2_toa[:, self.s2_mask]
        self.s2_boa = self.s2_boa[:, self.s2_mask]
        self.s2_boa_qa = self.s2_boa_qa[:, self.s2_mask]
        self.brdf_stds = self.brdf_stds[:, self.s2_mask]
        self.s2_boa_unc = grab_uncertainty(self.s2_boa, self.boa_bands,
                                           self.s2_boa_qa,
                                           self.brdf_stds).get_boa_unc()
        self.s2_logger.info('Solving...')
        self.aero = solving_atmo_paras(
            self.s2_boa, self.s2_toa, self.sza, self.vza, self.saa, self.vaa,
            self.aot, self.tcwv, self.tco3, self.elevation, self.aot_unc,
            self.tcwv_unc, self.tco3_unc, self.s2_boa_unc, self.Hx, self.Hy,
            self.full_res, self.aero_res, self.emus, self.band_indexs,
            self.boa_bands)
        solved = self.aero._optimization()
        return solved
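The angle clean-up near the top of the method is a common gap-filling idiom: non-finite entries are overwritten by linearly interpolating the valid values as a function of their flat index. A minimal, self-contained version of the same idiom:

import numpy as np

angles = np.array([10.0, np.nan, np.nan, 13.0, 14.0, np.inf, 16.0])

mask = ~np.isfinite(angles)                     # True where the value is bad
angles[mask] = np.interp(np.flatnonzero(mask),  # positions to fill
                         np.flatnonzero(~mask), # positions of valid samples
                         angles[~mask])         # valid sample values

print(angles)  # [10. 11. 12. 13. 14. 15. 16.]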
Esempio n. 39
0
    def get_result(self,
                   method,
                   imgNames,
                   true_label,
                   predict_score,
                   path,
                   minThreshold=-1):

        # Getting predicted scores
        predict = np.array(predict_score)
        if (len(predict.shape) == 2):
            predict = predict[:, 1]
        elif (len(predict.shape) == 3):
            predict = predict[:, :, 1]

        # Normalization of scores in [0,1]
        predictScore = (predict - min(predict)) / (max(predict) - min(predict))
        print('Max Score:' + str(max(predict)))
        print('Min Score:' + str(min(predict)))

        # Saving image or video name with match score
        if imgNames != 'None':
            imgNameScore = []
            for i in range(len(imgNames)):
                imgNameScore.append(
                    [imgNames[i], true_label[i], predictScore[i]])
            with open(os.path.join(path, method + '_Match_Scores.csv'),
                      'w',
                      newline='') as fout:
                writer = csv.writer(fout)
                writer.writerows(imgNameScore)

        # Histogram plot
        live = []
        [
            live.append(predictScore[i]) for i in range(len(true_label))
            if (true_label[i] == 0)
        ]
        spoof = []
        [
            spoof.append(predictScore[j]) for j in range(len(true_label))
            if (true_label[j] == 1)
        ]
        bins = np.linspace(np.min(np.array(spoof + live)),
                           np.max(np.array(spoof + live)), 60)
        plt.figure()
        plt.hist(live,
                 bins,
                 alpha=0.5,
                 label='Bonafide',
                 density=True,
                 edgecolor='black',
                 facecolor='g')
        plt.hist(spoof,
                 bins,
                 alpha=0.5,
                 label='PA',
                 density=True,
                 edgecolor='black',
                 facecolor='r')
        plt.legend(loc='upper right', fontsize=15)
        plt.xlabel('Scores')
        plt.ylabel('Frequency')
        plt.savefig(os.path.join(path, method + "_Histogram.jpg"))

        # Plot ROC curves in semilog scale
        (fprs, tprs, thresholds) = roc_curve(true_label, predictScore)
        plt.figure()
        plt.semilogx(fprs, tprs, label=method)
        plt.grid(True, which="major")
        plt.legend(loc='lower right', fontsize=15)
        plt.yticks(np.arange(0, 1.1, 0.1))
        plt.xticks([0.001, 0.01, 0.1, 1])
        plt.xlabel('False Detection Rate')
        plt.ylabel('True Detection Rate')
        plt.xlim((0.0005, 1.01))
        plt.ylim((0, 1.02))
        plt.plot([0.002, 0.002], [0, 1], color='#A0A0A0', linestyle='dashed')
        plt.plot([0.001, 0.001], [0, 1], color='#A0A0A0', linestyle='dashed')
        plt.plot([0.01, 0.01], [0, 1], color='#A0A0A0', linestyle='dashed')
        plt.savefig(os.path.join(path, method + "_ROC.jpg"))

        #Plot Raw ROC curves
        plt.figure()
        plt.plot(fprs, tprs)
        plt.grid(True, which="major")
        plt.legend([method], loc='lower right', fontsize=15)
        plt.yticks(np.arange(0, 1.1, 0.1))
        plt.xticks([0.01, 0.1, 1])
        plt.xlabel('False Detection Rate')
        plt.ylabel('True Detection Rate')
        plt.xlim((0.0005, 1.01))
        plt.ylim((0, 1.02))
        plt.savefig(os.path.join(path, method + "_RawROC.jpg"))

        # Calculation of TDR at 0.2%, 0.1%, 1% and 5% FDR
        with open(os.path.join(path, method + '_TDR-ACER.csv'),
                  mode='w+') as fout:
            fprArray = [0.002, 0.001, 0.01, 0.05]
            for fpr in fprArray:
                tpr = np.interp(fpr, fprs, tprs)
                threshold = self.get_threshold(fprs, thresholds, fpr)
                fout.write("TDR @ FDR, threshold: %f @ %f ,%f\n" %
                           (tpr, fpr, threshold))
                print("TDR @ FDR, threshold: %f @ %f ,%f " %
                      (tpr, fpr, threshold))

        # Calculation of APCER, BPCER and ACER
            if minThreshold == -1:
                minACER = 1000
                for thresh in pl.frange(0, 1, 0.025):
                    APCER = np.count_nonzero(np.less(spoof,
                                                     thresh)) / len(spoof)
                    BPCER = np.count_nonzero(np.greater_equal(
                        live, thresh)) / len(live)
                    ACER = (APCER + BPCER) / 2
                    if ACER < minACER:
                        minThreshold = thresh
                        minAPCER = APCER
                        minBPCER = BPCER
                        minACER = ACER
                fout.write(
                    "APCER and BPCER @ ACER, threshold: %f and %f @ %f, %f\n" %
                    (minAPCER, minBPCER, minACER, minThreshold))
                print(
                    "APCER and BPCER @ ACER, threshold: %f and %f @ %f, %f\n" %
                    (minAPCER, minBPCER, minACER, minThreshold))
            else:
                APCER = np.count_nonzero(np.less(spoof,
                                                 minThreshold)) / len(spoof)
                BPCER = np.count_nonzero(np.greater_equal(
                    live, minThreshold)) / len(live)
                ACER = (APCER + BPCER) / 2
                fout.write(
                    "APCER and BPCER @ ACER, threshold: %f and %f @ %f, %f\n" %
                    (APCER, BPCER, ACER, minThreshold))
                print(
                    "APCER and BPCER @ ACER, threshold: %f and %f @ %f, %f\n" %
                    (APCER, BPCER, ACER, minThreshold))

        # Calculation of Confusion matrix
        #threshold = self.get_threshold(fprs, thresholds, 0.002)
        predict = predictScore >= minThreshold
        predict_label = []
        [predict_label.append(int(predict[i])) for i in range(len(predict))]
        conf_matrix = confusion_matrix(
            true_label, predict_label)  # 0 for live and 1 for spoof
        print(conf_matrix)

        # Plot non-normalized confusion matrix
        np.set_printoptions(precision=2)
        class_names = ['0', '1']
        self.plot_confusion_matrix(conf_matrix,
                                   method,
                                   path,
                                   classes=class_names,
                                   normalize=False)

        # Saving evaluation measures
        pickle.dump((fprs, tprs, minThreshold, tpr, fpr, conf_matrix),
                    open(os.path.join(path, method + ".pickle"), "wb"))
        errorIndex = []
        [
            errorIndex.append(i) for i in range(len(true_label))
            if true_label[i] != predict_label[i]
        ]
        return errorIndex, predictScore, minThreshold
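The TDR-at-fixed-FDR numbers above rely on roc_curve returning fprs in increasing order, so np.interp can read the true detection rate off the curve at any requested false detection rate. A small sketch of that lookup on synthetic scores (scikit-learn's roc_curve, as in the code above; the score distributions are invented):

import numpy as np
from sklearn.metrics import roc_curve

rng = np.random.default_rng(0)
labels = np.r_[np.zeros(500), np.ones(500)]                 # 0 = bonafide, 1 = attack
scores = np.r_[rng.normal(0.3, 0.1, 500), rng.normal(0.7, 0.1, 500)]

fprs, tprs, thresholds = roc_curve(labels, scores)          # fprs is increasing
for fdr in (0.002, 0.001, 0.01, 0.05):
    tdr = np.interp(fdr, fprs, tprs)                        # TPR at the requested FPR
    print('TDR @ FDR=%.3f: %.3f' % (fdr, tdr))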
Esempio n. 40
0
def c2_tot_lj(z, ell, j):
    dist_s = distance(z)
    k_var = ((ell / dist_s)**2 + (2 * 3.14 * j / dist_s)**2)**0.5
    return pc2tot_s1(np.interp(k_var, k_file, p_file), k_var, z) / D2_L
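Here np.interp simply reads a tabulated power spectrum: k_file / p_file hold the grid on which P(k) was precomputed, the wavenumber is formed from ell, the comoving distance and the mode index j, and the spectrum is evaluated at that k by linear interpolation. A tiny sketch of the lookup alone (the tabulated spectrum below is a made-up power law):

import numpy as np

# Assumed tabulated spectrum: a power law on a log-spaced k grid.
k_file = np.logspace(-3, 1, 200)
p_file = 2.0e4 * k_file**-1.5

ell, j, dist_s = 100.0, 2.0, 3000.0                       # illustrative values
k_var = np.hypot(ell / dist_s, 2.0 * np.pi * j / dist_s)  # |k| from the two components
p_at_k = np.interp(k_var, k_file, p_file)                 # k_file must be increasing

print(k_var, p_at_k)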
Esempio n. 41
0
 elif city == 'Yogyakarta':
     time = time_others
     site_class = 'C'
     data_index = 4
 elif city == 'Surabaya':
     time = time_others
     site_class = 'D'
     data_index = 5
 # Calculate incremental rate
 hazard_curve_2017 = numpy.array([gm, data_list[0][:, data_index]])
 hazard_curve_2010 = numpy.array([gm, data_list[1][:, data_index]])
 # interpolate hazard curve
 interpolate = False
 if interpolate:
     x_values = numpy.power(10, numpy.arange(-4, 1.7, 0.1))
     hazard_curve_interp = numpy.interp(x_values, gm, data[:, 1])
     hazard_curve = numpy.array([x_values, hazard_curve_interp])
 plot_hazard_curve(hazard_curve_2010)
 plot_hazard_curve(hazard_curve_2017)
 haz_mmi_dict_list = []
 for hazard_curve in [hazard_curve_2010, hazard_curve_2017]:
     incremental_hazard_curve = calculate_incremental_rates(
         hazard_curve)
     realisations = \
         simulate_gm_realisations(incremental_hazard_curve,
                                  time, 100000)
     #Convert to MMI including uncertainty
     #Remove site effects first
     # i.e. for each realisation we want to consider
     # how we might observe it
     gm_site_effects = gm_site_amplification(hazard_curve[0], period,
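The disabled interpolate branch above resamples a hazard curve onto a log-spaced grid of ground-motion values before further processing. A minimal version of that resampling on a synthetic curve (both arrays below are invented):

import numpy

# Synthetic hazard curve: annual exceedance probability versus ground motion.
gm = numpy.array([0.01, 0.05, 0.1, 0.2, 0.5, 1.0])
poe = numpy.array([0.5, 0.2, 0.1, 0.04, 0.01, 0.002])

# Resample onto a denser, log-spaced ground-motion grid (gm must be increasing).
x_values = numpy.power(10.0, numpy.arange(-2, 0.05, 0.1))
hazard_curve_interp = numpy.interp(x_values, gm, poe)
hazard_curve = numpy.array([x_values, hazard_curve_interp])

print(hazard_curve.shape)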
Esempio n. 42
0
def calibfcn(x, pltstring = '--'):
    error = 0
    
    for loadcase in [1]:
        #Read experimental data from mat files
        if loadcase == 0:
            data_file = '/home/viktor/projects/Puffin/inputfiles/calibration_crypla/data/5compDirTest_010.mat'
            eul2 = 'Materials/elasticity_tensor/euler_angle_1=90 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=0' #changes the rotation of crystal to 001 along loading in input file            
            # eul2 = 'Materials/elasticity_tensor/euler_angle_1=60 Materials/elasticity_tensor/euler_angle_2=90' #changes the rotation of crystal to 001 along loading in input file
        elif loadcase == 1:
            data_file = '/home/viktor/projects/Puffin/inputfiles/calibration_crypla/data/5compDirTest_100.mat'
            eul2 = 'Materials/elasticity_tensor/euler_angle_1=180 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=90' #changes the rotation of crystal to 100 along loading in input file
            # eul2 = 'Materials/elasticity_tensor/euler_angle_1=45 Materials/elasticity_tensor/euler_angle_2=60' #changes the rotation of crystal to 110 along loading in input file
        data = sio.loadmat(data_file)
        strain_exp = data['xx'][:,0]
        stress_exp = data['yy'][:,0]*1e-6 #in MPa

        #Set up moose input file
        inputfile = '1element_calib.i '
        # names of properties to calibrate
        slip_rate_props_name = 'UserObjects/slip_rate_gss/flowprops='
        state_var_props_name = 'UserObjects/state_var_gss/group_values='
        state_var_rate_h0_name = 'UserObjects/state_var_evol_rate_comp_gss/h0_group_values='
        state_var_rate_tauSat_name = 'UserObjects/state_var_evol_rate_comp_gss/tauSat_group_values='
        state_var_rate_hardeningExponent_name = 'UserObjects/state_var_evol_rate_comp_gss/hardeningExponent_group_values='

        # initial values (from Darbandi2012)
        slip_rate_props_vals = [1, 32, 0.001, 0.05] #start_ss end_ss gamma0 1/m m = 20??
        state_var_props_vals = [x[6]] * 6# initial slip resistance values of each ss
        state_var_rate_h0_vals =  [x[0], x[1], x[2], x[3], x[4], x[5]] #  h0 of each ss
        state_var_rate_tauSat_vals = [x[7], x[8], x[9], x[10], x[11], x[12]] # assume different saturation values
        state_var_rate_hardeningExponent_vals = [x[13]]*6 #[x[13], x[14], x[15], x[16], x[17], x[18]] # the hardening exponent c

        slip_rate_props = '\''+" ".join(str(x) for x in slip_rate_props_vals)+'\' '
        state_var_props = '\''+" ".join(str(x) for x in state_var_props_vals)+'\' '
        state_var_rate_h0 = '\''+" ".join(str(x) for x in state_var_rate_h0_vals)+'\' '
        state_var_rate_tauSat = '\''+" ".join(str(x) for x in state_var_rate_tauSat_vals)+'\' '
        state_var_rate_hardeningExponent = '\''+" ".join(str(x) for x in state_var_rate_hardeningExponent_vals)+'\' '


        #Run moose simulation
        print 'Load case:', loadcase
        print "\033[94mCurrent material parameters [MPa]:" + "\033[94m{}\033[0m".format(x*160.217662)
        print "\033[95mCurrent material parameters:" + "\033[95m{}\033[0m".format(x)
        runcmd = 'mpirun -n 1 ../../puffin-opt -i ' + inputfile + slip_rate_props_name + slip_rate_props + state_var_props_name + state_var_props + state_var_rate_h0_name + state_var_rate_h0 + state_var_rate_tauSat_name + state_var_rate_tauSat + state_var_rate_hardeningExponent_name + state_var_rate_hardeningExponent + eul2 + ' > mooselog.txt'
        print 'Running this command:\n' + runcmd + "\033[0m"
        call(runcmd, shell=True)

        #Get stress strain curve from csv file
        # aa = np.recfromcsv('calibrationSn.csv')
        aa = np.loadtxt('calibrationSn.csv',delimiter = ',', skiprows = 1)
        # idx = (np.abs(-aa[:,-3] - 0.12)).argmin()
        #idx = -1
        strain_sim = -aa[:,-3] #eps_yy
        stress_sim = -aa[:,-1]*160.217662 #sigma_yy in MPa (compression positive)

        if np.max(strain_sim) < 0.048: #this means the simulation failed ???
            error += 20
        else:
            #Interpolate experimental values to simulated times
            stress_exp_interp = np.interp(strain_sim,strain_exp,stress_exp)
            #Calculate error
            error += np.linalg.norm((stress_sim-stress_exp_interp)/stress_exp_interp)

        if loadcase == 0:
            # error = np.linalg.norm((stress_sim-stress_exp_interp)/stress_exp_interp)
            ax1.plot(strain_exp,stress_exp,'ko')
            ax1.plot(strain_sim,stress_sim,pltstring)
        elif loadcase == 1:
            ax2.plot(strain_exp,stress_exp,'ko')
            ax2.plot(strain_sim,stress_sim,pltstring)

        plt.pause(0.05)

    print "\033[91mError is: \033[00m"+"\033[91m {}\033[00m".format(error)

    return error
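The error measure above interpolates the experimental stress-strain curve onto the strain values produced by the simulation and then takes the norm of the relative difference. A compact sketch of that comparison on two synthetic curves (the hardening laws below are made up):

import numpy as np

# "Experimental" curve, densely sampled and monotonic in strain.
strain_exp = np.linspace(0.0, 0.06, 300)
stress_exp = 50.0 * np.tanh(80.0 * strain_exp)    # MPa

# "Simulated" curve on its own, coarser strain grid (kept away from zero stress).
strain_sim = np.linspace(0.002, 0.05, 40)
stress_sim = 48.0 * np.tanh(85.0 * strain_sim)

# Interpolate the experiment onto the simulated strains, then form a relative error.
stress_exp_interp = np.interp(strain_sim, strain_exp, stress_exp)
error = np.linalg.norm((stress_sim - stress_exp_interp) / stress_exp_interp)
print(error)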
Esempio n. 43
0
    def _match_to_arcs(self, route, trajectory, pandas=True):
        """

        :return:
        """

        arcs = pd.DataFrame(columns=[
            'route_order', 'leg_order', 'edge_order', 'osm_id',
            'osm_source_id', 'osm_target_id', 'bbid_id', 'map_length'
        ])
        if not route['df'].empty:
            arcs['route_order'] = route['df'].route.values  # rix,
            arcs['leg_order'] = route['df'].leg.values  # lix,
            arcs['edge_order'] = route['df'].index  # eix,
            arcs['osm_id'] = route['df'].way.values
            arcs['osm_source_id'] = route['df'].source.values
            arcs['osm_target_id'] = route['df'].target.values
            arcs['start_lat'] = self._network.nodes(
                ids=arcs.osm_source_id.to_list(), columns=['latitude']).values
            arcs['start_lon'] = self._network.nodes(
                ids=arcs.osm_source_id.to_list(), columns=['longitude']).values
            arcs['end_lat'] = self._network.nodes(
                ids=arcs.osm_target_id.to_list(), columns=['latitude']).values
            arcs['end_lon'] = self._network.nodes(
                ids=arcs.osm_target_id.to_list(), columns=['longitude']).values
            arcs['bbid_id'] = ''
            arcs['map_length'] = route['df'].length  #osrm_edge_length

            for ix, group in arcs.groupby(['route_order']):
                group_datetimes = trajectory.loc[trajectory.matching_index ==
                                                 group.route_order.values[0],
                                                 'datetime'].values
                trace = self._trace.loc[
                    (group_datetimes[0] <= self._trace.datetime)
                    & (group_datetimes[-1] >= self._trace.datetime)]
                trace = trace.set_index(trace.index.values -
                                        trace.index.values[0])
                trace.time = trace.time.values - trace.time.values[0]

                # map_distance = np.insert(route['df'].loc[
                #                              route['df'].route == group.route_order.values[
                #                                  0], 'osrm_edge_length'].cumsum().values, 0, 0.0)
                map_distance = np.insert(
                    route['df'].loc[route['df'].route ==
                                    group.route_order.values[0],
                                    'length'].cumsum().values, 0, 0.0)
                meas_distance = sp.integrate.cumtrapz(x=trace.time,
                                                      y=trace.speed,
                                                      initial=0.0)
                factor = (map_distance[-1] - map_distance[0]) / (
                    meas_distance[-1] - meas_distance[0])
                meas_distance = meas_distance * factor
                node_times = np.interp(map_distance, meas_distance,
                                       trace.index.total_seconds())
                arcs.loc[group.index,
                         'start_time'] = group_datetimes[0] + pd.to_timedelta(
                             node_times[:-1], unit='S')
                arcs.loc[group.index,
                         'end_time'] = group_datetimes[0] + pd.to_timedelta(
                             node_times[1:] - 1.0e-9, unit='S')
                arcs.loc[group.index, 'distance_factor'] = factor

        # add unmatched or low-confidence segments
        for ix, group in trajectory.groupby(['matching_index']):
            if group.matching_index.iloc[0] not in arcs.route_order.unique():
                arcs = pd.concat([
                    arcs,
                    pd.DataFrame([{
                        'route_order': group.matching_index.iloc[0],
                        'start_time': group.datetime.iloc[0],
                        'end_time': group.datetime.iloc[-1],
                        'leg_order': None,
                        'edge_order': None,
                        'osm_id': None,
                        'osm_source_id': None,
                        'osm_target_id': None,
                        'bbid_id': '',
                        'map_length': None,
                        'start_lat': group.gps_latitude.iloc[0],
                        'start_lon': group.gps_longitude.iloc[0],
                        'end_lat': group.gps_latitude.iloc[-1],
                        'end_lon': group.gps_longitude.iloc[-1],
                        'distance_factor': 1.0
                    }])
                ], ignore_index=True)

        arcs.start_time = arcs.start_time.dt.tz_localize('UTC')
        arcs.end_time = arcs.end_time.dt.tz_localize('UTC')
        arcs = arcs.sort_values(by=['start_time'])
        arcs = arcs.reset_index(drop=True)

        # if pandas:
        #     return pd.DataFrame(arcs)
        # else:
        return arcs
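# A stripped-down sketch of the timing step in _match_to_arcs above (synthetic numbers,
# not the project's data model): the cumulative map distance at the arc boundaries is
# matched to the distance obtained by integrating the measured speed, and np.interp then
# recovers the time at which each boundary is passed.
import numpy as np
from scipy.integrate import cumulative_trapezoid  # modern name of the cumtrapz call used above

t = np.arange(0.0, 61.0, 1.0)            # 60 s of samples at 1 Hz
speed = np.full_like(t, 12.0)            # constant 12 m/s

meas_distance = cumulative_trapezoid(y=speed, x=t, initial=0.0)
map_distance = np.array([0.0, 150.0, 400.0, 720.0])   # arc boundaries along the map (m)

# Rescale the measured distance so both curves span the same length, then interpolate
# the passage time of every arc boundary.
factor = map_distance[-1] / meas_distance[-1]
node_times = np.interp(map_distance, meas_distance * factor, t)
print(node_times)   # -> approximately [0.0, 12.5, 33.33, 60.0]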
Esempio n. 44
0
    def from_lasio_curve(cls,
                         curve,
                         depth=None,
                         basis=None,
                         start=None,
                         stop=None,
                         step=0.1524,
                         run=-1,
                         null=-999.25,
                         service_company=None,
                         date=None):
        """
        Makes a curve object from a lasio curve object and either a depth
        basis or start and step information.

        Args:
            curve (ndarray)
            depth (ndarray)
            basis (ndarray)
            start (float)
            stop (float)
            step (float): default: 0.1524
            run (int): default: -1
            null (float): default: -999.25
            service_company (str): Optional.
            date (str): Optional.

        Returns:
            Curve. An instance of the class.
        """
        data = curve.data
        unit = curve.unit

        # See if we have uneven sampling.
        if depth is not None:
            d = np.diff(depth)
            if not np.allclose(d - np.mean(d), np.zeros_like(d)):
                # Sampling is uneven.
                step = np.nanmedian(d)
                start, stop = depth[0], depth[-1] + 0.00001  # adjustment
                basis = np.arange(start, stop, step)
                data = np.interp(basis, depth, data)
            else:
                step = np.nanmedian(d)
                start = depth[0]

        # Carry on with easier situations.
        if start is None:
            if basis is not None:
                start = basis[0]
                step = basis[1] - basis[0]
            else:
                raise CurveError("You must provide a basis or a start depth.")

        if step == 0:
            if stop is None:
                raise CurveError("You must provide a step or a stop depth.")
            else:
                step = (stop - start) / (curve.data.shape[0] - 1)

        # Interpolate into this.

        params = {}
        params['mnemonic'] = curve.mnemonic
        params['description'] = curve.descr
        params['start'] = start
        params['step'] = step
        params['units'] = unit
        params['run'] = run
        params['null'] = null
        params['service_company'] = service_company
        params['date'] = date
        params['code'] = curve.API_code

        return cls(data, params=params)
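# A minimal sketch of the resampling branch above with made-up numbers (not from lasio):
# unevenly sampled depths are regridded onto a regular basis built from the median step,
# and the curve values are linearly interpolated onto that basis.
import numpy as np

depth = np.array([100.0, 100.1, 100.3, 100.35, 100.6])   # uneven sampling
values = np.array([2.1, 2.3, 2.2, 2.5, 2.4])

step = np.nanmedian(np.diff(depth))
basis = np.arange(depth[0], depth[-1] + 0.00001, step)
resampled = np.interp(basis, depth, values)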
Esempio n. 45
0
def to_8bit(arr, min_target, max_target):
    return np.interp(arr, (min_target, max_target), (0, 255)).astype("uint8")
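# Hypothetical usage of to_8bit: values below min_target clamp to 0 and values above
# max_target clamp to 255, because np.interp holds the end values outside the given range.
import numpy as np

arr = np.array([-0.2, 0.0, 0.5, 1.0, 1.3])
print(to_8bit(arr, 0.0, 1.0))   # -> [  0   0 127 255 255]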
               
# Specify x-axis range, hide axes, add title and display plot
plt.xlim((0,256))
plt.grid(False)
plt.title('PDF & CDF (original image)')
plt.show()

# Load the image into an array: image
image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')

# Flatten the image into 1 dimension: pixels
pixels = image.flatten()

# Generate a cumulative histogram
cdf, bins, patches = plt.hist(pixels, bins=256, range=(0,256), density=True, cumulative=True)
new_pixels = np.interp(pixels, bins[:-1], cdf*255)

# Reshape new_pixels as a 2-D array: new_image
new_image = new_pixels.reshape(image.shape)

# Display the new image with 'gray' color map
plt.subplot(2,1,1)
plt.title('Equalized image')
plt.axis('off')
plt.imshow(new_image, cmap='gray')

# Generate a histogram of the new pixels
plt.subplot(2,1,2)
pdf = plt.hist(new_pixels, bins=64, range=(0,256), density=False,
               color='red', alpha=0.4)
plt.grid(False)
Esempio n. 47
0
File: base.py Project: rdzotz/gimli
def interpExtrap(x, xp, yp):
    """numpy.interp interpolation function extended by linear extrapolation."""
    y = np.interp(x, xp, yp)
    y = np.where(x < xp[0], yp[0]+(x-xp[0])*(yp[0]-yp[1])/(xp[0]-xp[1]), y)
    return np.where(x > xp[-1], yp[-1]+(x-xp[-1])*(yp[-1]-yp[-2]) /
                    (xp[-1]-xp[-2]), y)
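# Quick check of the extrapolation behaviour (my own example values): inside [1, 3] this
# matches np.interp; outside, the first/last segment is extended linearly.
import numpy as np

xp = np.array([1.0, 2.0, 3.0])
yp = np.array([10.0, 20.0, 40.0])
print(interpExtrap(np.array([0.0, 1.5, 4.0]), xp, yp))   # -> [ 0. 15. 60.]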
Esempio n. 48
0
def scale_to_range_filter(in_raster, min_target, max_target):
    return np.interp(in_raster, (np.nanmin(in_raster), np.nanmax(in_raster)),
                     (min_target, max_target))
Esempio n. 49
0
    def __call__(self, value, clip=None):
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
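# The __call__ above presumably belongs to a two-slope matplotlib normaliser; a minimal
# sketch of the full class it could sit in (the vmin/midpoint/vmax names are taken from
# the snippet, the rest is my own reconstruction):
import numpy as np
import matplotlib.colors as mcolors

class MidpointNormalize(mcolors.Normalize):
    """Map vmin -> 0, midpoint -> 0.5 and vmax -> 1 with two linear segments."""

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        super().__init__(vmin, vmax, clip)

    def __call__(self, value, clip=None):
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))

# e.g. plt.imshow(data, norm=MidpointNormalize(vmin=-2, vmax=5, midpoint=0), cmap='RdBu_r')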
originalFile = 'hpoints1.csv'
outFile ='scadCommands2.txt'


with open(originalFile, "rt") as fin:
    # split the first line, which contains the x values
    line = fin.readline().rstrip()
    x1 = np.array(line.split(','), dtype=float)

    # split the second line, which contains the y values
    line = fin.readline().rstrip()
    y1 = np.array(line.split(','), dtype=float)


y = np.arange(0.02, 0.2, 0.001)
x = np.interp(y, x1, y1)

     
with open(outFile, "wt") as fout:
    fout.write('polygon(points = [')
    count = 0  # keep track of how many points we add to the pointLocationList

    pointLocationList = ''
    pointsList = ''

    # start at the origin: np.insert returns a new array, so the result must be kept
    x = np.insert(x, 0, 0.02)
    x = np.insert(x, 0, 0.0)
     
camera_poses = np.array([  # assignment line reconstructed; the earlier rows of this pose array were truncated in the source
    [-155.427, 2247.910, 487.786, -0.413, 179.976, 0.000],
    [-923.420, 2248.823, 478.974, -0.290, 179.801, 0.000],
    [-1616.208, 2251.235, 470.393, -0.290, 179.801, 0.000],
    [-2693.185, 2254.983, 457.054, -0.290, 179.801, 0.000],
    [-3734.848, 2267.302, 451.761, 0.602, 178.032, 0.000],
    [-4077.070, 2276.225, 458.437, 1.122, 178.250, 0.000],
    [-4393.500, 2311.624, 465.688, 0.590, 130.252, 0.000],
    [-4623.441, 2665.899, 368.658, 6.102, 111.552, 0.000],
    [-4720.735, 2953.385, 411.037, 6.937, 99.380, 0.000]])
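# The rows above are keyframe poses spaced `fps` frames apart; np.interp below fills in
# a pose for every intermediate frame, interpolating each column (axis) independently.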

fps = 10
times = np.arange(0, camera_poses.shape[0] * fps, fps)
filled_times = np.arange(0, camera_poses.shape[0] * fps)

filtered_poses = np.array(
    [np.interp(filled_times, times, axis) for axis in camera_poses.T]).T


import sys

from unrealcv import client  # assumed import: this snippet relies on the UnrealCV Python client


class UnrealcvStereo():
    def __init__(self):

        client.connect()
        if not client.isconnected():
            print(
                'UnrealCV server is not running. Run the game downloaded from http://unrealcv.github.io first.'
            )
            sys.exit(-1)

    def __str__(self):
        return client.request('vget /unrealcv/status')
Esempio n. 52
0
def normalized_cut_values(oil):
    f_res = f_asph = 0  # for now, we are including the resins and asphaltenes
    cuts = get_distillation_cuts(oil)
    oil_api = oil.metadata.API
    iBP = 266
    tBP = 1050

    if len(cuts) == 0:
        # should be a warning if api < 50 or not a crude
        oil_api = oil.metadata.API
        if oil.metadata.product_type != 'Crude Oil NOS':
            logger.error('warning: oil not recommended for use in Gnome')
            # print(WARNINGS['W007'] + "  - oil not recommended for use in Gnome")
        if oil_api < 0:
            raise ValueError(
                "Density is too large for estimations. Oil not suitable for use in Gnome"
            )

        # ADIOS2 method
        BP_i = est.cut_temps_from_api(oil_api)
        fevap_i = np.cumsum(est.fmasses_flat_dist(f_res, f_asph))
        # Robert's new method
        #iBP = 10/9 * (519.3728 - 3.6637 * oil_api) - 1015 / 9
        #tBP = 1015
        #BP_i = [iBP, tBP]
        #fevap_i = [0,1]
    else:
        BP_i, fevap_i = list(zip(*[(c[1], c[0]) for c in cuts]))
        N = len(BP_i)
        if not (fevap_i[1] == fevap_i[0]):
            iBP = BP_i[0] - fevap_i[0] * (BP_i[1] - BP_i[0]) / (fevap_i[1] -
                                                                fevap_i[0])
        if not (fevap_i[N - 1] == fevap_i[0]):
            tBP = BP_i[N - 1] + (1 - fevap_i[0]) * (BP_i[N - 1] - BP_i[0]) / (
                fevap_i[N - 1] - fevap_i[0])

    iBP = max(266, iBP)
    tBP = min(1050, tBP)

    set_temp = [266, 310, 353, 483, 563, 650, 800, 950, 1050]
    #set_temp = [266,310,353,423,483,523,543,563,650,800,950,1050]

    N = len(BP_i)

    new_fevap = fevap_i
    new_BP = BP_i
    if not fevap_i[N - 1] == 1:
        new_BP = np.append(BP_i, tBP)
        new_fevap = np.append(fevap_i, 1.0)
    if not fevap_i[0] == 0:
        new_BP = np.insert(new_BP, 0, iBP)
        new_fevap = np.insert(new_fevap, 0, 0)
    new_evap = np.interp(set_temp, new_BP, new_fevap)

    if new_evap[-1] < 1:
        new_evap[-1] = 1  # put all the extra mass in the last cut

    avg_evap_i = np.asarray(new_evap[1:])
    avg_temp_i = np.asarray([(set_temp[i] + set_temp[i + 1]) / 2
                             for i in range(0,
                                            len(set_temp) - 1)])

    avg_temp_i = avg_temp_i[avg_evap_i != 0]
    avg_evap_i = avg_evap_i[avg_evap_i != 0]

    num_ones = 0
    for i in range(len(avg_evap_i)):
        if avg_evap_i[i] == 1:
            num_ones += 1

    if num_ones > 1:
        for i in range(num_ones - 1):
            avg_temp_i = np.delete(avg_temp_i, len(avg_evap_i) - 1)
            avg_evap_i = np.delete(avg_evap_i, len(avg_evap_i) - 1)

    return np.asarray(avg_temp_i), est.fmasses_from_cuts(avg_evap_i)
Esempio n. 53
0
def discretize_profile(profile,
                       y_bins=None,
                       y_mid=None,
                       variables=None,
                       display_figure=False,
                       fill_gap=False,
                       fill_extremity=False):
    """
    :param profile:
    :param y_bins:
    :param y_mid:
    :param variables:
    :param display_figure: boolean, default False
    :param fill_gap: boolean, default False
    :param fill_extremity: boolean, default False
    :return:
        profile
    """
    logger = logging.getLogger(__name__)

    if profile.empty:
        logger.warning("Discretization impossible, empty profile")
        return profile
    else:
        if 'name' in profile.keys():
            logger.info("Processing %s" % profile.name.unique()[0])
        else:
            logger.info("Processing core")

    v_ref = profile.v_ref.unique()[0]

    # VARIABLES CHECK
    if y_bins is None and y_mid is None:
        y_bins = pd.Series(
            profile.y_low.dropna().tolist() +
            profile.y_sup.dropna().tolist()).sort_values().unique()
        y_mid = profile.y_mid.dropna().sort_values().unique()
        logger.info("y_bins and y_mid are empty, creating from profile")
    elif y_bins is None and y_mid is not None:
        logger.info("y_bins is empty, creating from given y_mid")
        y_mid = y_mid.sort_values().values
        dy = np.diff(y_mid) / 2
        y_bins = np.concatenate([[y_mid[0] - dy[0]], y_mid[:-1] + dy,
                                 [y_mid[-1] + dy[-1]]])
        if y_bins[0] < 0:
            y_bins[0] = 0
    else:
        y_mid = np.diff(y_bins) / 2 + y_bins[:-1]
        logger.info("y_mid is empty, creating from given y_bins")

    y_bins = np.array(y_bins)
    y_mid = np.array(y_mid)

    if variables is None:
        variables = [
            variable for variable in profile.variable.unique().tolist()
            if variable in profile.keys()
        ]

    if not isinstance(variables, list):
        variables = [variables]

    discretized_profile = pd.DataFrame()

    for variable in variables:
        if profile[profile.variable == variable].empty:
            logger.debug("\t %s profile is missing" % variable)
        else:
            logger.debug("\t %s profile is discretized" % variable)

            # continuous profile (temperature-like)
            if is_continuous_profile(profile[profile.variable == variable]):
                yx = profile.loc[profile.variable == variable,
                                 ['y_mid', variable]].set_index(
                                     'y_mid').sort_index()
                y2 = y_mid
                x2 = np.interp(y2,
                               yx.index,
                               yx[variable],
                               left=np.nan,
                               right=np.nan)

                y2x = pd.DataFrame(x2, columns=[variable], index=y2)
                for index in yx.index:
                    y2x.loc[abs(y2x.index - index) < 1e-6,
                            variable] = yx.loc[yx.index == index,
                                               variable].values

                # compute weight, if y_mid is in min(yx) < y_mid < max(yx)
                w = [
                    1 if yx.index[0] - TOL <= y <= yx.index[-1] + TOL else 0
                    for y in y_mid
                ]

                # add the temperature profile extremum values
                if not any(abs(yx.index[0] - y2) < TOL):
                    y2x.loc[yx.index[0],
                            variable] = yx.loc[yx.index == yx.index[0],
                                               variable].values
                    w = w + [0]
                if not any(abs(yx.index[-1] - y2) < TOL):
                    y2x.loc[yx.index[-1],
                            variable] = yx.loc[yx.index == yx.index[-1],
                                               variable].values
                    w = w + [0]

                temp = pd.DataFrame(columns=profile.columns.tolist(),
                                    index=range(y2x.__len__()))
                temp.update(
                    y2x.reset_index().rename(columns={'index': 'y_mid'}))
                temp['weight'] = pd.Series(w, index=temp.index)
                temp = temp.sort_values('y_mid').reset_index()
                profile_prop = profile.loc[profile.variable == variable].head(
                    1)
                profile_prop = profile_prop.drop(variable, axis=1)
                profile_prop['variable'] = variable
                if 'y_low' in profile_prop:
                    profile_prop = profile_prop.drop('y_low', axis=1)
                profile_prop = profile_prop.drop('y_mid', axis=1)
                if 'y_sup' in profile_prop:
                    profile_prop = profile_prop.drop('y_sup', axis=1)

                temp.update(
                    pd.DataFrame([profile_prop.iloc[0].tolist()],
                                 columns=profile_prop.columns.tolist(),
                                 index=temp.index.tolist()))
                if 'date' in temp:
                    temp['date'] = temp['date'].astype('datetime64[ns]')

                if display_figure:
                    plt.figure()
                    yx = yx.reset_index()
                    plt.plot(yx[variable], yx['y_mid'], 'k')
                    plt.plot(temp[variable], temp['y_mid'], 'xr')
                    if 'name' in profile_prop.keys():
                        plt.title(profile_prop.name.unique()[0] + ' - ' +
                                  variable)
                    plt.show()
            # step profile (salinity-like)
            else:
                if v_ref == 'bottom':
                    # yx = profile[profile.variable == variable].set_index('y_mid', drop=False).sort_index().as_matrix(
                    #     ['y_sup', 'y_low', variable])
                    yx = profile.loc[profile.variable == variable,
                                     ['y_sup', 'y_low', variable]].sort_values(
                                         by='y_low').values
                    if yx[0, 0] > yx[0, 1]:
                        # yx = profile[profile.variable == variable].set_index('y_mid', drop=False).sort_index().as_matrix(
                        #     ['y_low', 'y_sup', variable])
                        yx = profile.loc[
                            profile.variable == variable,
                            ['y_low', 'y_sup', variable]].sort_values(
                                by='y_sup').values
                else:
                    # yx = profile[profile.variable == variable].set_index('y_mid', drop=False).sort_index().as_matrix(
                    #     ['y_low', 'y_sup', variable])
                    yx = profile.loc[profile.variable == variable,
                                     ['y_sup', 'y_low', variable]].sort_values(
                                         by='y_low').values

                # if a section is missing, add an empty section with np.nan as the property value
                yx_new = []
                for row in range(yx[:, 0].__len__() - 1):
                    yx_new.append(yx[row])
                    if abs(yx[row, 1] - yx[row + 1, 0]) > TOL:
                        yx_new.append([yx[row, 1], yx[row + 1, 0], np.nan])
                yx_new.append(yx[row + 1, :])
                yx = np.array(yx_new)
                del yx_new

                if fill_gap:
                    value = pd.Series(yx[:, 2])
                    value_low = value.fillna(method='ffill')
                    value_sup = value.fillna(method='bfill')

                    ymid = pd.Series(yx[:, 0] + (yx[:, 1] - yx[:, 0]) / 2)
                    ymid2 = pd.Series(None, index=value.index)
                    ymid2[np.isnan(value)] = ymid[np.isnan(value)]

                    dy = pd.DataFrame(yx[:, 0:2], columns=['y_low', 'y_sup'])
                    dy2 = pd.DataFrame([[None, None]],
                                       index=value.index,
                                       columns=['y_low', 'y_sup'])
                    dy2[~np.isnan(value)] = dy[~np.isnan(value)]
                    dy2w = dy2['y_low'].fillna(
                        method='bfill') - dy2['y_sup'].fillna(method='ffill')
                    new_value = value_low + (ymid2 - dy2['y_sup'].fillna(
                        method='ffill')) * (value_sup - value_low) / dy2w
                    value.update(new_value)

                    yx[:, 2] = value

                x_step = []
                y_step = []
                w_step = []  # weight of the bin: the fraction of the bin over which the property is defined
                # yx and y_bins should both be sorted in ascending order
                if (np.diff(y_bins) < 0).all():
                    logger.info("y_bins is descending reverting the list")
                    y_bins = y_bins[::-1]
                elif (np.diff(y_bins) > 0).all():
                    logger.debug("y_bins is ascending")
                else:
                    logger.info("y_bins is not sorted")
                if (np.diff(yx[:, 0]) < 0).all():
                    logger.info("yx is descending reverting the list")
                    yx = yx[::-1, :]
                elif (np.diff(yx[:, 0]) > 0).all():
                    logger.debug("yx is ascending")
                else:
                    logger.info("yx is not sorted")

                for ii_bin in range(y_bins.__len__() - 1):
                    a = np.flatnonzero((yx[:, 0] - y_bins[ii_bin] < -TOL)
                                       & (y_bins[ii_bin] - yx[:, 1] < -TOL))
                    a = np.concatenate(
                        (a,
                         np.flatnonzero((y_bins[ii_bin] - yx[:, 0] <= TOL) & (
                             yx[:, 1] - y_bins[ii_bin + 1] <= TOL))))
                    a = np.concatenate(
                        (a,
                         np.flatnonzero(
                             (yx[:, 0] - y_bins[ii_bin + 1] < -TOL)
                             & (y_bins[ii_bin + 1] - yx[:, 1] < -TOL))))
                    a = np.unique(a)
                    #print(y_bins[ii_bin], y_bins[ii_bin+1])
                    #print(a, yx[a])
                    #print()

                    if a.size != 0:
                        S = np.nan
                        L = 0
                        L_nan = 0
                        a_ii = 0
                        if yx[a[a_ii], 0] - y_bins[ii_bin] < -TOL:
                            S_temp = yx[a[a_ii],
                                        2] * (yx[a[a_ii], 1] - y_bins[ii_bin])
                            if not np.isnan(S_temp):
                                if np.isnan(S):
                                    S = S_temp
                                else:
                                    S += S_temp
                                L += (yx[a[a_ii], 1] - y_bins[ii_bin])
                            else:
                                L_nan += (yx[a[a_ii], 1] - y_bins[ii_bin])
                            #print(y_bins[ii_bin], yx[a[a_ii], 1], S_temp)
                            a_ii += 1
                        while ii_bin + 1 <= y_bins.shape[
                                0] - 1 and a_ii < a.shape[0] - 1 and yx[
                                    a[a_ii], 1] - y_bins[ii_bin + 1] < -TOL:
                            S_temp = yx[a[a_ii],
                                        2] * (yx[a[a_ii], 1] - yx[a[a_ii], 0])
                            if not np.isnan(S_temp):
                                if np.isnan(S):
                                    S = S_temp
                                else:
                                    S += S_temp
                                L += yx[a[a_ii], 1] - yx[a[a_ii], 0]
                            else:
                                L_nan += yx[a[a_ii], 1] - yx[a[a_ii], 0]
                            #print(yx[a[a_ii], 0], yx[a[a_ii], 1], S_temp)
                            a_ii += 1

                        # check if a_ii-1 was not the last element of a
                        if a_ii < a.size:
                            if yx[a[a_ii], 1] - y_bins[ii_bin + 1] > -TOL:
                                S_temp = yx[a[a_ii], 2] * (y_bins[ii_bin + 1] -
                                                           yx[a[a_ii], 0])
                                if not np.isnan(S_temp):
                                    if np.isnan(S):
                                        S = S_temp
                                    else:
                                        S += S_temp
                                    L += (y_bins[ii_bin + 1] - yx[a[a_ii], 0])
                                else:
                                    L_nan += (y_bins[ii_bin + 1] -
                                              yx[a[a_ii], 0])
                                #print(yx[a[a_ii], 0], y_bins[ii_bin+1], S_temp)
                            elif yx[a[a_ii], 1] - y_bins[ii_bin + 1] < -TOL:
                                S_temp = yx[a[a_ii], 2] * (yx[a[a_ii], 1] -
                                                           yx[a[a_ii], 0])
                                if not np.isnan(S_temp):
                                    if np.isnan(S):
                                        S = S_temp
                                    else:
                                        S += S_temp
                                    L += (yx[a[a_ii], 1] - yx[a[a_ii], 0])
                                else:
                                    L_nan += (yx[a[a_ii], 1] - yx[a[a_ii], 0])
                                # print(yx[a[a_ii], 0], yx[a[a_ii], 1], S_temp)

                        if L != 0:
                            S = S / L
                            w = L / (y_bins[ii_bin + 1] - y_bins[ii_bin])
                        elif L_nan != 0:
                            S = np.nan
                            w = 0
                        #print(L)
                        #print(S)
                        #print(w)
                        if yx[a[0],
                              0] - y_bins[ii_bin] > TOL and not fill_extremity:
                            y_step.append(yx[a[0], 0])
                            y_step.append(y_bins[ii_bin + 1])
                        elif yx[a[-1], 1] - y_bins[
                                ii_bin + 1] < -TOL and not fill_extremity:
                            y_step.append(y_bins[ii_bin])
                            y_step.append(yx[a[-1], 1])
                        else:
                            y_step.append(y_bins[ii_bin])
                            y_step.append(y_bins[ii_bin + 1])
                        x_step.append(S)
                        x_step.append(S)
                        w_step.append(w)

                temp = pd.DataFrame(
                    columns=profile.columns.tolist() + ['weight'],
                    index=range(np.unique(y_step).__len__() - 1))
                temp.update(
                    pd.DataFrame(
                        np.vstack(
                            (np.unique(y_step)[:-1], np.unique(y_step)[:-1] +
                             np.diff(np.unique(y_step)) / 2,
                             np.unique(y_step)[1:], [
                                 x_step[2 * ii]
                                 for ii in range(int(x_step.__len__() / 2))
                             ], w_step)).transpose(),
                        columns=[
                            'y_low', 'y_mid', 'y_sup', variable, 'weight'
                        ],
                        index=temp.index[0:np.unique(y_step).__len__() - 1]))

                # core attribute
                profile_prop = profile.loc[profile.variable == variable].head(
                    1)
                profile_prop['variable'] = variable
                profile_prop = profile_prop.drop('y_low', axis=1)
                profile_prop = profile_prop.drop('y_mid', axis=1)
                profile_prop = profile_prop.drop('y_sup', axis=1)
                profile_prop = profile_prop.drop(variable, axis=1)
                temp.update(
                    pd.DataFrame([profile_prop.iloc[0].tolist()],
                                 columns=profile_prop.columns.tolist(),
                                 index=temp.index.tolist()))
                if 'date' in temp:
                    temp['date'] = temp['date'].astype('datetime64[ns]')

                if display_figure:
                    plt.figure()
                    x = []
                    y = []
                    for ii in range(yx[:, 0].__len__()):
                        y.append(yx[ii, 0])
                        y.append(yx[ii, 1])
                        x.append(yx[ii, 2])
                        x.append(yx[ii, 2])
                    plt.step(x, y, 'bx', label='original')
                    plt.step(x_step,
                             y_step,
                             'ro',
                             linestyle='--',
                             label='discretized')
                    if 'name' in profile_prop.keys():
                        plt.title(profile_prop.name.unique()[0] + ' - ' +
                                  variable)
                    plt.legend()
                    plt.show()
            temp = temp.apply(pd.to_numeric, errors='ignore')

            discretized_profile = pd.concat([discretized_profile, temp])

    return discretized_profile
Esempio n. 54
0
    def extract(self, X):
        h, b = np.histogram(X.flatten(), self._num_bins, density=True)
        cdf = h.cumsum()
        cdf = 255 * cdf / cdf[-1]
        return np.interp(X.flatten(), b[:-1], cdf).reshape(X.shape)
def stroke_average_matrix(d, tstroke=1.0, t1=None, t2=None):
    # start time of data
    if t1 is None:
        t1 = d[0, 0]

    # end time of data
    if t2 is None:
        t2 = d[-1, 0]

    # will there be any strokes at all?
    if t2 - t1 < tstroke:
        print(
            'warning: no complete stroke present, not returning any averages')

    if t1 - np.round(t1) >= 1e-3:
        print('warning: data does not start at full stroke (tstart=%f)' % t1)

    # allocate stroke average matrix
    nt, ncols = d.shape
    navgs = int(np.floor((t2 - t1) / tstroke))
    D = np.zeros([navgs, ncols])
    # running index of strokes
    istroke = 0

    # we had some trouble with float equality, so be a little tolerant
    dt = np.mean(d[1:, 0] - d[:-1, 0])

    # go in entire strokes
    while t1 + tstroke <= t2 + dt:
        # begin of this stroke
        tbegin = t1
        # end of this stroke
        tend = t1 + tstroke
        # iterate
        t1 = tend

        # find index where stroke begins:
        i = np.argmin(abs(d[:, 0] - tbegin))
        # find index where stroke ends
        j = np.argmin(abs(d[:, 0] - tend))

        # extract time vector
        time = d[i:j + 1, 0]
        # replace the first and last time instants with the exact stroke begin/end points, so they are not off by up to dt
        time[0] = tbegin
        time[-1] = tend

        #        print('t1=%f t2=%f i1 =%i i2=%i %f %f' % (tbegin, tend, i, j, d[i,0], d[j,0]))

        # actual integration (trapezoidal rule): the integral of f(x) dx over [x1, x2],
        # divided by (x2 - x1), is the average of the function on that interval. note this
        # script is more precise than the older matlab versions as it is, numerically, of
        # higher order. the results are however very similar (below 1% difference)
        for col in range(0, ncols):
            # use interpolation, but actually only for first and last point of a stroke
            # the others are identical as saved in the data file
            dat = np.interp(time, d[:, 0], d[:, col])

            D[istroke, col] = np.trapz(dat, x=time) / (tend - tbegin)

        istroke = istroke + 1
    return D
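# A quick synthetic check of stroke_average_matrix (my own toy data): a 1 Hz sine sampled
# at 1 kHz should average to roughly zero over every full stroke of length tstroke = 1.0.
import numpy as np

t = np.linspace(0.0, 3.0, 3001)
d = np.column_stack([t, np.sin(2.0 * np.pi * t)])
D = stroke_average_matrix(d, tstroke=1.0)
print(D[:, 1])   # -> values close to 0 for each of the three strokes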
Esempio n. 56
0
    def template_QR(self, l, integrand):
        '''
        Interpolates the Hankel-transformed R(k), Q(k) back onto self.k.
        '''
        kQR, QR = self.sphr.sph(l, integrand)
        return np.interp(self.k, kQR, QR)
Esempio n. 57
0
def interpolate(x, y, total_len, samples=None):
    if samples is None:
        samples = total_len
    xvals = np.linspace(0, total_len, samples)
    yvals = np.interp(xvals, x, y)
    return xvals, yvals
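# Hypothetical usage: resample a 3-point profile measured over a 10 m span onto 5 evenly
# spaced stations.
import numpy as np

x = [0.0, 4.0, 10.0]
y = [1.0, 3.0, 2.0]
xvals, yvals = interpolate(x, y, total_len=10.0, samples=5)
# xvals -> [0.0, 2.5, 5.0, 7.5, 10.0], yvals -> approx. [1.0, 2.25, 2.83, 2.42, 2.0]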
Esempio n. 58
0
# make simulation and observation results comparable by mapping simulation results onto
# observation times and sizes (the latter by interpolation)
dNdD_interp = np.zeros((dNdDp.shape[0], dNdDp.shape[1]))

# observation diameters (um)
obss = obsDp * 1.e-3

for it in range(len(obst) - 1):  # loop through observation time steps
    # observation time (hours)
    obstn = obst[it] / 3600.

    # index of simulation result at this time
    simi = np.where(time_array == ((time_array[time_array >= obstn])[0]))[0]

    # fill model matrix - convert simulation radii to diameter
    dNdD_interp[it, :] = np.interp(obss[0, :], np.squeeze(xfm[simi, :] * 2.),
                                   np.squeeze(dNdD[simi, :]))

# customised colormap (https://www.rapidtables.com/web/color/RGB_Color.html)
colors = [(0.60, 0.0, 0.70), (0, 0, 1), (0, 1.0, 1.0), (0, 1.0, 0.0),
          (1.0, 1.0, 0.0), (1.0, 0.0, 0.0)]  # R -> G -> B
n_bin = 100  # discretizes the colormap interpolation into bins
cmap_name = 'my_list'
# Create the colormap
cm = LinearSegmentedColormap.from_list(cmap_name, colors, N=n_bin)

# set contour levels
levels = (MaxNLocator(nbins=100).tick_values(np.min(dNdDp), np.max(dNdDp)))

# associate colours and contour levels
norm1 = BoundaryNorm(levels, ncolors=cm.N, clip=True)
p1 = ax0.contour(obst[:, 0] / 3600.,
""" Theoretical spectrum of atmospheric charged-current background: """
# Neutrino energy in MeV from table 3 from paper 1-s2.0-S0927650505000526-main:
E1_atmo = np.array([
    0, 13, 15, 17, 19, 21, 24, 27, 30, 33, 38, 42, 47, 53, 60, 67, 75, 84, 94,
    106, 119, 133, 150, 168, 188
])

# differential flux in energy for no oscillation for electron-antineutrinos for solar average at the site of Super-K,
# in (MeV**(-1) * cm**(-2) * s**(-1)):
flux_atmo_NuEbar = 10**(-4) * np.array([
    0., 63.7, 69.7, 79.5, 84.2, 89.4, 95.0, 99.3, 103., 104., 101., 96.1, 83.5,
    65.9, 60.0, 56.4, 51.4, 46.3, 43.0, 37.2, 32.9, 28.8, 24.9, 21.3, 18.3
])
# linear interpolation of the simulated data above to get the differential neutrino flux corresponding to E1,
# differential flux of electron-antineutrinos in (MeV**(-1) * cm**(-2) * s**(-1)):
Flux_atmo_NuEbar = np.interp(E1, E1_atmo, flux_atmo_NuEbar)

# differential flux in energy for no oscillation for muon-antineutrinos for solar average at the site of Super-K,
# in (MeV**(-1) * cm**(-2) * s**(-1)):
flux_atmo_NuMubar = 10**(-4) * np.array([
    0., 116., 128., 136., 150., 158., 162., 170., 196., 177., 182., 183., 181.,
    155., 132., 123., 112., 101., 92.1, 82.2, 72.5, 64.0, 55.6, 47.6, 40.8
])
# linear interpolation of the simulated data above to get the differential neutrino flux corresponding to E1,
# differential flux of muon-antineutrinos in (MeV**(-1) * cm**(-2) * s**(-1)):
Flux_atmo_NuMubar = np.interp(E1, E1_atmo, flux_atmo_NuMubar)

# differential flux in energy for no oscillation for electron-neutrinos for solar average at the site of Super-K,
# in (MeV**(-1) * cm**(-2) * s**(-1)):
flux_atmo_NuE = 10**(-4) * np.array([
    0., 69.9, 74.6, 79.7, 87.4, 94.2, 101., 103., 109., 108., 107., 101., 88.5,
Esempio n. 60
0
    def weighted_quantile(self, values, quantiles, sample_weight=None):
        """Method to calculate weighted quantiles.

        This method is adapted from the "Completely vectorized numpy solution" answer from user
        Alleo (https://stackoverflow.com/users/498892/alleo) to the following stackoverflow question;
        https://stackoverflow.com/questions/21844024/weighted-percentile-using-numpy. This
        method is also licensed under the CC-BY-SA terms, as the original code sample posted
        to stackoverflow (pre February 1, 2016) was.

        Method is similar to numpy.percentile, but supports weights. Supplied quantiles should be
        in the range [0, 1]. Method calculates cumulative % of weight for each observation,
        then interpolates between these observations to calculate the desired quantiles. Null values
        in the observations (values) and 0 weight observations are filtered out before
        calculating.

        Parameters
        ----------
        values : pd.Series or np.array
            A dataframe column with values to calculate quantiles from.

        quantiles : list or np.array
            Weighted quantiles to calculate. Must all be between 0 and 1.

        sample_weight : pd.Series or np.array or None, default = None
            Sample weights for each item in values, must be the same length as values. If
            not supplied then unit weights will be used.

        Returns
        -------
        interp_quantiles : list
            List containing computed quantiles.

        Examples
        --------
        >>> x = CappingTransformer(capping_values={"a": [2, 10]})
        >>> quantiles_to_compute = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
        >>> computed_quantiles = x.weighted_quantile(values = [1, 2, 3], sample_weight = [1, 1, 1], quantiles = quantiles_to_compute)
        >>> [round(q, 1) for q in computed_quantiles]
        [1.0, 1.0, 1.0, 1.0, 1.2, 1.5, 1.8, 2.1, 2.4, 2.7, 3.0]
        >>>
        >>> computed_quantiles = x.weighted_quantile(values = [1, 2, 3], sample_weight = [0, 1, 0], quantiles = quantiles_to_compute)
        >>> [round(q, 1) for q in computed_quantiles]
        [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
        >>>
        >>> computed_quantiles = x.weighted_quantile(values = [1, 2, 3], sample_weight = [1, 1, 0], quantiles = quantiles_to_compute)
        >>> [round(q, 1) for q in computed_quantiles]
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]
        >>>
        >>> computed_quantiles = x.weighted_quantile(values = [1, 2, 3, 4, 5], sample_weight = [1, 1, 1, 1, 1], quantiles = quantiles_to_compute)
        >>> [round(q, 1) for q in computed_quantiles]
        [1.0, 1.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
        >>>
        >>> computed_quantiles = x.weighted_quantile(values = [1, 2, 3, 4, 5], sample_weight = [1, 0, 1, 0, 1], quantiles = [0, 0.5, 1.0])
        >>> [round(q, 1) for q in computed_quantiles]
        [1.0, 2.0, 5.0]

        """

        if sample_weight is None:
            sample_weight = np.ones(len(values))
        else:
            sample_weight = np.array(sample_weight)

        if np.isnan(sample_weight).sum() > 0:
            raise ValueError("null values in sample weights")

        if np.isinf(sample_weight).sum() > 0:
            raise ValueError("infinite values in sample weights")

        if (sample_weight < 0).sum() > 0:
            raise ValueError("negative weights in sample weights")

        if sample_weight.sum() <= 0:
            raise ValueError("total sample weights are not greater than 0")

        values = np.array(values)
        quantiles = np.array(quantiles)

        nan_filter = ~np.isnan(values)
        values = values[nan_filter]
        sample_weight = sample_weight[nan_filter]

        zero_weight_filter = ~(sample_weight == 0)
        values = values[zero_weight_filter]
        sample_weight = sample_weight[zero_weight_filter]

        sorter = np.argsort(values, kind="stable")
        values = values[sorter]
        sample_weight = sample_weight[sorter]

        weighted_quantiles = np.cumsum(sample_weight)
        weighted_quantiles = weighted_quantiles / np.sum(sample_weight)

        interp_quantiles = list(np.interp(quantiles, weighted_quantiles, values))

        return interp_quantiles