Example #1
def smoothfit(x, y, smooth=0, res=1000):
    """
    Smooth data of the form f(x) = y with a spline
    """
    z = y.copy()
    w = isnan(z)
    z[w] = 0
    spl = UnivariateSpline(x, z, w=~w)
    spl.set_smoothing_factor(smooth)
    xs = linspace(min(x), max(x), res)
    ys = spl(xs)
    ys[ys < 0] = 0
    if w[0]:
        if len(where(~w)[0]):
            first = where(~w)[0][0]
            first = x[first]
            first = where(xs >= first)[0][0] - 1
            ys[:first] = nan
    if w[-1]:
        if len(where(~w)[0]):
            last = where(~w)[0][-1]
            last = x[last]
            last = where(xs >= last)[0][0] + 1
            ys[last:] = nan
    return xs, ys
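A minimal usage sketch for smoothfit (hypothetical data; assumes the numpy names isnan, linspace, where, nan and scipy's UnivariateSpline are imported, as the snippet implies):

import numpy as np

x = np.linspace(0, 10, 50)
y = np.sin(x)
y[:5] = np.nan                     # leading gap: masked out with zero weight
xs, ys = smoothfit(x, y, smooth=0.5, res=200)
# ys is the smoothed, clipped-to-non-negative curve; points over the leading gap are NaN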
Example #2
def grad_dist(curdata, ax):
    #gaussian kernel density estimation           
    x=np.linspace(0,90,91)
    kde=gaussian_kde(curdata)
    line,=ax.plot(x,kde(x), '-', label=hl[j][0:-2]+"0")
    curcolor=plt.getp(line,'color')

##    #plotting histogram
##    ax.hist(curdata,bins=int(max(curdata))+1,
##             normed=True, histtype='step',
##             color=curcolor, linewidth=0.5)

    #defining splines for the kde function and its first and second derivatives
    x=np.linspace(0,90,901)
    s=UnivariateSpline(x,kde(x),s=0,k=3)
    s1=UnivariateSpline(x,s(x,1),s=0,k=3)
    s2=UnivariateSpline(x,s1(x,1),s=0,k=3)

    #identifying local maxima (where s1=0, s2<0, and s>0.005)
    maxima=s1.roots()[np.where(s2(s1.roots())<0)[0]]
    maxima=maxima[np.where(s(maxima)>0.005)[0]]
    #s_max=maxima[-1]
    s_max=maxima[np.argmax(s(maxima))]
    ax.plot(s_max, s(s_max),'o', color=curcolor)

    #identifying steepest segment after maxima (where x>=maxima, s1<0)
    x2=x[np.where(x>=s_max)[0]]
    slope=s1(x2)
    s1_min=x2[np.argmin(slope)]
    ax.plot(s1_min,s(s1_min),'o', color=curcolor)

    print(round(s_max, 1), round(s1_min, 1))

    return round(s_max,1),round(s1_min,1)
Example #3
def find_extrema_spline(x,y,k=4,s=0,**kwargs):
    """
    find local extrema of y(x) by taking derivative of spline

    Parameters
    ----------

    x,y : array-like
      find extrema of y(x)

    k : int
      order of spline interpolation (must be 4: scipy only implements
      roots() for cubic splines, so the derivative has to be cubic)

    s : number
      s parameter sent to scipy spline interpolation (used for smoothing)

    **kwargs : extra arguments to UnivariateSpline


    Returns
    -------
    sp : UnivariateSpline object

    x_max,y_max : array
      value of x and y at extrema(s)
    """


    sp = UVSpline(x,y,k=k,s=s,**kwargs)

    x_max = sp.derivative().roots()

    y_max = sp(x_max)

    return sp,x_max,y_max
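A quick check of find_extrema_spline (synthetic data; UVSpline is assumed to alias scipy.interpolate.UnivariateSpline, as the snippet implies). The k=4 default matters: the derivative of a quartic spline is cubic, the only order for which scipy implements roots():

import numpy as np

x = np.linspace(0, 2 * np.pi, 200)
sp, x_ext, y_ext = find_extrema_spline(x, np.sin(x), k=4, s=0)
# x_ext is close to [pi/2, 3*pi/2], the maximum and minimum of sin(x)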
Example #4
def fwhm(x, y, bg=[0, 100, 150, 240]):
    """
    Evaluates the full width half maximum of y in units of x.
  
    Parameters
    ----------
    x : numpy.array
    y : numpy.array
    bg : list
      Background sampling limits
  
    Returns
    -------
    fwhm : number
      Full width half maximum
    """

    #  xnew = copy(x)
    #  ynew = copy(y)

    #  xc = x[(x>bg[0])&(x<bg[1]) | (x>bg[2])&(x<bg[3])]
    #  yc = y[(x>bg[0])&(x<bg[1]) | (x>bg[2])&(x<bg[3])]

    #  bgfit = polyfit(xc,yc,1)

    #  ynew = ynew-polyval(bgfit,xnew)

    xnew, ynew = rmbg(x, y, bg)

    f = UnivariateSpline(xnew, ynew / max(ynew) - 0.5, s=0)
    fwhm = f.roots()[1] - f.roots()[0]

    return fwhm
Example #5
def integrated_rate_test(mx=100., annih_prod='BB'):
    # This currently doesn't work
    file_path = MAIN_PATH + "/Spectrum/"
    file_path += '{}'.format(int(mx)) + 'GeV_' + annih_prod + '_DMspectrum.dat'

    spectrum = np.loadtxt(file_path)
    imax = 0
    for i in range(len(spectrum)):
        if spectrum[i, 1] < 10 or i == (len(spectrum) - 1):
            imax = i
            break
    spectrum = spectrum[0:imax, :]
    Nevents = 10. ** 5.
    spectrum[:, 1] /= Nevents
    test = interp1d(np.log10(spectrum[:, 0] / mx), np.log10(mx * np.log(10.) * spectrum[:, 1]), kind='cubic', bounds_error=False, fill_value=0.)
    test2 = interp1d(spectrum[:, 0], spectrum[:, 0] * spectrum[:, 1], kind='cubic', bounds_error=False, fill_value=0.)
    e_gamma_tab = np.logspace(0., np.log10(spectrum[-1, 0]), 200)
    print(np.column_stack((np.log10(spectrum[:, 0] / mx), np.log10(mx * np.log(10.) * spectrum[:, 1]))))
    xtab = np.linspace(np.log10(1. / mx), 0., 200)
    ng2 = np.trapz(10.**test(xtab) / 10. ** xtab, xtab) / np.log(10.)
    mean_e2 = np.trapz(test2(e_gamma_tab), e_gamma_tab)
    rate_interp = UnivariateSpline(spectrum[:, 0], spectrum[:, 1])
    avg_e_interp = UnivariateSpline(spectrum[:, 0], spectrum[:, 0] * spectrum[:, 1])
    num_gamma = rate_interp.integral(1., spectrum[-1, 0])
    mean_e = avg_e_interp.integral(1., spectrum[-1, 0])


    print('DM Mass: ', mx)
    print('Annihilation Products: ', annih_prod)
    print('Number of Gammas > 1 GeV: ', num_gamma, ng2)
    print('<E> Gamma: ', mean_e, mean_e2)

    return
Example #6
def fwhm(x, y, k=10, ret_roots=False):
    """
    Determine the full width at half maximum of a peaked set of points, x and y.

    Assumes that there is only one peak present in the dataset.  The function
    uses a spline interpolation; this function's k is passed to
    scipy.interpolate.UnivariateSpline as the smoothing parameter 's'.
    """

    class MultiplePeaks(Exception):
        pass

    class NoPeaksFound(Exception):
        pass

    half_max = np.max(y) / 2.0
    s = UnivariateSpline(x, y - half_max, s=k)
    roots = s.roots()

    if len(roots) > 2:
        # Multiple crossings: keep the two that straddle the maximum, i.e.
        # the largest root below the peak and the smallest root above it
        maxvel = x[np.argmax(y)]
        roots = np.array((roots[roots < maxvel].max(),
                          roots[roots > maxvel].min()))
    elif len(roots) < 2:
        raise NoPeaksFound("No proper peaks were found in the data set; likely "
                           "the dataset is flat (e.g. all zeros).")
    if ret_roots:
        return roots[0], roots[1]

    return abs(roots[1] - roots[0])
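A sanity check of the half-maximum/roots trick on a synthetic Gaussian, for which the exact FWHM is 2*sqrt(2*ln 2)*sigma ≈ 2.3548*sigma (illustrative only; passing k=0 makes the spline interpolate exactly):

import numpy as np

x = np.linspace(-5, 5, 500)
y = np.exp(-x ** 2 / 2)            # Gaussian with sigma = 1
print(fwhm(x, y, k=0))             # ≈ 2.3548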
Example #7
def calc_conductance_curve(V_list,T,R_T,C_sigma):
    #test_voltages = arange(-v_max,v_max,v_step)
    test_currents = []
    for V in V_list:
        test_currents.append(calc_current(V,T,R_T,C_sigma))
        #print "V: %g, current %g"%(V,test_currents[-1])

    ## calc conductances manually
    #test_conductances = []
    #for idx,V in enumerate (test_currents[1:-2]):
    #    if idx==0:
    #        print idx
    #    test_conductances.append((test_currents[idx+2]-test_currents[idx])/(2.0*v_step))
    #
    #test_voltages_G = test_voltages[1:-2]

    #
    # SPLINE
    #
    spline = UnivariateSpline(V_list,test_currents,s=0)
    #print "test_conductances"
    #indices = [x for x, y in enumerate(col1) if (y >0.7 or y<-0.7)]
    test_conductances = []
    for v_iter in V_list:
        test_conductances.append(spline.derivatives(v_iter)[1])
    return test_conductances
Example #8
def imageSlice(image, xc, yc, width):
	ylen, xlen = image.shape

	xstart = max(0, xc - width / 2)
	xend = min(xlen, xc + width / 2)

	# I cannot get array slicing to work for the life of me
	slice = []
	for i in range(int(xstart), int(xend)):
		slice.append(image[yc, i])

	# https://stackoverflow.com/questions/10582795/finding-the-full-width-half-maximum-of-a-peak/10583774#10583774

	shiftedSlice = []

	halfMax = max(slice) / 2
	baseline = numpy.mean(image)

	for y in slice:
		shiftedSlice.append(y - halfMax - baseline)

	x = numpy.linspace(0, width, len(shiftedSlice))  # one sample per extracted pixel
	spline = UnivariateSpline(x, shiftedSlice, s=0)
	r1, r2 = spline.roots()
	#r1 = 0
	#r2 = 0

	return (slice, r2 - r1)
Example #9
File: flux.py Project: lukawr/flux
def xs_interp(inp_ene, inp_xs, inp_ene_interp, plot_cs):

    # inp_ene: energies from Talys; inp_xs: cross sections from Talys;
    # inp_ene_interps: energies at which to interpolate
    inp_ene_interps = inp_ene_interp
    out_xs_A = []
    out_xs = np.array([])  # interpolated xs
    plot_fig = plot_cs
    
    x_ene = np.linspace (0,660,3301)
    
    spl = UnivariateSpline(inp_ene, inp_xs, s = 0.25)
    y_xs = spl(x_ene)

    for inp_ene_interp in inp_ene_interps:
        out_xs_A.append(spl(inp_ene_interp))
    
    out_xs = np.append(out_xs, out_xs_A)

    # optional_plot

    if plot_fig:
        plt.plot (inp_ene, inp_xs, 'ro', ms = 5)
        plt.plot (x_ene, y_xs, lw = 3, c = 'g', alpha = 0.6)
        plt.plot (inp_ene_interps, out_xs, 'o', ms = 3)
        plt.show()
    
    return out_xs
Example #10
    def response(self, disturbance_vector):
        """ Returns the response of the sensor due to a disturbance

            The acceleration imposed on the sensor is estimated with the following equation:
                acceleration(t) = d2stress(t)/dt2 * material_depth/material_prop['modulus']
            And the sensor response takes into account the frequency response, estimated by a normal curve
                freq_response(f) = norm(scale = self.bandwidth/2, loc=self.resonant_freq).pdf(f_array)
                freq_response(f) /= max(freq_response)
                response(t) = ifft(fft(acceleration) * freq_response)


            Args:
                disturbance_vector (list): list with a temporal array, in the 0 index, and a 
                stress array, in the 1 index.

            Returns:
                list: with two arrays, the temporal array and the voltage response array.
        """
        const = self.material_depth / self.material_prop['modulus']
        t_vector = disturbance_vector[0]
        # using the scipy UnivariateSpline to compute the second derivative
        data_spl = UnivariateSpline(t_vector, disturbance_vector[1], s=0, k=3)
        acceleration = data_spl.derivative(n=2)(t_vector) * const
        # we need to take the frequency response of the acceleration stimuli
        N = len(disturbance_vector[1])
        T = t_vector[1] - t_vector[0]
        f_array = np.fft.fftfreq(N, T)
        freq_acc = np.fft.fft(acceleration)
        # we need to apply a filter factor related to the frequency response of the sensor
        freq_response = self.frequency_response(N, (0, max(f_array)), mirror=True)[1]
        voltage = np.fft.ifft(freq_acc * freq_response) * self.sensitivity
        return voltage
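The central step above, a second derivative taken through an interpolating spline, can be checked in isolation; a small sketch on a known signal (illustrative, not part of the original class):

import numpy as np
from scipy.interpolate import UnivariateSpline

t = np.linspace(0, 1, 500)
f = np.sin(2 * np.pi * t)
d2 = UnivariateSpline(t, f, s=0, k=3).derivative(n=2)(t)
# away from the endpoints, d2 tracks -(2*pi)**2 * sin(2*pi*t) closely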
Example #11
def getCurvatureForPoints(arcLengthList, fx_s, fy_s, smoothing=None):
	x, x_, x__, y, y_, y__ = getFirstAndSecondDerivForTPoints(arcLengthList, fx_s, fy_s)
	curvature = abs(x_ * y__ - y_ * x__) / np.power(x_ ** 2 + y_ ** 2, 1.5)  # 1.5 avoids Python 2 integer division of 3 / 2
	fCurvature = UnivariateSpline(arcLengthList, curvature, s=smoothing)
	dxcurvature = fCurvature.derivative(1)(arcLengthList)
	dx2curvature = fCurvature.derivative(2)(arcLengthList)
	return curvature, dxcurvature, dx2curvature
Example #12
def kde_minmode(data, x, max_num_mode, min_mode_pdf):
    kde = gaussian_kde(data)
    f=kde.factor
    f_list=np.linspace(f,(data.max()-data.min()),100)
    s=UnivariateSpline(x,kde(x),s=0)
    s1=UnivariateSpline(x,s(x,1),s=0)
    s2=UnivariateSpline(x,s1(x,1),s=0)
    extrema=s1.roots()
    
    maxima=extrema[np.where((s2(extrema)<0)*(s(extrema)>=min_mode_pdf))]
    
    if len(maxima)>max_num_mode:
        for q in range(1,len(f_list)):
            f=f_list[q]
            kde2=gaussian_kde(data,bw_method=f)
            s=UnivariateSpline(x,kde2(x),s=0)
            s1=UnivariateSpline(x,s(x,1),s=0)
            s2=UnivariateSpline(x,s1(x,1),s=0)
            extrema=s1.roots()
            maxima=extrema[np.where((s2(extrema)<0)*(s(extrema)>=min_mode_pdf))]
            if len(maxima)<=max_num_mode:
##                print 'modes: ',maxima
                break
        kde=gaussian_kde(data,bw_method=f)
##    else:
##        print maxima

    return kde,maxima
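A hedged usage sketch for kde_minmode on a bimodal sample (gaussian_kde comes from scipy.stats, which the snippet assumes is imported):

import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(0, 1, 300), rng.normal(6, 1, 300)])
x = np.linspace(data.min() - 1, data.max() + 1, 400)
kde, maxima = kde_minmode(data, x, max_num_mode=2, min_mode_pdf=0.01)
# maxima holds the x positions of the retained modes (near 0 and 6 here)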
Example #13
    def smooth(self, genome, which_x, which_y):
        interpolationPointsQty = SMOOTHING_WINDOW
        # integer division (//) keeps the Python 2 semantics of the original
        which_y_InterpolationNeighborhood = interpolationPointsQty // 2
        minimunInterpolationNeighborhoodSize = interpolationPointsQty // 4

        if which_y - interpolationPointsQty // 2 < 0:
            interpolationPointsQty -= abs(which_y - which_y_InterpolationNeighborhood) * 2
            which_y_InterpolationNeighborhood = interpolationPointsQty // 2

        elif which_y + interpolationPointsQty // 2 > genome.getHeight() - 1:
            interpolationPointsQty -= (which_y + which_y_InterpolationNeighborhood - (genome.getHeight() - 1)) * 2
            which_y_InterpolationNeighborhood = interpolationPointsQty // 2

        if which_y_InterpolationNeighborhood >= minimunInterpolationNeighborhoodSize:
            x = np.ndarray(interpolationPointsQty)
            y = np.ndarray(interpolationPointsQty)

            for k in range(interpolationPointsQty):
                poseToSmooth = which_y - which_y_InterpolationNeighborhood + k
                x[k] = poseToSmooth
                y[k] = genome[poseToSmooth][which_x]

            spl = UnivariateSpline(x, y)
            spl.set_smoothing_factor(SPLINE_SMOOTHING_FACTOR_SPLINE / 10)

            for k in range(interpolationPointsQty):
                if y[k] != sysConstants.JOINT_SENTINEL:
                    newValue = spl(int(x[k]))
                    genome.setItem(int(x[k]), which_x, newValue)
Example #14
	def __init__(self, yp, workdir, scale, sm=200):
		'''
		Constructor
		'''
		yp = np.array(yp)
		self.l = len(yp) // 2
		self.xPos = (self.l - 2) // 2  # fPos = (self.l - 2) // 2 + 2
		tnsc = 2 / scale
		print(tnsc)
		plt.rcParams['font.size'] = 24
		plt.rcParams['lines.linewidth'] = 2.4
		self.workdir = workdir

		avProfilePoints = yp[:self.l]
		self.avx = np.append(np.append([0], np.sort(np.tanh(tnsc*avProfilePoints[:self.xPos]))), [1])
		self.av = avProfilePoints[self.xPos:]

		sigmaProfilePoints = yp[self.l:]
		self.sigmax = np.append(np.append([0], np.sort(np.tanh(tnsc*sigmaProfilePoints[:self.xPos]))), [1])
		self.sigma = sigmaProfilePoints[self.xPos:]

		self.m = UnivariateSpline(self.avx, self.av)
		print("Created spline with " + str(len(self.m.get_knots())) + " knots")

		self.s = UnivariateSpline(self.sigmax, self.sigma)
		print("Created spline with " + str(len(self.s.get_knots())) + " knots")
Example #15
def get_derivatives(xs, ys, fd=False):
    """
    return the derivatives of y(x) at the points x
    if scipy is available a spline is generated to calculate the derivatives
    if scipy is not available the left and right slopes are calculated, if both exist the average is returned
    putting fd to zero always returns the finite difference slopes
    """
    try:
        if fd:
            raise SplineInputError('no spline wanted')
        if len(xs) < 4:
            er = SplineInputError('too few data points')
            raise er
        from scipy.interpolate import UnivariateSpline
        spline = UnivariateSpline(xs, ys)
        d = spline.derivative(1)(xs)
    except (ImportError, SplineInputError):
        d = []
        for n in range(len(xs)):
            m, left, right = 0, 0, 0  # reset per point; m counts available slopes
            if n > 0:
                left = (ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1])
                m += 1
            if n < len(xs) - 1:
                right = (ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n])
                m += 1
            # average the available one-sided slopes
            d.append((left + right) / m)
    return d
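A small check of get_derivatives (assumes SplineInputError is defined, as the snippet implies); fd=True forces the finite-difference fallback, so the two paths can be compared:

import numpy as np

xs = np.linspace(0, 1, 20)
ys = xs ** 2
d_spline = get_derivatives(xs, ys)           # spline path, roughly 2*xs
d_fd = get_derivatives(xs, ys, fd=True)      # one-sided slopes at the endpoints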
Example #16
def halbwertsbreite(x, y):
    spline = UnivariateSpline(x, y-np.max(y)/2, s=0)
    r1, r2 = spline.roots() # find the roots

    lambda1 = 2*d*np.sin(np.deg2rad(r1))
    lambda2 = 2*d*np.sin(np.deg2rad(r2))
    E1 = h*c/lambda1
    E2 = h*c/lambda2
    DE = E1 - E2
    print('Half-maximum angles: {0:.5e} deg, {1:.5e} deg'.format(r1, r2))
    print('Full width at half maximum: {0:.5e}'.format(np.abs(r1-r2)))
    print('Energy resolution: {0:.5e} eV'.format(DE))

    xnew = np.linspace(min(x), max(x))
    ynew = spline(xnew)

    plt.plot(x, y, 'rx', label='measured data')
    plt.plot(xnew, ynew+np.max(y)/2,'b-', label='interpolation')
    plt.axvline(r1)
    plt.axvline(r2)

    plt.grid()
    plt.legend()
    plt.xlabel("doppelter Kristallwinkel in Grad")
    plt.ylabel(u"Zählrate")
Example #17
class AlphaInterpolator(object):
    def __init__(self, a, x, y):

        # Drop NaN values to avoid fitpack errors
        self._data = pd.DataFrame(np.array([a, x, y]).T, columns=["a", "x", "y"])
        self._data.dropna(inplace=True)

        self._create_interpolating_polynomials()
        self._find_path_length()

    def _create_interpolating_polynomials(self):
        self.x_interp = UnivariateSpline(self._data.a, self._data.x, s=0)
        self.y_interp = UnivariateSpline(self._data.a, self._data.y, s=0)

    def _find_path_length(self):
        dx_interp = self.x_interp.derivative()
        dy_interp = self.y_interp.derivative()

        ts = np.linspace(0, 1, 200)
        line_length = cumtrapz(np.sqrt(dx_interp(ts) ** 2 + dy_interp(ts) ** 2), x=ts, initial=0.0)

        line_length /= line_length.max()

        # Here we invert the line_length (ts) function, in order to evenly
        # sample the pareto front
        self.l_interp = UnivariateSpline(line_length, ts, s=0)

    def sample(self, num):
        """ Return estimates of alpha values that evenly sample the pareto
        front """

        out = self.l_interp(np.linspace(0, 1, num))
        out[0] = 0.0
        out[-1] = 1.0
        return out
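A usage sketch for AlphaInterpolator (hypothetical parabolic front; pandas and scipy.integrate.cumtrapz are assumed imported, as the class implies). Note that _find_path_length samples a on [0, 1], so the alphas here span that interval:

import numpy as np

a = np.linspace(0, 1, 50)
interp = AlphaInterpolator(a, x=a, y=(1 - a) ** 2)
alphas = interp.sample(10)   # ~evenly spaced along the curve, endpoints pinned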
Example #18
File: qha.py Project: gmatteo/abipy
    def get_t_for_vols(self, vols, t_max=1000):
        """
        Find the temperatures corresponding to a specific volume.
        The search is performed interpolating the V(T) dependence with a spline and
        finding the roots with of V(t) - v.
        It may return more than one temperature for a volume in case of non monotonic behavior.

        Args:
            vols: list of volumes
            t_max: maximum temperature considered for the fit

        Returns:
            A list of lists of temperatures. For each volume more than one temperature can
            be identified.
        """

        if not isinstance(vols, (list, tuple, np.ndarray)):
            vols = [vols]


        f = self.fit_energies(0, t_max, t_max+1)

        temps = []
        for v in vols:
            spline = UnivariateSpline(f.temp, f.min_vol - v, s=0)
            temps.append(spline.roots())

        return temps
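The inversion above is just "roots of a spline minus the target value"; the same idea in isolation, with made-up monotonic data:

import numpy as np
from scipy.interpolate import UnivariateSpline

temp = np.linspace(0, 1000, 101)
vol = 10.0 + 1e-4 * temp                       # fake monotonic V(T)
roots = UnivariateSpline(temp, vol - 10.05, s=0).roots()
# roots ≈ [500.]; a non-monotonic V(T) would return several temperatures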
Example #19
def smoothing(x,y,err=None,k=5,s=None,newx=None,derivative_order=0):
  # remove NaNs
  idx = np.isfinite(x) & np.isfinite(y)
  if idx.sum() != len(x): x=x[idx]; y=y[idx]

  # if we don't need to interpolate, use same x as input
  if newx is None: newx=x

  if err is None:
    w=None
  elif err == "auto":
    n=len(x)
    imin = int(max(0,n/2-20))
    imax = imin + 20
    idx = range(imin,imax)
    p = np.polyfit(x[idx],y[idx],2)
    e = np.std( y[idx] - np.polyval(p,x[idx] ) )
    w = np.ones_like(x)/e
  else:
    w=np.ones_like(x)/err
  from scipy.interpolate import UnivariateSpline
  if (s is not None):
    s = len(x)*s
  s = UnivariateSpline(x, y,w=w, k=k,s=s)
  if (derivative_order==0):
    return s(newx)
  else:
    try:
      # derivative_order may be an iterable of derivative orders
      len(derivative_order)
      return np.asarray([s.derivative(d)(newx) for d in derivative_order])
    except TypeError:
      return s.derivative(derivative_order)(newx)
Example #20
	def getspline_Sold(self):
		"""Cubic spline interpolation of entropy and convective velocity.
		"""
		want = self.mass < max(self.mass)*self.mass_cut
		S_old = UnivariateSpline(self.mass[want], self.Sgas[want], k=self.spline_k, s=self.spline_s, ext=self.spline_ext)
		dS_old = S_old.derivative()
		vconv_Sold = UnivariateSpline(self.mass[want], self.vconv[want], k=self.spline_k, s=self.spline_s, ext=self.spline_ext)
		return [S_old, dS_old, vconv_Sold] 
Example #21
def fwhm(data, ypos=0.5):
    spatial = data.sum(1)
    spatial = spatial-np.min(spatial)
    spatial_range = range(0, len(spatial))
    spline = UnivariateSpline(spatial_range, (spatial -np.max(spatial)*ypos), s=0.1, k=3)
    roots = spline.roots()
    if len(roots) < 2:
        return np.inf, (-np.inf, +np.inf)
    return roots[-1]-roots[0], roots
Example #22
def FWHM_scipy(X, Y):
    """Computing FWHM (Full width at half maximum)"""
    try:
        from scipy.interpolate import UnivariateSpline
        spline = UnivariateSpline(X, Y, s=0)
        r1, r2 = spline.roots()  # find the roots
        return r2 - r1  # return the difference (full width)
    except ImportError:
        return FWHM(X, Y)
Example #23
 def __init__(self, x, y, w=None, bbox=[None, None], k=3,
              xname=None, xunits=None, yname=None, yunits=None):
     """Constructor.
     """
     xUnivariateSplineBase.__init__(self, x, y, xname, xunits, yname, yunits)
     _x = numpy.log10(x)
     _y = numpy.log10(y)
     UnivariateSpline.__init__(self, _x, _y, w, bbox, k, s=None)
     self.__integral_spline = None
Example #24
def QuasiPWeight(ReSE_A):
	''' calculating the Fermi-liquid quasiparticle weight (residue) Z '''
	N = len(En_A)
	#M = int(1e-3/dE) if dE < 1e-3 else 1	# very fine grids lead to oscillations
	# replace 1 with M below to dilute the grid
	ReSE = UnivariateSpline(En_A[int(N/2-10):int(N/2+10):1],ReSE_A[int(N/2-10):int(N/2+10):1])
	dReSEdw = ReSE.derivatives(0.0)[1]
	Z = 1.0/(1.0-dReSEdw)
	return sp.array([Z,dReSEdw])
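The key call is UnivariateSpline.derivatives(x0), which returns the value and derivatives at a single point; a toy check of the Z formula with a linear self-energy (slope -1 at w = 0 gives Z = 0.5):

import numpy as np
from scipy.interpolate import UnivariateSpline

w = np.linspace(-1.0, 1.0, 21)
slope = UnivariateSpline(w, -1.0 * w, s=0).derivatives(0.0)[1]
Z = 1.0 / (1.0 - slope)            # 1 / (1 - (-1)) = 0.5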
Example #25
    def interpolate(self, genome, which_x, which_y, wich_y_is_fixed_data=0):
        interpolationPointsQty = SMOOTHING_WINDOW
        # integer division (//) keeps the Python 2 semantics of the original
        which_y_InterpolationNeighborhood = interpolationPointsQty // 2
        minimunInterpolationNeighborhoodSize = interpolationPointsQty // 4
        array_size = 0

        if which_y - which_y_InterpolationNeighborhood < 0:
            interpolationPointsQty -= abs(which_y - which_y_InterpolationNeighborhood) * 2
            which_y_InterpolationNeighborhood = interpolationPointsQty // 2

        elif which_y + interpolationPointsQty // 2 > genome.getHeight() - 1:
            interpolationPointsQty -= (which_y + which_y_InterpolationNeighborhood - (genome.getHeight() - 1)) * 2
            which_y_InterpolationNeighborhood = interpolationPointsQty // 2

        interpolationWindowRadius = interpolationPointsQty // 4


        if which_y_InterpolationNeighborhood >= minimunInterpolationNeighborhoodSize:
            array_size = interpolationPointsQty - interpolationWindowRadius * 2
            if wich_y_is_fixed_data:
                array_size += 1

            x = np.ndarray(array_size)
            y = np.ndarray(array_size)

            splineIndexCounter = 0
            for k in range(interpolationPointsQty + 1):
                poseToSmooth = which_y - which_y_InterpolationNeighborhood + k
                if poseToSmooth <= which_y - interpolationWindowRadius or poseToSmooth > which_y + interpolationWindowRadius:
                    x[splineIndexCounter] = poseToSmooth
                    y[splineIndexCounter] = genome[poseToSmooth][which_x]
                    splineIndexCounter += 1

            if wich_y_is_fixed_data:
                x[splineIndexCounter] = which_y
                y[splineIndexCounter] = genome[which_y][which_x]
                splineIndexCounter += 1

            if genome[which_y - interpolationWindowRadius][which_x] == genome[which_y + interpolationWindowRadius][which_x]:
                spl = interp1d(x, y)
            else:
                # sort the sample points so x is strictly increasing for the spline
                x_order = np.argsort(x)
                spl = UnivariateSpline(x[x_order], y[x_order])
                spl.set_smoothing_factor(SPLINE_SMOOTHING_FACTOR_INTERPOLATION / 10)

            for k in range(interpolationPointsQty):
                iter = which_y - which_y_InterpolationNeighborhood + k
                if genome[iter][which_x] != sysConstants.JOINT_SENTINEL:
                    if iter > which_y - interpolationWindowRadius and iter <= which_y + interpolationWindowRadius:
                        if wich_y_is_fixed_data: #if fixed data do not change the which_y point
                            if iter != which_y:
                                newValue = spl(iter)
                                genome.setItem(iter, which_x, newValue)
                        else:
                            newValue = spl(iter)
                            genome.setItem(iter, which_x, newValue)
Example #26
 def MTF50(self, MTFx,MTFy):
     '''
     return object resolution as [line pairs/mm]
            where MTF=50%
            see http://www.imatest.com/docs/sharpness/
     '''
     if self.mtf_x is None:
         self.MTF()
     f = UnivariateSpline(self.mtf_x, self.mtf_y-0.5)
     return f.roots()[0]
Example #27
def find_critical_temperature(df, offset):
    curve = []
    temps = np.round(df.temperature,2).unique()
    for temp in temps:
        curve.append([temp,df.loc[np.round(df.temperature,2) == temp].TTc.mean()])
    curve = np.array(curve)
    curve = curve[curve[:,0].argsort()]
    f = UnivariateSpline(curve[:,0],curve[:,1]-offset)
    root = f.roots()
    return root[0]
Example #28
def interpolate_to_find_crossover(frequencies, spl):
    from scipy.interpolate import UnivariateSpline

    s = UnivariateSpline(frequencies, spl, s=0)
    root = []
    for r in s.roots():
        if r>=1000 and r<=5000:
            root.append(r)

    if len(root)==1: return root[0]
    else: return 0
Example #29
def thermCond_fit_UnivariateSpline_model(T, data=[]):
    # Fitting model: builds a spline through the given (x, y) samples and
    # evaluates it at T
    x = data[0]
    y = data[1]
    if len(x) <= 5:
        order = len(x) - 1
    else:
        order = 5
    fit = UnivariateSpline(x, y, k=order)
    thermCond = fit(T)
    return thermCond
Example #30
    def get_profile_func(self, profile_x, profile_y):

        from scipy.interpolate import UnivariateSpline
        profile_ = UnivariateSpline(profile_x, profile_y, k=3, s=0,
                                    bbox=[0, 1])
        integ = profile_.integral(0, 1)

        def profile(o, x, slitpos):
            return profile_(slitpos) / integ

        return profile
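UnivariateSpline.integral(a, b) integrates the fitted spline exactly, which is what normalizes the profile above; a small standalone check:

import numpy as np
from scipy.interpolate import UnivariateSpline

xx = np.linspace(0, 1, 50)
prof = UnivariateSpline(xx, xx * (1 - xx), k=3, s=0, bbox=[0, 1])
print(prof.integral(0, 1))         # ≈ 1/6, the exact integral of x*(1-x)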
Example #31
if __name__ == '__main__':
    start = time.time()

    T = 850.0  # Unit: K
    P = 1.07 * 1.0e5  # Unit: Pa (1.07 bar)

    moleFraction = 'C6H10:0.008, HE:0.992'
    reactionIndex = 2280  # C3H5-A + C6H10 = C3H6 + C6H9
    targetSpc = ['C3H6']
    factorLgList = [float(i) / 100 for i in range(-50, 10, 10)]
    targetMoleFractionFile = '850.txt'

    [factorList,
     moleFractionC3H6List] = traverseFactor(T, P, moleFraction, factorLgList,
                                            reactionIndex, targetSpc)
    para = UnivariateSpline(moleFractionC3H6List, factorList)

    targetMoleFraction = readTargetMoleFraction(targetMoleFractionFile)
    optimizedFactor = para(targetMoleFraction)
    with open('optimizedFactor.txt', 'w', newline='') as f:
        output = csv.writer(f, delimiter=',', quoting=csv.QUOTE_ALL)
        output.writerow(['Optimized F:', optimizedFactor])
        output.writerow(['targetMoleFraction', targetMoleFraction])
        for i in range(len(moleFractionC3H6List)):
            output.writerow([factorList[i], moleFractionC3H6List[i]])

    plt.plot(factorList, moleFractionC3H6List, 'r--', optimizedFactor,
             targetMoleFraction, 'bs')
    plt.xlabel('factor')
    plt.ylabel('C3H6 Mole Fraction / ppm')
    plt.savefig('factor_vs_C3H6.png')
Example #32
def smooth_spline(x, y, s):
    s = UnivariateSpline(x, y, s=s)
    return s(x)
Example #33
 def __call__(self, x, nu=0):
     return UnivariateSpline.__call__(self, x % self.T, nu=nu)
Example #34
def find_point_in_lips(points_upper, points_lower, points_upper_inside,
                       points_lower_inside, rot_angle, displacement, radius):

    #find where a circle with radius (radius) and center at the corner of the
    #lip encounters the spline representing the upper lip
    rot_matrix = np.array([[np.cos(rot_angle),
                            np.sin(rot_angle)],
                           [-np.sin(rot_angle),
                            np.cos(rot_angle)]])
    rot_matrix_inv = np.array([[np.cos(rot_angle), -np.sin(rot_angle)],
                               [np.sin(rot_angle),
                                np.cos(rot_angle)]])

    x = points_upper[:, 0]
    y = points_upper[:, 1]

    rot_x, rot_y = rot_matrix.dot([x - displacement[0], y - displacement[1]])

    spline = UnivariateSpline(rot_x, rot_y, s=1)
    new_rot_x = np.arange(int(round(min(rot_x), 0)),
                          int(round(max(rot_x), 0)) + 1)
    new_rot_y = spline(new_rot_x)

    euclid_distance = np.sqrt(new_rot_x * new_rot_x + new_rot_y * new_rot_y)
    temp = abs(euclid_distance - radius)
    idx_min = np.argmin(temp)  #this one takes 0.000997781753540039s
    #idx_min = int(np.where(temp==temp.min())[0])  #this one takes 0.0009992122650146484s
    cross_lip_rot_x_upper = new_rot_x[idx_min]
    cross_lip_rot_y_upper = new_rot_y[idx_min]

    new_x_upper, new_y_upper = rot_matrix_inv.dot(
        [cross_lip_rot_x_upper, cross_lip_rot_y_upper])
    new_x_upper = new_x_upper + displacement[0]
    new_y_upper = new_y_upper + displacement[1]

    new_point_upper = np.array([new_x_upper, new_y_upper])

    #find the mouth openness
    x = points_lower[:, 0]
    y = points_lower[:, 1]

    rot_x, rot_y = rot_matrix.dot([x - new_x_upper, y - new_y_upper])

    spline = UnivariateSpline(rot_x, rot_y, s=1)
    new_rot_x = 0  #np.arange(int(round(min(rot_x),0)),int(round(max(rot_x),0))+1)
    new_rot_y = spline(new_rot_x)

    cross_lip_rot_x_lower = new_rot_x
    cross_lip_rot_y_lower = new_rot_y

    new_x_lower, new_y_lower = rot_matrix_inv.dot(
        [cross_lip_rot_x_lower, cross_lip_rot_y_lower])
    new_x_lower = new_x_lower + new_x_upper
    new_y_lower = new_y_lower + new_y_upper

    new_point_lower = np.array([new_x_lower, new_y_lower])

    #find the teeth show
    x = points_upper_inside[:, 0]
    y = points_upper_inside[:, 1]
    rot_x, rot_y = rot_matrix.dot([x - new_x_upper, y - new_y_upper])

    spline = UnivariateSpline(rot_x, rot_y, s=1)
    new_rot_x = 0  #np.arange(int(round(min(rot_x),0)),int(round(max(rot_x),0))+1)
    new_rot_y = spline(new_rot_x)

    cross_lip_rot_x_upper_inside = new_rot_x
    cross_lip_rot_y_upper_inside = new_rot_y

    new_x_upper_inside, new_y_upper_inside = rot_matrix_inv.dot(
        [cross_lip_rot_x_upper_inside, cross_lip_rot_y_upper_inside])
    new_x_upper_inside = new_x_upper_inside + new_x_upper
    new_y_upper_inside = new_y_upper_inside + new_y_upper

    new_point_upper_inside = np.array([new_x_upper_inside, new_y_upper_inside])

    x = points_lower_inside[:, 0]
    y = points_lower_inside[:, 1]
    rot_x, rot_y = rot_matrix.dot([x - new_x_upper, y - new_y_upper])

    spline = UnivariateSpline(rot_x, rot_y, s=1)
    new_rot_x = 0  #np.arange(int(round(min(rot_x),0)),int(round(max(rot_x),0))+1)
    new_rot_y = spline(new_rot_x)

    cross_lip_rot_x_lower_inside = new_rot_x
    cross_lip_rot_y_lower_inside = new_rot_y

    new_x_lower_inside, new_y_lower_inside = rot_matrix_inv.dot(
        [cross_lip_rot_x_lower_inside, cross_lip_rot_y_lower_inside])
    new_x_lower_inside = new_x_lower_inside + new_x_upper
    new_y_lower_inside = new_y_lower_inside + new_y_upper

    new_point_lower_inside = np.array([new_x_lower_inside, new_y_lower_inside])

    #compute mouth openness and teeth show
    openness = cross_lip_rot_y_lower - cross_lip_rot_y_upper  #new_rot_y
    theet_show = cross_lip_rot_y_lower_inside - cross_lip_rot_y_upper_inside
    if theet_show < 0:
        theet_show = 0

    return new_point_upper, new_point_lower, new_point_upper_inside, new_point_lower_inside, openness, theet_show
Example #35
import numpy as np
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('windspeed.pdf')
print("Setup Complete")
max_speeds = np.load('max-speeds.npy')
print(max_speeds)
print(max_speeds.shape)
years_nb = max_speeds.shape[0]
cprob = (np.arange(years_nb, dtype=np.float32) + 1) / (years_nb + 1)
print(cprob)
sorted_max_speeds = np.sort(max_speeds)

quantile_func = UnivariateSpline(cprob, sorted_max_speeds)

nprob = np.linspace(0, 1, 100)
fitted_max_speeds = quantile_func(nprob)

fifty_prob = 1. - 0.02
fifty_wind = quantile_func(fifty_prob)

plt.plot(sorted_max_speeds, cprob, 'o')
plt.plot(fitted_max_speeds, nprob, 'g--')
plt.plot([fifty_wind], [fifty_prob], 'o', ms=8., mfc='y', mec='y')
plt.text(30, 0.05, r'$V_{50} = %.2f \, m/s$' % fifty_wind)
plt.plot([fifty_wind, fifty_wind], [plt.axis()[2], fifty_prob], 'k--')
plt.xlabel('Annual wind speed maxima [$m/s$]')
plt.ylabel('Cumulative probability')
pp.savefig()
Example #36
    def extrapolation_smoothing(
        self,
        extrapolated_data: DataArray,
        rho_arr: DataArray,
    ):
        """Function to smooth extrapolated data. Extrapolated data may not have
        any 0th-order discontinuity, but 1st-order discontinuities may exist.
        Smoothing is necessary to eliminate these higher-order discontinuities.

        Parameters
        ----------
        extrapolated_data
            xarray.DataArray extrapolated data to be smoothed.
            Dimensions (rho, theta, t)
        rho_arr
            xarray.DataArray used to construct smoothing splines. Dimensions (rho)
            (Must be higher or the same resolution as the rho dimension
            of extrapolated_data)

        Returns
        -------
        extrapolated_smooth_lfs_arr
            Extrapolated smoothed data on low-field side (fixed theta = 0)
        extrapolated_smooth_hfs_arr
            Extrapolated smoothed data on high-field side (fixed theta = pi)
        """
        t = extrapolated_data.coords["t"]

        extrapolated_smooth_lfs = []
        extrapolated_smooth_hfs = []

        for ind_t, it in enumerate(extrapolated_data.coords["t"]):
            variance_extrapolated_data_lfs = extrapolated_data.isel(
                {"t": ind_t, "theta": 0}
            ).var("rho_poloidal")

            variance_extrapolated_data_hfs = extrapolated_data.isel(
                {"t": ind_t, "theta": 1}
            ).var("rho_poloidal")

            extrapolated_spline_lfs = UnivariateSpline(
                rho_arr,
                extrapolated_data.isel(t=ind_t).sel(theta=0),
                k=5,
                s=0.001 * variance_extrapolated_data_lfs,
            )

            extrapolated_spline_hfs = UnivariateSpline(
                rho_arr,
                extrapolated_data.isel(t=ind_t).sel(theta=np.pi),
                k=5,
                s=0.001 * variance_extrapolated_data_hfs,
            )

            extrapolated_smooth_lfs.append(extrapolated_spline_lfs(rho_arr, 0))
            extrapolated_smooth_hfs.append(extrapolated_spline_hfs(rho_arr, 0))

        extrapolated_smooth_lfs_arr = DataArray(
            data=extrapolated_smooth_lfs,
            coords={"t": t, "rho_poloidal": rho_arr},
            dims=["t", "rho_poloidal"],
        )

        extrapolated_smooth_hfs_arr = DataArray(
            data=extrapolated_smooth_hfs,
            coords={"t": t, "rho_poloidal": rho_arr},
            dims=["t", "rho_poloidal"],
        )

        extrapolated_smooth_lfs_arr = extrapolated_smooth_lfs_arr.transpose(
            "rho_poloidal", "t"
        )
        extrapolated_smooth_hfs_arr = extrapolated_smooth_hfs_arr.transpose(
            "rho_poloidal", "t"
        )

        # Following section is to ensure that near the rho_poloidal=0 region, the
        # extrapolated_smooth_data is constant (ie. with a first-order derivative of 0).
        inv_extrapolated_smooth_hfs = DataArray(
            data=np.flip(extrapolated_smooth_hfs_arr.data, axis=0),
            coords={
                "rho_poloidal": -1
                * np.flip(extrapolated_smooth_hfs_arr.coords["rho_poloidal"].data),
                "t": extrapolated_smooth_hfs_arr.coords["t"].data,
            },
            dims=["rho_poloidal", "t"],
        )

        inv_rho_arr = inv_extrapolated_smooth_hfs.coords["rho_poloidal"].data
        inv_del_val = inv_rho_arr[-1]

        inv_extrapolated_smooth_hfs = inv_extrapolated_smooth_hfs.drop_sel(
            rho_poloidal=inv_del_val
        )

        extrapolated_smooth_mid_plane_arr = concat(
            (inv_extrapolated_smooth_hfs, extrapolated_smooth_lfs_arr), "rho_poloidal"
        )

        rho_zero_ind = np.where(
            np.isclose(extrapolated_smooth_mid_plane_arr.rho_poloidal.data, 0.0)
        )[0][0]

        smooth_central_region = extrapolated_smooth_mid_plane_arr.isel(
            rho_poloidal=slice(rho_zero_ind - 2, rho_zero_ind + 3)
        )

        smooth_central_region.loc[:, :] = smooth_central_region.max(dim="rho_poloidal")

        extrapolated_smooth_mid_plane_arr.loc[
            extrapolated_smooth_mid_plane_arr.rho_poloidal.data[
                rho_zero_ind - 2
            ] : extrapolated_smooth_mid_plane_arr.rho_poloidal.data[rho_zero_ind + 2],
            :,
        ] = smooth_central_region

        inv_extrapolated_smooth_hfs = extrapolated_smooth_mid_plane_arr.isel(
            rho_poloidal=slice(0, rho_zero_ind + 1)
        )

        extrapolated_smooth_hfs_arr = DataArray(
            data=np.flip(inv_extrapolated_smooth_hfs.data, axis=0),
            coords=extrapolated_smooth_hfs_arr.coords,
            dims=extrapolated_smooth_hfs_arr.dims,
        )

        # Ignoring mypy warning since it seems to be unaware that the xarray .loc
        # method uses label-based indexing and slicing instead of integer-based.
        extrapolated_smooth_lfs_arr = extrapolated_smooth_mid_plane_arr.loc[
            0:  # type: ignore
        ]

        return extrapolated_smooth_lfs_arr, extrapolated_smooth_hfs_arr
Example #37
def SMART_obs_calc(degree_overlap, manual_overlap):
    """
    Work out how many observations are required to cover the southern sky
    """

    #setting up the dec ranges
    dec_range = [-72., -55., -40.5, -26.7, -13., +1.6, +18.3] #Gleam pointings
    delays_range = [[0,0,0,0,6,6,6,6,12,12,12,12,18,18,18,18],\
                    [0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12],\
                    [0,0,0,0,2,2,2,2,4,4,4,4,6,6,6,6],\
                    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\
                    [6,6,6,6,4,4,4,4,2,2,2,2,0,0,0,0],\
                    [12,12,12,12,8,8,8,8,4,4,4,4,0,0,0,0],\
                    [18,18,18,18,12,12,12,12,6,6,6,6,0,0,0,0]]

    print("Using GLEAM dec range: {}".format(dec_range))
    """
    sweet_dec_range = [-82.8,-71.4,-63.1,-55.,-47.5,-40.4,-33.5,-26.7,-19.9,-13.,-5.9,1.6,9.7,18.6,29.4,44.8]
    sweet_delays_range= [[0,0,0,0,7,7,7,7,14,14,14,14,21,21,21,21],\
                         [0,0,0,0,6,6,6,6,12,12,12,12,18,18,18,18],\
                         [0,0,0,0,5,5,5,5,10,10,10,10,15,15,15,15],\
                         [0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12],\
                         [0,0,0,0,3,3,3,3,6,6,6,6,9,9,9,9],\
                         [0,0,0,0,2,2,2,2,4,4,4,4,6,6,6,6],\
                         [0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3],\
                         [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\
                         [3,3,3,3,2,2,2,2,1,1,1,1,0,0,0,0],\
                         [6,6,6,6,4,4,4,4,2,2,2,2,0,0,0,0],\
                         [9,9,9,9,6,6,6,6,3,3,3,3,0,0,0,0],\
                         [12,12,12,12,8,8,8,8,4,4,4,4,0,0,0,0],\
                         [15,15,15,15,10,10,10,10,5,5,5,5,0,0,0,0],\
                         [18,18,18,18,12,12,12,12,6,6,6,6,0,0,0,0],\
                         [21,21,21,21,14,14,14,14,7,7,7,7,0,0,0,0],\
                         [24,24,24,24,16,16,16,16,8,8,8,8,0,0,0,0]]

    dec_range = []
    delays_range =[]
    sweet_spots_range = [0,2,4,7,10,12,14]
    for i in sweet_spots_range:
      dec_range.append(sweet_dec_range[i])
      delays_range.append(sweet_delays_range[i])
    print dec_range
    """

    #Going to work out how many pointings are needed
    #setting up some metadata requirements
    time = 4800 #one hour 20 min
    channels = range(107,131)
    minfreq = float(min(channels))
    maxfreq = float(max(channels))
    centrefreq = 1.28 * (minfreq + (maxfreq-minfreq)/2) #in MHz

    start_obsid = '1117624530'
    start_ra = 180.
    Dec_FWHM_calc = []
    RA_FWHM_calc = []
    for i in range(-89,89,1):
        for j in range(0,361,1):
            Dec_FWHM_calc.append(i)
            RA_FWHM_calc.append(j)

    observations = []
    ra_list =[]
    dec_list =[]
    delays_list = []
    FWHM = []
    FWHM_Dec = []
    pointing_count = 0
    for i in range(len(dec_range)):
        #calculating the FWHM at this dec
        ra_sex, deg_sex = fpio.deg2sex(start_ra, dec_range[i])
        cord = [start_obsid, str(ra_sex), str(deg_sex), 1, delays_range[i],centrefreq, channels]
        #powout=get_beam_power(cord, zip(RA_FWHM_calc,Dec_FWHM_calc), dt=600)
        names_ra_dec = np.column_stack((['source']*len(RA_FWHM_calc), RA_FWHM_calc, Dec_FWHM_calc))
        powout = fpio.get_beam_power_over_time(cord, names_ra_dec, dt=600, degrees = True)
        powout_RA_line = []
        powout_Dec_line = []
        RA_line = []
        Dec_line = []
        for p in range(len(powout)):
            #print(int(y[i]/np.pi*180.), int(dec) )
            if int(Dec_FWHM_calc[p]) == int(dec_range[i]):
                powout_RA_line.append(float(powout[p]))
                RA_line.append(float(RA_FWHM_calc[p]))
            if int(RA_FWHM_calc[p]) == int(start_ra):
                powout_Dec_line.append(float(powout[p]))
                Dec_line.append(float(Dec_FWHM_calc[p]))

        print("\nValues for Dec " + str(dec_range[i]))
        #work out RA FWHM (not including the drift scan, 0sec observation)
        if args.fwhm:
            spline = UnivariateSpline(RA_line, powout_RA_line-np.max(powout_RA_line)/2., s=0)
        else:
            spline = UnivariateSpline(RA_line, powout_RA_line-np.full(len(powout_RA_line),0.5), s=0)
        try:
            r1, r2 = spline.roots()
        except ValueError:
            print("No FWHM for " + str(dec_range[i]) + " setting to 1000 to skip")
            FWHM.append(1000.)
            pointing_count -=1
        else:
            FWHM.append(float(r2-r1))
            print("FWHM along RA at dec "+ str(dec_range[i]) + ": " + str(FWHM[i]))

        #work out Dec FWHM
        if args.fwhm:
            spline = UnivariateSpline(Dec_line, powout_Dec_line-np.max(powout_Dec_line)/2., s=0)
            r1, r2 = spline.roots()
            FWHM_Dec.append(float(r2-r1))
            print("FWHM along Dec at dec "+ str(dec_range[i]) + ": " + str(FWHM_Dec[i]))

        deg_move = total_angle = FWHM[i] - degree_overlap*math.cos(math.radians(dec_range[i])) + \
                    float(time)/3600.*15.*math.cos(math.radians(dec_range[i]))
        if manual_overlap is not None:
            point_num_this_deg = manual_overlap[i]
        else:
            point_num_this_deg = int(360./deg_move) + 1
        print("Number for this dec: " +str(point_num_this_deg))
        deg_move = 360. / point_num_this_deg
        overlap_true = FWHM[i] + float(time)/3600.*15.*math.cos(math.radians(dec_range[i])) -\
                       360./point_num_this_deg
        print("True overlap this dec: " + str(overlap_true))

        # offset every second dec range by half a FWHM in RA
        for x in range(point_num_this_deg):
            if i % 2 == 0:
                temp_ra = start_ra + x * deg_move
                observations.append(str(int(start_obsid) + int(x*deg_move*240)))
            else:
                temp_ra = start_ra + x * deg_move +\
                          deg_move / math.cos(math.radians(dec_range[i]))
                observations.append(str(int(start_obsid) + int(x*deg_move*240) +\
                                        int(deg_move*120)))
            if temp_ra > 360.:
               temp_ra = temp_ra -360.
            ra_list.append(temp_ra)
            dec_list.append(dec_range[i])
            delays_list.append(delays_range[i])
            total_angle += deg_move
            pointing_count+=1

    #Sort by ra
    dec_list =     [x for _,x in sorted(zip(ra_list,dec_list))]
    delays_list =  [x for _,x in sorted(zip(ra_list,delays_list))]
    observations = [x for _,x in sorted(zip(ra_list,observations))]
    ra_list = sorted(ra_list)

    return observations, dec_list, ra_list, delays_list
Example #38
def main():

    # make output folder
    try:
        os.makedirs('scaler_output')
    except FileExistsError:
        pass

    # define datasets, datasetB is scaled to match datasetA
    datasetA = 'data/krogan_lab_EMAP_screens/cF3.txt'
    datasetB = 'data/SGA_NxN_avg.txt'

    # read in the two datasets   
    ints, profs, genes = read_square_dataset_small(datasetA,
        "","\t",split=True,profiles = False)
    b_ints, b_profs, b_genes = read_square_dataset_small(datasetB,
        "","\t",split=True,profiles = False)

    datasetA = datasetA.split('/')[-1].split('.')[0]
    datasetB = datasetB.split('/')[-1].split('.')[0]

    avalues = []; bvalues=[]
    for i in ints : 
        if i in b_ints :
            avalues.append(ints[i])
            bvalues.append(b_ints[i])

    asorted = sorted(avalues)
    bsorted = sorted(bvalues)

    # shift datasetB so that it has the same number of negative values
    # as datasetA (makes it a little easier to scale)
    adjustment = -bsorted[len([x for x in asorted if x < 0])]
    bsorted = [x + adjustment for x in bsorted]

    # plot scatter plot showing shared interactions
    density_scatter_plot(ints,b_ints,'scaler_output/unscaled_scatter.png',
                         xlabel='S-score', ylabel='SGA score')

    # record dataset information in log
    with open('scaler_output/scaler_log.txt', 'w') as f:
        f.write("cF3 EMAP has {} interactions\n".format(len(ints)))
        f.write("SGA_NxN has {} interactions\n".format(len(b_ints)))
        f.write("The sets have {} interactions in common\n".format(
                len(avalues)))
        f.write("Dataset correlation = {}\n".format(
                np.corrcoef(avalues,bvalues)[0][1]))
        f.write("Adjustment so that the SGA_NxN shared interaction "
                "set has the same number of negative values as the "
                "cF3 EMAP.\nadjustment={}\n".format(adjustment))

    ## Computing scaling values
    #essentially the data is partitioned into 100 overlapping bins
    #the mean value of bin[0] in datasetB is divided by the mean value of bin[0] from datasetA
    #this gives a scaling factor for values in the range (min(bin[0]), max(bin[0]))
    #values close to zero give unpredictable scaling factors, so they are ignored. 
    #Depending on the size of your overlap you may want to tweak the number of bins

    bins = 500
    binsize = len(avalues) / bins
    score = []; scale = []

    lower_threshold = 0.05
    upper_threshold = 0.99

    for i in np.arange(1,bins*lower_threshold) :
        start = int(i*binsize - binsize)
        end = int(i*binsize + binsize)
        score.append(np.mean(bsorted[start:end]))
        scale.append(np.mean(asorted[start:end])/np.mean(bsorted[start:end]))
    for i in np.arange(bins*upper_threshold,bins) :
        start = int(i*binsize - binsize)
        end = int(i*binsize + binsize)
        score.append(np.mean(bsorted[start:end]))
        scale.append(np.mean(asorted[start:end])/np.mean(bsorted[start:end]))

    # This function creates a curve which maps scores to scaling factors
    # the s=0.02 defines how close the curve fits your data points
    # large values give crap curves, small values may overfit your data
    # it's best to look at the resulting curve and tweak s= as appropriate
    svalue = 0.02
    s = UnivariateSpline(score, scale, s=svalue)

    #displays the scaling values(in red) and the fitted curve (in black)
    fig = plt.figure(figsize=(6, 6), dpi= 80, facecolor='w', edgecolor='k')
    plt.plot(np.arange(min(score),max(score),0.01), # changed from scatter
                [s(x) for x in np.arange(min(score),max(score),0.01)],
                color="red")
    plt.scatter(score, scale, color="black")
    plt.xlim(1.1*min(score), 1.1*max(score))
    plt.ylim(0.9*min(scale), 1.1*max(scale))
    plt.ylabel('Scaling Factor')
    plt.xlabel('SGA Score')
    pylab.savefig("scaler_output/scaling_factor_curve.png")

    # if the value to be scaled is larger than any value in our training set, we use
    # the scaling factor from the largest observed value
    def s_bounded(x) :
        if x<min(score) :
            x = min(score)
        elif x > max(score) :
            x=max(score)
        return s(x)

    #This function applies our scaling factor to a given value
    g= lambda x : (x + adjustment) * s_bounded(x + adjustment)

    for i in b_ints :
        b_ints[i] = float(g(b_ints[i]))

    scaled_dataset_file = "data/SGA_NxN_scaled_to_cF3.txt"
    output_delimited_text(scaled_dataset_file,b_genes,b_genes,b_ints,True)

    # save scaling info to log
    with open('scaler_output/scaler_log.txt', 'a') as f:
        f.write("Number of bins used: {}\n".format(bins))
        f.write("Lower threshold for bins: {}\n".format(
                lower_threshold))
        f.write("Upper threshold for bins: {}\n".format(
                upper_threshold))
        f.write("S value for fitting spline: {}\n".format(
                svalue))
        f.write("max_score={}\n".format(max(score)))
        f.write("min_score={}\n".format(min(score)))

    # save spline for scaling full SGA in R
    scores = np.arange(min(score),max(score),0.01)
    scales = [float(s(x)) for x in np.arange(min(score),max(score),0.01)]
    spline = pd.DataFrame(data=np.stack((scores,scales)).T,
                          columns=['score', 'scale'])
    spline.to_csv('scaler_output/spline.txt', sep='\t', index=False)

    # Plot scatter plot of shared interactions after scaling
    density_scatter_plot(ints, b_ints, 'scaler_output/scaled_scatter.png',
                         xlabel='S-score', ylabel='Scaled SGA score')
    
    # Make QQ Plots using the interactions before and after scaling
    avalues_after_scaling = []; bvalues_after_scaling = []
    for i in ints : 
        if i in b_ints :
            avalues_after_scaling.append(ints[i])
            bvalues_after_scaling.append(b_ints[i])

    # qqplot_2samples puts the "2nd Sample" on the x-axis
    # see documentation for statsmodels.graphics.gofplots
    qqplot_scaled = qqplot_2samples(np.array(bvalues_after_scaling),
                                    np.array(avalues_after_scaling),
                                    xlabel='S-score Quantiles',
                                    ylabel='Scaled SGA score Quantiles',
                                    line='r')
    pylab.savefig("scaler_output/qq_scaled.png")

    qqplot_unscaled = qqplot_2samples(np.array(bvalues),
                                      np.array(avalues),
                                      xlabel='S-score Quantiles',
                                      ylabel='SGA score Quantiles',
                                      line='r')
    pylab.savefig("scaler_output/qq_unscaled.png")
Example #39
def Nderivat(func, x):
    spl = UnivariateSpline(x, func, k=3, s=0)
    derivativas = spl.derivative()
    return derivativas(x)
Example #40
    def initialize(self, data1, data2):
        from scipy.interpolate import UnivariateSpline

        data1s, data2s = zip(*sorted(zip(data1, data2)))

        self.sp = UnivariateSpline(data1s, data2s)
Example #41
cp_curve = np.array([  # leading rows of this lookup table are truncated in the original listing
                     [21.504318130, 0.072918990], [21.629257560, 0.071662650],
                     [21.754922880, 0.070427950], [21.881318310, 0.069214520],
                     [22.008448090, 0.068022000], [22.136316500, 0.066850030],
                     [22.264927820, 0.065698250], [22.394286360, 0.064566310],
                     [22.524396470, 0.063453880], [22.655262520, 0.062360610],
                     [22.786888900, 0.061286180], [22.919280020, 0.060230260],
                     [23.052440330, 0.059192540], [23.186374300, 0.058172690],
                     [23.321086420, 0.057170410], [23.456581220, 0.056185410],
                     [23.592863230, 0.055217370], [23.729937040, 0.054266010],
                     [23.867807240, 0.053331050], [24.006478470, 0.052412190],
                     [24.145955370, 0.051509160], [24.286242620, 0.050621700],
                     [24.427344940, 0.049749520], [24.569267070, 0.048892370],
                     [24.712013750, 0.048049990], [24.855589790, 0.047222120],
                     [25.000000000, 0.046408510], [25.001000000, 0.046402940]])

cp_curve_spline = UnivariateSpline(cp_curve[:, 0], cp_curve[:, 1], ext='const')
cp_curve_spline.set_smoothing_factor(.000001)


class Nrel5MW(OneTypeWindTurbines):
    def __init__(self):
        OneTypeWindTurbines.__init__(self,
                                     'Nrel5MW',
                                     diameter=126.4,
                                     hub_height=90,
                                     ct_func=self._ct,
                                     power_func=self._power,
                                     power_unit='kW')

    def _ct(self, u):
        return np.interp(u, ct_curve[:, 0], ct_curve[:, 1])
Example #42
    def initialize(self,
                   data1,
                   data2,
                   frac_training_data=0.75,
                   max_iter=100,
                   s_iter_decrease=0.75,
                   verb=False):
        from scipy.interpolate import UnivariateSpline

        if verb:
            print(" --------------------")

        # Random subsetting of parts of the data
        train_idx = random.sample(range(len(data1)),
                                  int(len(data1) * frac_training_data))
        i = 0
        train_data1 = []
        train_data2 = []
        test_data1 = []
        test_data2 = []
        for d1, d2 in zip(data1, data2):
            if i in train_idx:
                train_data1.append(data1[i])
                train_data2.append(data2[i])
            else:
                test_data1.append(data1[i])
                test_data2.append(data2[i])
            i += 1

        # Sorted data points
        data1s, data2s = zip(*sorted(zip(data1, data2)))
        test_data1s, test_data2s = zip(*sorted(zip(test_data1, test_data2)))
        train_data1s, train_data2s = zip(
            *sorted(zip(train_data1, train_data2)))

        # Use an initial linear smoothing to find a good smoothing parameter s
        smlin = SmoothingLinear()
        smlin.initialize(data2, data1)
        data2_lin_aligned = smlin.predict(data2)
        stdev_lin = numpy.std(
            numpy.array(data1) - numpy.array(data2_lin_aligned))
        linear_error = stdev_lin * stdev_lin

        # Perform initial spline approximation
        self.s = linear_error * len(train_data1s)
        self.sp = UnivariateSpline(train_data1s, train_data2s, k=3, s=self.s)

        # Apply spline approximation to the testdata
        test_data1_aligned = self.sp(test_data1)
        test_stdev = numpy.std(
            numpy.array(test_data2) - numpy.array(test_data1_aligned))
        if verb:
            test_median = numpy.median(
                numpy.array(test_data2) - numpy.array(test_data1_aligned))
            train_data1_aligned = self.sp(train_data1)
            tr_stdev = numpy.std(
                numpy.array(train_data2) - numpy.array(train_data1_aligned))
            tr_median = numpy.median(
                numpy.array(train_data2) - numpy.array(train_data1_aligned))
            print("  Lin:Computed stdev", stdev_lin)
            print("  Train Computed stdev", tr_stdev, "and median", tr_median)
            print("  Test Computed stdev", test_stdev, "and median",
                  test_median)

        stdev_prev = test_stdev
        s_prev = self.s
        s_iter = self.s
        myIter = 0
        for i in range(max_iter):
            s_iter = s_iter * s_iter_decrease
            self.sp = UnivariateSpline(train_data1s,
                                       train_data2s,
                                       k=3,
                                       s=s_iter)
            test_data1_aligned = self.sp(test_data1)
            stdev = numpy.std(
                numpy.array(test_data2) - numpy.array(test_data1_aligned))
            if verb:
                print(
                    " == Iter", s_iter, "\tstdev",
                    numpy.std(
                        numpy.array(test_data2) -
                        numpy.array(test_data1_aligned)))

            # Stop if stdev does not improve significantly any more
            #if stdev_prev - stdev < 0 or (i > 5 and (stdev_prev - stdev < 0.5)):
            if stdev_prev - stdev < 0:
                break

            stdev_prev = stdev
            s_prev = s_iter

        if verb:
            print(" == Done ", s_prev)

        # Final spline
        self.s = s_prev
        self.sp = UnivariateSpline(data1s, data2s, k=3, s=self.s)
Example #43
0
def getError(xTest, yTest, xVal, yVal, s):
    #print(s)
    sp = UnivariateSpline(xTest, yTest, s=s)
    testError = (np.mean(np.power((sp(xTest) - yTest), 2)))  # training error (computed but unused)
    validateError = (np.mean(np.power((sp(xVal) - yVal), 2)))
    return validateError
Example #44
0
class TablePSF(object):
    r"""Radially-symmetric table PSF.

    This PSF represents a :math:`PSF(r)=dP / d\Omega(r)`
    spline interpolation curve for a given set of offset :math:`r`
    and :math:`PSF` points.

    Uses `scipy.interpolate.UnivariateSpline`.

    Parameters
    ----------
    rad : `~astropy.units.Quantity` with angle units
        Offset wrt source position
    dp_domega : `~astropy.units.Quantity` with sr^-1 units
        PSF value array
    spline_kwargs : dict
        Keyword arguments passed to `~scipy.interpolate.UnivariateSpline`

    Notes
    -----
    * This PSF class works well for model PSFs of arbitrary shape (represented by a table),
      but might give unstable results if the PSF has noise.
      E.g. if ``dp_domega`` was estimated from histograms of real or simulated event data
      with finite statistics, it will have noise and it is your responsibility
      to check that the interpolating spline is reasonable.
    * To customize the spline, pass keyword arguments to `~scipy.interpolate.UnivariateSpline`
      in ``spline_kwargs``. E.g. passing ``dict(k=1)`` changes from the default cubic to
      linear interpolation.
    * TODO: evaluate spline for ``(log(rad), log(PSF))`` for numerical stability?
    * TODO: merge morphology.theta class functionality with this class.
    * TODO: add FITS I/O methods
    * TODO: add ``normalize`` argument to ``__init__`` with default ``True``?
    * TODO: ``__call__`` doesn't show up in the html API docs, but it should:
      https://github.com/astropy/astropy/pull/2135
    """
    def __init__(self,
                 rad,
                 dp_domega,
                 spline_kwargs=DEFAULT_PSF_SPLINE_KWARGS):

        self._rad = Angle(rad).to('radian')
        self._dp_domega = Quantity(dp_domega).to('sr^-1')

        assert self._rad.ndim == self._dp_domega.ndim == 1
        assert self._rad.shape == self._dp_domega.shape

        # Store input arrays as quantities in default internal units
        self._dp_dr = (2 * np.pi * self._rad * self._dp_domega).to('radian^-1')
        self._spline_kwargs = spline_kwargs

        self._compute_splines(spline_kwargs)

    @classmethod
    def from_shape(cls, shape, width, rad):
        """Make TablePSF objects with commonly used shapes.

        This function is mostly useful for examples and testing.

        Parameters
        ----------
        shape : {'disk', 'gauss'}
            PSF shape.
        width : `~astropy.units.Quantity` with angle units
            PSF width angle (radius for disk, sigma for Gauss).
        rad : `~astropy.units.Quantity` with angle units
            Offset angle

        Returns
        -------
        psf : `TablePSF`
            Table PSF

        Examples
        --------
        >>> import numpy as np
        >>> from astropy.coordinates import Angle
        >>> from gammapy.irf import TablePSF
        >>> TablePSF.from_shape(shape='gauss', width='0.2 deg',
        ...                     rad=Angle(np.linspace(0, 0.7, 100), 'deg'))
        """
        width = Angle(width)
        rad = Angle(rad)

        if shape == 'disk':
            amplitude = 1 / (np.pi * width.radian**2)
            psf_value = np.where(rad < width, amplitude, 0)
        elif shape == 'gauss':
            gauss2d_pdf = Gauss2DPDF(sigma=width.radian)
            psf_value = gauss2d_pdf(rad.radian)
        else:
            raise ValueError('Invalid shape: {}'.format(shape))

        psf_value = Quantity(psf_value, 'sr^-1')

        return cls(rad, psf_value)

    def info(self):
        """Print basic info."""
        ss = array_stats_str(self._rad.degree, 'offset')
        ss += 'integral = {}\n'.format(self.integral())

        for containment in [50, 68, 80, 95]:
            radius = self.containment_radius(0.01 * containment)
            ss += ('containment radius {} deg for {}%\n'.format(
                radius.degree, containment))

        return ss

    # TODO: remove because it's not flexible enough?
    def __call__(self, lon, lat):
        """Evaluate PSF at a 2D position.

        The PSF is centered on ``(0, 0)``.

        Parameters
        ----------
        lon, lat : `~astropy.coordinates.Angle`
            Longitude / latitude position

        Returns
        -------
        psf_value : `~astropy.units.Quantity`
            PSF value
        """
        center = SkyCoord(0, 0, unit='radian')
        point = SkyCoord(lon, lat)
        rad = center.separation(point)
        return self.evaluate(rad)

    def kernel(self,
               reference,
               rad_max,
               normalize=True,
               discretize_model_kwargs=dict(factor=10)):
        """
        Make a 2-dimensional kernel image.

        The kernel image is evaluated on a cartesian grid defined by the
        reference sky image.

        Parameters
        ----------
        reference : `~gammapy.image.SkyImage` or `~gammapy.cube.SkyCube`
            Reference sky image or sky cube defining the spatial grid.
        rad_max : `~astropy.coordinates.Angle`
            Radial size of the kernel
        normalize : bool
            Whether to normalize the kernel.

        Returns
        -------
        kernel : `~astropy.units.Quantity`
            Kernel 2D image of Quantities
        """
        from ..cube import SkyCube
        rad_max = Angle(rad_max)

        if isinstance(reference, SkyCube):
            reference = reference.sky_image_ref

        pixel_size = reference.wcs_pixel_scale()[0]

        def _model(x, y):
            """Model in the appropriate format for discretize_model."""
            rad = np.sqrt(x * x + y * y) * pixel_size
            return self.evaluate(rad)

        npix = int(rad_max.radian / pixel_size.radian)
        pix_range = (-npix, npix + 1)

        kernel = discretize_oversample_2D(_model,
                                          x_range=pix_range,
                                          y_range=pix_range,
                                          **discretize_model_kwargs)
        if normalize:
            kernel = kernel / kernel.sum()

        return kernel

    def evaluate(self, rad, quantity='dp_domega'):
        r"""Evaluate PSF.

        The following PSF quantities are available:

        * 'dp_domega': PDF per 2-dim solid angle :math:`\Omega` in sr^-1

            .. math:: \frac{dP}{d\Omega}

        * 'dp_dr': PDF per 1-dim offset :math:`r` in radian^-1

            .. math:: \frac{dP}{dr} = 2 \pi r \frac{dP}{d\Omega}

        Parameters
        ----------
        rad : `~astropy.coordinates.Angle`
            Offset wrt source position
        quantity : {'dp_domega', 'dp_dr'}
            Which PSF quantity?

        Returns
        -------
        psf_value : `~astropy.units.Quantity`
            PSF value
        """
        rad = Angle(rad)

        shape = rad.shape
        x = np.array(rad.radian).flat

        if quantity == 'dp_domega':
            y = self._dp_domega_spline(x)
            unit = 'sr^-1'
        elif quantity == 'dp_dr':
            y = self._dp_dr_spline(x)
            unit = 'radian^-1'
        else:
            ss = 'Invalid quantity: {}\n'.format(quantity)
            ss += "Choose one of: 'dp_domega', 'dp_dr'"
            raise ValueError(ss)

        y = np.clip(a=y, a_min=0, a_max=None)
        return Quantity(y, unit).reshape(shape)

    def integral(self, rad_min=None, rad_max=None):
        """Compute PSF integral, aka containment fraction.

        Parameters
        ----------
        rad_min, rad_max : `~astropy.units.Quantity` with angle units
            Offset angle range

        Returns
        -------
        integral : float
            PSF integral
        """
        if rad_min is None:
            rad_min = self._rad[0]
        else:
            rad_min = Angle(rad_min)

        if rad_max is None:
            rad_max = self._rad[-1]
        else:
            rad_max = Angle(rad_max)

        rad_min = self._rad_clip(rad_min)
        rad_max = self._rad_clip(rad_max)

        cdf_min = self._cdf_spline(rad_min)
        cdf_max = self._cdf_spline(rad_max)

        return cdf_max - cdf_min

    def containment_radius(self, fraction):
        """Containment radius.

        Parameters
        ----------
        fraction : array_like
            Containment fraction (range 0 .. 1)

        Returns
        -------
        rad : `~astropy.coordinates.Angle`
            Containment radius angle
        """
        rad = self._ppf_spline(fraction)
        return Angle(rad, 'radian').to('deg')

    def normalize(self):
        """Normalize PSF to unit integral.

        Computes the total PSF integral via the :math:`dP / dr` spline
        and then divides the :math:`dP / dr` array.
        """
        integral = self.integral()

        self._dp_dr /= integral

        # Don't divide by 0
        EPS = 1e-6
        rad = np.clip(self._rad.radian, EPS, None)
        rad = Quantity(rad, 'radian')
        self._dp_domega = self._dp_dr / (2 * np.pi * rad)
        self._compute_splines(self._spline_kwargs)

    def broaden(self, factor, normalize=True):
        r"""Broaden PSF by scaling the offset array.

        For a broadening factor :math:`f` and the offset
        array :math:`r`, the offset array is scaled
        in the following way:

        .. math::
            r_{new} = f \times r_{old}
            \frac{dP}{dr}(r_{new}) = \frac{dP}{dr}(r_{old})

        Parameters
        ----------
        factor : float
            Broadening factor
        normalize : bool
            Normalize PSF after broadening
        """
        self._rad *= factor
        # We define broadening such that self._dp_domega remains the same,
        # so we only have to re-compute self._dp_dr and the splines here.
        self._dp_dr = (2 * np.pi * self._rad * self._dp_domega).to('radian^-1')
        self._compute_splines(self._spline_kwargs)

        if normalize:
            self.normalize()

    def plot_psf_vs_rad(self, ax=None, quantity='dp_domega', **kwargs):
        """Plot PSF vs radius.

        TODO: describe PSF ``quantity`` argument in a central place and link to it from here.
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax

        x = self._rad.to('deg')
        y = self.evaluate(self._rad, quantity)

        ax.plot(x.value, y.value, **kwargs)
        ax.loglog()
        ax.set_xlabel('Radius ({})'.format(x.unit))
        ax.set_ylabel('PSF ({})'.format(y.unit))

    def _compute_splines(self, spline_kwargs=DEFAULT_PSF_SPLINE_KWARGS):
        """Compute two splines representing the PSF.

        * `_dp_domega_spline` is used to evaluate the 2D PSF.
        * `_dp_dr_spline` is not really needed for most applications,
          but is available via `evaluate`.
        * `_cdf_spline` is used to compute integral and for normalisation.
        * `_ppf_spline` is used to compute containment radii.
        """
        from scipy.interpolate import UnivariateSpline

        # Compute spline and normalize.
        x, y = self._rad.value, self._dp_domega.value
        self._dp_domega_spline = UnivariateSpline(x, y, **spline_kwargs)

        x, y = self._rad.value, self._dp_dr.value
        self._dp_dr_spline = UnivariateSpline(x, y, **spline_kwargs)

        # We use the terminology for scipy.stats distributions
        # http://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#common-methods

        # cdf = "cumulative distribution function"
        self._cdf_spline = self._dp_dr_spline.antiderivative()

        # ppf = "percent point function" (inverse of cdf)
        # Here's a discussion on methods to compute the ppf
        # http://mail.scipy.org/pipermail/scipy-user/2010-May/025237.html
        y = self._rad.value
        x = self.integral(Angle(0, 'rad'), self._rad)

        # Since scipy 1.0 the UnivariateSpline requires that x is strictly increasing
        # So only keep nodes where this is the case (and always keep the first one):
        x, idx = np.unique(x, return_index=True)
        y = y[idx]

        # Dummy values, for cases where one really doesn't have a valid PSF.
        if len(x) < 4:
            x = [0, 1, 2, 3]
            y = [0, 0, 0, 0]

        self._ppf_spline = UnivariateSpline(x, y, **spline_kwargs)

    def _rad_clip(self, rad):
        """Clip to radius support range, because spline extrapolation is unstable."""
        rad = Angle(rad, 'radian').radian
        rad = np.clip(rad, 0, self._rad[-1].radian)
        return rad
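For orientation, a short usage sketch of this class, following the `from_shape` docstring example above (assumes the `gammapy.irf` import path shown there; this API belongs to older gammapy versions):

import numpy as np
from astropy.coordinates import Angle
from gammapy.irf import TablePSF

rad = Angle(np.linspace(0, 0.7, 100), 'deg')
psf = TablePSF.from_shape(shape='gauss', width='0.2 deg', rad=rad)
psf.normalize()                          # unit integral via the dP/dr spline
print(psf.integral())                    # ~1 after normalization
print(psf.containment_radius(0.68))      # 68% containment radius (Angle in deg)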
Example #45
0
    #plt.plot(xTest,yTest,'o',label="test")
    #plt.plot(xVal,yVal,'*',label="val")
    #plt.legend()
    #plt.show()


valErrors = []
ss = np.linspace(0.0001, 0.5, 100)
for s in ss:
    valErrors.append(getFit(x, y_noise, s))

plt.plot(ss, valErrors, label="Val")
plt.legend()
plt.show()

bestS = ss[valErrors.index(min(valErrors))]
sp = UnivariateSpline(x, y_noise, s=bestS)

#m = minimize(lambda s : getError(xTest,yTest,xVal,yVal,s),x0=0.25,tol=0.00001, method='nelder-mead')
m = differential_evolution(lambda s: getFit(x, y_noise, s),
                           bounds=[(0, 1)],
                           tol=0.0001)
fitS = m.x
spFit = UnivariateSpline(x, y_noise, s=fitS)
plt.plot(x, y, label="True")
plt.plot(x, y_noise, 'o', label='Data')
plt.plot(x, sp(x), label="Spline {}".format(bestS))
plt.plot(x, spFit(x), label="fit spline {}".format(fitS))
plt.legend()
plt.show()
Example #46
0
from numpy import linspace, exp
from numpy.random import randn
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline

x = linspace(-3, 3, 100)
y = exp(-x**2) + randn(100) / 10
s = UnivariateSpline(x, y, s=1)
xs = linspace(-3, 3, 1000)
ys = s(xs)
plt.plot(x, y, '.-')
plt.plot(xs, ys)
plt.show()
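As a reminder of what `s` means here: scipy adds knots until the weighted sum of squared residuals drops to roughly `s` or below, i.e. `sum((w * (y - spl(x)))**2) <= s`. A quick check using the names above:

import numpy as np
resid = np.sum((y - s(x)) ** 2)   # weights default to 1
print(resid)                      # should be at most ~1, the s used above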
Example #47
0
    def __init__(self, x, y, *args, **kwargs):
        UnivariateSpline.__init__(self, x, y, *args, **kwargs)

        self.xdata = sp.array(x)
        self.ydata = sp.array(y)
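This snippet omits its class header and imports. A minimal runnable version of the same pattern might look like the following (the class name `DataSpline` is hypothetical, and `numpy` stands in for the snippet's `sp` alias, since `scipy.array` has been removed from recent scipy):

import numpy as np
from scipy.interpolate import UnivariateSpline

class DataSpline(UnivariateSpline):
    """Spline that remembers the data it was fitted to."""

    def __init__(self, x, y, *args, **kwargs):
        UnivariateSpline.__init__(self, x, y, *args, **kwargs)
        self.xdata = np.array(x)
        self.ydata = np.array(y)

spl = DataSpline(np.linspace(0, 5, 20), np.linspace(0, 5, 20) ** 2, s=0)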
Example #48
0
from matplotlib import gridspec
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
from scipy.stats import gaussian_kde
import numpy as np

from matplotlib import rc

rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)

np.random.seed(0)

x = np.sort(np.random.rand(1000))
gx = np.sort(np.linspace(min(x), max(x), 10))
gy = np.sort(np.random.randn(10))
f = UnivariateSpline(gx, gy)
y = f(x)
n = np.random.randn(1000) * 0.1

fig = plt.figure(figsize=(5, 4))
plot_main = fig.add_subplot(111)

plot_main.plot(x, y + n, '.', color='0.75', zorder=-100)
plot_main.plot(x, y, linewidth=2, zorder=-99)

i = [10, 500, 800]
plt.errorbar(x[i],
             y[i],
             yerr=[0.25, 0.25, 0.25],
             color='red',
             linestyle='none')
Example #49
0
def smoothing_filter(time_in,
                     val_in,
                     time_out=None,
                     relabel=None,
                     params=None):
    """
    @brief      Smoothing filter with relabeling and resampling features.

    @details    It supports evenly sampled multidimensional input signal.
                Relabeling can be used to infer the value of samples at
                time steps before and after the explicitly provided samples.
                As a reminder, relabeling is a generalization of periodicity.

    @param[in]  time_in     Time steps of the input signal (1D numpy array)
    @param[in]  val_in      Sampled values of the input signal
                            (2D numpy array: row = sample, column = time)
    @param[in]  time_out    Time steps of the output signal (1D numpy array)
    @param[in]  relabel     Relabeling matrix (identity for periodic signals)
                            Optional: disabled if omitted
    @param[in]  params      Parameters of the filter. Dictionary with keys:
                            'mixing_ratio_1': Relative time at the beginning of the signal
                                            during which the output signal corresponds to a
                                            linear mixing over time of the filtered and
                                            original signal. (only used if relabel is omitted)
                            'mixing_ratio_2': Relative time at the end of the signal
                                            during which the output signal corresponds to a
                                            linear mixing over time of the filtered and
                                            original signal. (only used if relabel is omitted)
                            'smoothness'[0]: Smoothing factor to filter the beginning of the signal
                                            (only used if relabel is omitted)
                            'smoothness'[1]: Smoothing factor to filter the end of the signal
                                            (only used if relabel is omitted)
                            'smoothness'[2]: Smoothing factor to filter the middle part of the signal

    @return     Filtered signal (2D numpy array: row = sample, column = time)
    """
    if time_out is None:
        time_out = time_in
    if params is None:
        params = dict()
        params['mixing_ratio_1'] = 0.12
        params['mixing_ratio_2'] = 0.04
        params['smoothness'] = [0.0, 0.0, 0.0]
        params['smoothness'][0] = 5e-3
        params['smoothness'][1] = 5e-3
        params['smoothness'][2] = 3e-3

    if relabel is None:
        mix_fit = [None, None, None]
        mix_fit[0] = lambda t: 0.5 * (1 + np.sin(1 / params['mixing_ratio_1'] *
                                                 ((t - time_in[0]) /
                                                  (time_in[-1] - time_in[0]
                                                   )) * np.pi - np.pi / 2))
        mix_fit[1] = lambda t: 0.5 * (
            1 + np.sin(1 / params['mixing_ratio_2'] *
                       ((t - (1 - params['mixing_ratio_2']) * time_in[-1]) /
                        (time_in[-1] - time_in[0])) * np.pi + np.pi / 2))
        mix_fit[2] = lambda t: 1

        val_fit = []
        for jj in range(val_in.shape[0]):
            val_fit_jj = []
            for kk in range(len(params['smoothness'])):
                val_fit_jj.append(
                    UnivariateSpline(time_in,
                                     val_in[jj],
                                     s=params['smoothness'][kk]))
            val_fit.append(val_fit_jj)

        time_out_mixing = [None, None, None]
        time_out_mixing_ind = [None, None, None]
        time_out_mixing_ind[
            0] = time_out < time_out[-1] * params['mixing_ratio_1']
        time_out_mixing[0] = time_out[time_out_mixing_ind[0]]
        time_out_mixing_ind[1] = time_out > time_out[-1] * (
            1 - params['mixing_ratio_2'])
        time_out_mixing[1] = time_out[time_out_mixing_ind[1]]
        time_out_mixing_ind[2] = np.logical_and(
            np.logical_not(time_out_mixing_ind[0]),
            np.logical_not(time_out_mixing_ind[1]))
        time_out_mixing[2] = time_out[time_out_mixing_ind[2]]

        val_out = np.zeros((val_in.shape[0], len(time_out)))
        for jj in range(val_in.shape[0]):
            for kk in range(len(time_out_mixing)):
                val_out[jj,time_out_mixing_ind[kk]] = \
                   (1 - mix_fit[kk](time_out_mixing[kk])) * val_fit[jj][kk](time_out_mixing[kk]) + \
                        mix_fit[kk](time_out_mixing[kk])  * val_fit[jj][-1](time_out_mixing[kk])
    else:
        time_tmp = np.concatenate(
            [time_in[:-1] - time_in[-1], time_in, time_in[1:] + time_in[-1]])
        val_in_tmp = np.concatenate(
            [relabel.dot(val_in[:, :-1]), val_in,
             relabel.dot(val_in[:, 1:])],
            axis=1)
        val_out = np.zeros((val_in.shape[0], len(time_out)))
        for jj in range(val_in_tmp.shape[0]):
            f = UnivariateSpline(time_tmp,
                                 val_in_tmp[jj],
                                 s=params['smoothness'][-1])
            val_out[jj] = f(time_out)

    return val_out
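A usage sketch under the same assumptions the function already makes (`np` is numpy, `UnivariateSpline` is imported): smooth a two-channel signal with the default parameters.

import numpy as np
from scipy.interpolate import UnivariateSpline

time_in = np.linspace(0.0, 1.0, 200)
val_in = np.vstack([np.sin(2 * np.pi * time_in),      # 2 samples (rows)
                    np.cos(2 * np.pi * time_in)])     # x 200 time steps
val_out = smoothing_filter(time_in, val_in)           # same shape as val_in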
Example #50
0
def _upsample_cam(class_activation_matrix, new_dimensions):
    """Upsamples class-activation matrix (CAM).

    CAM may be 1-D, 2-D, or 3-D.

    :param class_activation_matrix: numpy array containing 1-D, 2-D, or 3-D
        class-activation matrix.
    :param new_dimensions: numpy array of new dimensions.  If matrix is
        {1D, 2D, 3D}, this must be a length-{1, 2, 3} array, respectively.
    :return: class_activation_matrix: Upsampled version of input.
    """

    num_rows_new = new_dimensions[0]
    row_indices_new = numpy.linspace(1,
                                     num_rows_new,
                                     num=num_rows_new,
                                     dtype=float)
    row_indices_orig = numpy.linspace(1,
                                      num_rows_new,
                                      num=class_activation_matrix.shape[0],
                                      dtype=float)

    if len(new_dimensions) == 1:
        # interp_object = UnivariateSpline(
        #     x=row_indices_orig, y=numpy.ravel(class_activation_matrix),
        #     k=1, s=0
        # )

        interp_object = UnivariateSpline(
            x=row_indices_orig,
            y=numpy.ravel(class_activation_matrix),
            k=3,
            s=0)

        return interp_object(row_indices_new)

    num_columns_new = new_dimensions[1]
    column_indices_new = numpy.linspace(1,
                                        num_columns_new,
                                        num=num_columns_new,
                                        dtype=float)
    column_indices_orig = numpy.linspace(1,
                                         num_columns_new,
                                         num=class_activation_matrix.shape[1],
                                         dtype=float)

    if len(new_dimensions) == 2:
        interp_object = RectBivariateSpline(x=row_indices_orig,
                                            y=column_indices_orig,
                                            z=class_activation_matrix,
                                            kx=3,
                                            ky=3,
                                            s=0)

        return interp_object(x=row_indices_new,
                             y=column_indices_new,
                             grid=True)

    num_heights_new = new_dimensions[2]
    height_indices_new = numpy.linspace(1,
                                        num_heights_new,
                                        num=num_heights_new,
                                        dtype=float)
    height_indices_orig = numpy.linspace(1,
                                         num_heights_new,
                                         num=class_activation_matrix.shape[2],
                                         dtype=float)

    interp_object = RegularGridInterpolator(points=(row_indices_orig,
                                                    column_indices_orig,
                                                    height_indices_orig),
                                            values=class_activation_matrix,
                                            method='linear')

    column_index_matrix, row_index_matrix, height_index_matrix = (
        numpy.meshgrid(column_indices_new, row_indices_new,
                       height_indices_new))
    query_point_matrix = numpy.stack(
        (row_index_matrix, column_index_matrix, height_index_matrix), axis=-1)

    return interp_object(query_point_matrix)
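A quick usage sketch, assuming the imports the snippet relies on (`numpy` plus the scipy interpolators it names):

import numpy
from scipy.interpolate import (RectBivariateSpline, RegularGridInterpolator,
                               UnivariateSpline)

cam = numpy.random.rand(8, 8)                         # coarse 2-D CAM
cam_fine = _upsample_cam(cam, numpy.array([32, 32]))  # bicubic upsampling
print(cam_fine.shape)                                 # (32, 32)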
Example #51
0
    def rho2rho(self, rho_in, t_in=None, \
               coord_in='rho_pol', coord_out='rho_tor', extrapolate=False):
        """Mapping from/to rho_pol, rho_tor, r_V, rho_V, Psi, r_a
        r_V is the STRAHL-like radial coordinate

        Input
        ----------
        rho_in : float, ndarray
            radial coordinates, 1D (time constant) or 2D+ (time variable) of size (nt,nx,...)
        t_in : float or 1darray
            time
        coord_in : str ['rho_pol', 'rho_tor', 'rho_V', 'r_V', 'Psi', 'r_a', 'Psi_N']
            input coordinate label
        coord_out : str ['rho_pol', 'rho_tor', 'rho_V', 'r_V', 'Psi', 'r_a', 'Psi_N']
            output coordinate label
        extrapolate : bool
            extrapolate rho_tor, r_V outside the separatrix

        Output
        -------
        rho : 2d+ array (nt, nr, ...)
            converted radial coordinate

        """

        if not self.eq_open:
            return
        if self.debug:
            print('Remapping from %s to %s' % (coord_in, coord_out))

        if t_in is None:
            t_in = self.t_eq

        tarr = np.atleast_1d(t_in)
        rho = np.atleast_1d(rho_in)

        nt_in = np.size(tarr)

        if rho.ndim == 1:
            rho = np.tile(rho, (nt_in, 1))

        # Trivial case
        if coord_out == coord_in:
            return rho

        self._read_scalars()
        self._read_profiles()

        unique_idx, idx = self._get_nearest_index(tarr)

        if coord_in in ['rho_pol', 'Psi', 'Psi_N']:
            label_in = self.pf
        elif coord_in == 'rho_tor':
            label_in = self.tf
        elif coord_in in ['rho_V', 'r_V']:
            label_in = self.vol
            R0 = self.ssq['Rmag']
        elif coord_in in ['r_a', 'RMNMP']:
            R, _ = self.rhoTheta2rz(self.pf.T, [0, np.pi], coord_in='Psi')
            label_in = (R[:, 0] - R[:, 1]).T**2 / 4
        else:
            raise Exception('unsupported input coordinate')

        if coord_out in ['rho_pol', 'Psi', 'Psi_N']:
            label_out = self.pf
        elif coord_out == 'rho_tor':
            label_out = self.tf
        elif coord_out in ['rho_V', 'r_V']:
            label_out = self.vol
            R0 = self.ssq['Rmag']
        elif coord_out in ['r_a', 'RMNMP']:
            R, _ = self.rhoTheta2rz(self.pf.T[unique_idx], [0, np.pi],
                                    t_in=self.t_eq[unique_idx],
                                    coord_in='Psi')
            label_out = np.zeros_like(self.pf)
            label_out[:, unique_idx] = (R[:, 0] - R[:, 1]).T**2 / 4
        else:
            raise Exception('unsupported output coordinate')

        PFL = self.orientation * self.pf
        PSIX = self.orientation * self.psix
        PSI0 = self.orientation * self.psi0

        rho_output = np.ones_like(rho)  #*np.nan

        for i in unique_idx:

            # Calculate a normalized input and output flux
            sort_wh = np.argsort(PFL[:, i])
            # drop points outside the separatrix
            ind = (label_out[sort_wh, i] != 0) & (label_in[sort_wh, i] != 0)
            ind[0] = True
            sort_wh = sort_wh[ind]

            sep_out, mag_out = np.interp([PSIX[i], PSI0[i]], PFL[sort_wh, i],
                                         label_out[sort_wh, i])
            sep_in, mag_in = np.interp([PSIX[i], PSI0[i]], PFL[sort_wh, i],
                                       label_in[sort_wh, i])

            if (abs(sep_out - mag_out) <
                    1e-4) or (abs(sep_in - mag_in) < 1e-4) or np.isnan(
                        sep_in * sep_out):  # corrupted timepoint
                continue

            # Normalize between 0 and 1
            Psi_out = (label_out[sort_wh, i] - mag_out) / (sep_out - mag_out)
            Psi_in = (label_in[sort_wh, i] - mag_in) / (sep_in - mag_in)

            Psi_out[(Psi_out > 1) | (Psi_out < 0)] = 0  #remove rounding errors
            Psi_in[(Psi_in > 1) | (Psi_in < 0)] = 0

            rho_out = np.r_[np.sqrt(Psi_out), 1]
            rho_in = np.r_[np.sqrt(Psi_in), 1]

            ind = (rho_out == 0) | (rho_in == 0)
            rho_out, rho_in = rho_out[~ind], rho_in[~ind]

            # Profiles can be noisy!  A smoothing spline must be used
            sortind = np.unique(rho_in, return_index=True)[1]
            w = np.ones_like(sortind) * rho_in[sortind]
            w = np.r_[w[1] / 2, w[1:], 1e3]
            ratio = rho_out[sortind] / rho_in[sortind]
            rho_in = np.r_[0, rho_in[sortind]]
            ratio = np.r_[ratio[0], ratio]

            s = UnivariateSpline(
                rho_in, ratio, w=w, k=4, s=5e-3, ext=3
            )  # BUG: s = 5e-3 can sometimes be too much, sometimes not enough :(

            jt = idx == i
            rho_ = np.copy(rho[jt])

            r0_in, r0_out = 1, 1
            if coord_in == 'r_V':
                r0_in = np.sqrt(sep_in / (2 * np.pi**2 * R0[i]))
            if coord_out == 'r_V':
                r0_out = np.sqrt(sep_out / (2 * np.pi**2 * R0[i]))
            if coord_in == 'RMNMP':
                r0_in = np.sqrt(sep_in)
            if coord_out == 'RMNMP':
                r0_out = np.sqrt(sep_out)
            if coord_in == 'Psi':
                rho_ = np.sqrt(
                    np.maximum(0, (rho_ - self.psi0[i]) /
                               (self.psix[i] - self.psi0[i])))
            if coord_in == 'Psi_N':
                rho_ = np.sqrt(np.maximum(0, rho_))

            # Evaluate spline
            rho_output[jt] = s(rho_.flatten() / r0_in).reshape(
                rho_.shape) * rho_ * r0_out / r0_in

            if np.any(np.isnan(rho_output[jt])):  # UnivariateSpline failed
                rho_output[jt] = np.interp(rho_ / r0_in, rho_in,
                                           ratio) * rho_ * r0_out / r0_in

            if not extrapolate:
                rho_output[jt] = np.minimum(rho_output[jt],
                                            r0_out)  # rounding errors

            rho_output[jt] = np.maximum(0, rho_output[jt])  # rounding errors

            if coord_out == 'Psi':
                rho_output[jt] = rho_output[jt]**2 * (
                    self.psix[i] - self.psi0[i]) + self.psi0[i]
            if coord_out == 'Psi_N':
                rho_output[jt] = rho_output[jt]**2

        return rho_output
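The core trick in the loop above, isolated: fit a smoothing spline to the ratio of the two normalized coordinates, so the mapping stays well behaved near the axis where both coordinates go to zero. A standalone sketch with synthetic labels (all names hypothetical):

import numpy as np
from scipy.interpolate import UnivariateSpline

rho_in = np.linspace(0.02, 1.0, 50)       # normalized input coordinate
rho_out = rho_in ** 1.15                  # pretend output coordinate, same grid
ratio_spline = UnivariateSpline(rho_in, rho_out / rho_in, k=4, s=5e-3, ext=3)

rho_query = np.array([0.1, 0.5, 0.9])
print(ratio_spline(rho_query) * rho_query)  # mapped coordinates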
Example #52
0
def run_opt(layout_number, wec_method_number, wake_model, opt_alg_number,
            max_wec, nsteps):
    OPENMDAO_REQUIRE_MPI = False
    run_number = layout_number
    model = wake_model
    # set model
    MODELS = ['FLORIS', 'BPA', 'JENSEN', 'LARSEN']
    print(MODELS[model])

    # select optimization approach/method
    opt_algs = ['snopt', 'ga', 'ps']
    opt_algorithm = opt_algs[opt_alg_number]

    # select wec method
    wec_methods = ['none', 'diam', 'angle', 'hybrid']
    wec_method = wec_methods[wec_method_number]

    # pop_size = 760

    # save and show options
    show_start = False
    show_end = False
    save_start = False
    save_end = False

    save_locations = True
    save_aep = True
    save_time = True
    rec_func_calls = True

    input_directory = "../../../input_files/"

    # set options for BPA
    print_ti = False
    sort_turbs = True

    # turbine_type = 'NREL5MW'            #can be 'V80' or 'NREL5MW'
    turbine_type = 'V80'  # can be 'V80' or 'NREL5MW'

    wake_model_version = 2016

    WECH = 0
    if wec_method == 'diam':
        output_directory = "../output_files/%s_wec_diam_max_wec_%i_nsteps_%.3f/" % (
            opt_algorithm, max_wec, nsteps)
        relax = True
        # expansion_factors = np.array([3, 2.75, 2.5, 2.25, 2.0, 1.75, 1.5, 1.25, 1.0, 1.0])
        expansion_factors = np.linspace(1.0, max_wec, nsteps)
        expansion_factors = np.flip(expansion_factors)
    elif wec_method == 'angle':
        output_directory = "../output_files/%s_wec_angle_max_wec_%i_nsteps_%.3f/" % (
            opt_algorithm, max_wec, nsteps)
        relax = True
        # expansion_factors = np.array([50, 40, 30, 20, 10, 0.0, 0.0])
        expansion_factors = np.linspace(0.0, max_wec, nsteps)
        expansion_factors = np.flip(expansion_factors)
    elif wec_method == 'hybrid':
        expansion_factors = np.linspace(1.0, max_wec, nsteps)
        expansion_factors = np.flip(expansion_factors)
        output_directory = "../output_files/%s_wec_hybrid_max_wec_%i_nsteps_%.3f/" % (
            opt_algorithm, max_wec, nsteps)
        relax = True
        WECH = 1
    elif wec_method == 'none':
        relax = False
        output_directory = "../output_files/%s/" % opt_algorithm
    else:
        raise ValueError('wec_method must be diam, angle, hybrid, or none')

    # create output directory if it does not exist yet
    import distutils.dir_util
    distutils.dir_util.mkpath(output_directory)

    differentiable = False

    # for expansion_factor in np.array([5., 4., 3., 2.75, 2.5, 2.25, 2.0, 1.75, 1.5, 1.25, 1.0]):
    # for expansion_factor in np.array([20., 15., 10., 5., 4., 3., 2.5, 1.25, 1.0]):
    # expansion_factors = np.array([20., 10., 5., 2.5, 1.25, 1.0])

    wake_combination_method = 1  # can be [0:Linear freestreem superposition,
    #  1:Linear upstream velocity superposition,
    #  2:Sum of squares freestream superposition,
    #  3:Sum of squares upstream velocity superposition]

    ti_calculation_method = 4  # can be [0:No added TI calculations,
    # 1:TI by Niayifar and Porte Agel altered by Annoni and Thomas,
    # 2:TI by Niayifar and Porte Agel 2016,
    # 3:TI by Niayifar and Porte Agel 2016 with added soft max function,
    # 4:TI by Niayifar and Porte Agel 2016 using area overlap ratio,
    # 5:TI by Niayifar and Porte Agel 2016 using area overlap ratio and SM function]

    if wec_method_number > 0:
        ti_opt_method = 5  # can be [0:No added TI calculations,
        # 1:TI by Niayifar and Porte Agel altered by Annoni and Thomas,
        # 2:TI by Niayifar and Porte Agel 2016,
        # 3:TI by Niayifar and Porte Agel 2016 with added soft max function,
        # 4:TI by Niayifar and Porte Agel 2016 using area overlap ratio,
        # 5:TI by Niayifar and Porte Agel 2016 using area overlap ratio and SM function]

    else:
        ti_opt_method = 5
    final_ti_opt_method = 5

    if opt_algorithm == 'ps':
        ti_opt_method = ti_calculation_method

    sm_smoothing = 700.

    if ti_calculation_method == 0:
        calc_k_star_calc = False
    else:
        calc_k_star_calc = True

    if ti_opt_method == 0:
        calc_k_star_opt = False
    else:
        calc_k_star_opt = True

    nRotorPoints = 1

    wind_rose_file = 'nantucket'  # can be one of: 'amalia', 'nantucket', 'directional', '1d'

    TI = 0.108
    k_calc = 0.3837 * TI + 0.003678
    # k_calc = 0.022
    # k_opt = 0.04

    shear_exp = 0.31

    # air_density = 1.1716  # kg/m^3
    air_density = 1.225  # kg/m^3 (from Jen)

    if turbine_type == 'V80':

        # define turbine size
        rotor_diameter = 80.  # (m)
        hub_height = 70.0

        z_ref = 80.0  # m
        z_0 = 0.0

        # load performance characteristics
        cut_in_speed = 4.  # m/s
        cut_out_speed = 25.  # m/s
        rated_wind_speed = 16.  # m/s
        rated_power = 2000.  # kW
        generator_efficiency = 0.944

        ct_curve_data = np.loadtxt(input_directory +
                                   'mfg_ct_vestas_v80_niayifar2016.txt',
                                   delimiter=",")
        ct_curve_wind_speed = ct_curve_data[:, 0]
        ct_curve_ct = ct_curve_data[:, 1]

        # air_density = 1.1716  # kg/m^3
        Ar = 0.25 * np.pi * rotor_diameter**2
        # cp_curve_wind_speed = ct_curve[:, 0]
        power_data = np.loadtxt(input_directory +
                                'niayifar_vestas_v80_power_curve_observed.txt',
                                delimiter=',')
        # cp_curve_cp = niayifar_power_model(cp_curve_wind_speed)/(0.5*air_density*cp_curve_wind_speed**3*Ar)
        cp_curve_cp = power_data[:, 1] * (1E6) / (0.5 * air_density *
                                                  power_data[:, 0]**3 * Ar)
        cp_curve_wind_speed = power_data[:, 0]
        cp_curve_spline = UnivariateSpline(cp_curve_wind_speed,
                                           cp_curve_cp,
                                           ext='const')
        cp_curve_spline.set_smoothing_factor(.0001)

    elif turbine_type == 'NREL5MW':

        # define turbine size
        rotor_diameter = 126.4  # (m)
        hub_height = 90.0

        z_ref = 80.0  # m
        z_0 = 0.0

        # load performance characteristics
        cut_in_speed = 3.  # m/s
        cut_out_speed = 25.  # m/s
        rated_wind_speed = 11.4  # m/s
        rated_power = 5000.  # kW
        generator_efficiency = 0.944

        filename = input_directory + "NREL5MWCPCT_dict.p"
        # filename = "../input_files/NREL5MWCPCT_smooth_dict.p"
        import pickle

        data = pickle.load(open(filename, "rb"), encoding='latin1')
        ct_curve = np.zeros([data['wind_speed'].size, 2])
        ct_curve_wind_speed = data['wind_speed']
        ct_curve_ct = data['CT']

        # cp_curve_cp = data['CP']
        # cp_curve_wind_speed = data['wind_speed']

        loc0 = np.where(data['wind_speed'] < 11.55)
        loc1 = np.where(data['wind_speed'] > 11.7)

        cp_curve_cp = np.hstack([data['CP'][loc0], data['CP'][loc1]])
        cp_curve_wind_speed = np.hstack(
            [data['wind_speed'][loc0], data['wind_speed'][loc1]])
        cp_curve_spline = UnivariateSpline(cp_curve_wind_speed,
                                           cp_curve_cp,
                                           ext='const')
        cp_curve_spline.set_smoothing_factor(.000001)
    else:
        raise ValueError("Turbine type is undefined.")

    # load starting locations
    layout_directory = input_directory

    layout_data = np.loadtxt(
        layout_directory +
        "layouts/round_38turbs/nTurbs38_spacing5_layout_%i.txt" %
        layout_number)
    # layout_data = np.loadtxt(layout_directory + "layouts/grid_16turbs/nTurbs16_spacing5_layout_%i.txt" % layout_number)
    # layout_data = np.loadtxt(layout_directory+"layouts/nTurbs9_spacing5_layout_%i.txt" % layout_number)

    turbineX = layout_data[:, 0] * rotor_diameter + rotor_diameter / 2.
    turbineY = layout_data[:, 1] * rotor_diameter + rotor_diameter / 2.

    turbineX_init = np.copy(turbineX)
    turbineY_init = np.copy(turbineY)

    nTurbines = turbineX.size

    # create boundary specifications
    boundary_radius = 0.5 * (rotor_diameter * 4000. / 126.4 - rotor_diameter
                             )  # 1936.8
    center = np.array([boundary_radius, boundary_radius]) + rotor_diameter / 2.
    start_min_spacing = 5.
    nVertices = 1
    boundary_center_x = center[0]
    boundary_center_y = center[1]
    xmax = np.max(turbineX)
    ymax = np.max(turbineY)
    xmin = np.min(turbineX)
    ymin = np.min(turbineY)
    boundary_radius_plot = boundary_radius + 0.5 * rotor_diameter

    plot_round_farm(turbineX,
                    turbineY,
                    rotor_diameter, [boundary_center_x, boundary_center_y],
                    boundary_radius,
                    show_start=show_start)
    # quit()
    # initialize input variable arrays
    nTurbs = nTurbines
    rotorDiameter = np.zeros(nTurbs)
    hubHeight = np.zeros(nTurbs)
    axialInduction = np.zeros(nTurbs)
    Ct = np.zeros(nTurbs)
    Cp = np.zeros(nTurbs)
    generatorEfficiency = np.zeros(nTurbs)
    yaw = np.zeros(nTurbs)
    minSpacing = 2.  # number of rotor diameters

    # define initial values
    for turbI in range(0, nTurbs):
        rotorDiameter[turbI] = rotor_diameter  # m
        hubHeight[turbI] = hub_height  # m
        axialInduction[turbI] = 1.0 / 3.0
        Ct[turbI] = 4.0 * axialInduction[turbI] * (1.0 - axialInduction[turbI])
        # print(Ct)
        Cp[turbI] = 4.0 * 1.0 / 3.0 * np.power((1 - 1.0 / 3.0), 2)
        generatorEfficiency[turbI] = generator_efficiency
        yaw[turbI] = 0.  # deg.

    # Define flow properties
    if wind_rose_file == 'nantucket':
        # windRose = np.loadtxt(input_directory + 'nantucket_windrose_ave_speeds.txt')
        windRose = np.loadtxt(input_directory +
                              'nantucket_wind_rose_for_LES.txt')
        windDirections = windRose[:, 0]
        windSpeeds = windRose[:, 1]
        windFrequencies = windRose[:, 2]
        size = np.size(windDirections)
    elif wind_rose_file == 'amalia':
        windRose = np.loadtxt(
            input_directory +
            'windrose_amalia_directionally_averaged_speeds.txt')
        windDirections = windRose[:, 0]
        windSpeeds = windRose[:, 1]
        windFrequencies = windRose[:, 2]
        size = np.size(windDirections)
    elif wind_rose_file == 'directional':
        windRose = np.loadtxt(input_directory + 'directional_windrose.txt')
        windDirections = windRose[:, 0]
        windSpeeds = windRose[:, 1]
        windFrequencies = windRose[:, 2]
        size = np.size(windDirections)
    elif wind_rose_file == '1d':
        windDirections = np.array([270.])
        windSpeeds = np.array([8.0])
        windFrequencies = np.array([1.0])
        size = np.size(windDirections)
    else:
        size = 20
        windDirections = np.linspace(0, 270, size)
        windFrequencies = np.ones(size) / size

    wake_model_options = {
        'nSamples': 0,
        'nRotorPoints': nRotorPoints,
        'use_ct_curve': True,
        'ct_curve_ct': ct_curve_ct,
        'ct_curve_wind_speed': ct_curve_wind_speed,
        'interp_type': 1,
        'use_rotor_components': False,
        'differentiable': differentiable,
        'verbose': False,
        'variant': "CosineFortran"
    }

    if MODELS[model] == 'BPA':
        # initialize problem
        prob = om.Problem(
            model=OptAEP(nTurbines=nTurbs,
                         nDirections=windDirections.size,
                         nVertices=nVertices,
                         minSpacing=minSpacing,
                         differentiable=differentiable,
                         use_rotor_components=False,
                         wake_model=gauss_wrapper,
                         params_IdepVar_func=add_gauss_params_IndepVarComps,
                         params_IdepVar_args={'nRotorPoints': nRotorPoints},
                         wake_model_options=wake_model_options,
                         cp_points=cp_curve_cp.size,
                         cp_curve_spline=cp_curve_spline,
                         record_function_calls=True,
                         runparallel=False))
    elif MODELS[model] == 'FLORIS':
        # initialize problem
        prob = om.Problem(
            model=OptAEP(nTurbines=nTurbs,
                         nDirections=windDirections.size,
                         nVertices=nVertices,
                         minSpacing=minSpacing,
                         differentiable=differentiable,
                         use_rotor_components=False,
                         wake_model=floris_wrapper,
                         cp_points=cp_curve_cp.size,
                         params_IdepVar_func=add_floris_params_IndepVarComps,
                         params_IdepVar_args={},
                         record_function_calls=True))

    elif MODELS[model] == 'JENSEN':
        # initialize problem
        prob = om.Problem(
            model=OptAEP(nTurbines=nTurbs,
                         nDirections=windDirections.size,
                         nVertices=nVertices,
                         minSpacing=minSpacing,
                         differentiable=False,
                         use_rotor_components=False,
                         wake_model=jensen_wrapper,
                         wake_model_options=wake_model_options,
                         params_IdepVar_func=add_jensen_params_IndepVarComps,
                         params_IdepVar_args={},
                         runparallel=False,
                         record_function_calls=True))

    else:
        raise ValueError(
            'The %s model is not currently available. Please select BPA or FLORIS'
            % (MODELS[model]))
    # prob.model.deriv_options['type'] = 'fd'
    # prob.model.deriv_options['form'] = 'central'
    # prob.model.deriv_options['step_size'] = 1.0e-8
    # prob.model.linear_solver = om.LinearBlockGS()
    # prob.model.linear_solver.options['iprint'] = 0
    # prob.model.linear_solver.options['maxiter'] = 5
    #
    # prob.model.nonlinear_solver = om.NonlinearBlockGS()
    # prob.model.nonlinear_solver.options['iprint'] = 0

    # prob.model.linear_solver = om.DirectSolver()

    prob.driver = om.pyOptSparseDriver()

    if opt_algorithm == 'snopt':
        # set up optimizer
        prob.driver.options['optimizer'] = 'SNOPT'
        prob.driver.options['gradient method'] = 'snopt_fd'

        # set optimizer options
        prob.driver.opt_settings['Verify level'] = 3
        # set optimizer options
        prob.driver.opt_settings['Major optimality tolerance'] = 1e-4

        prob.driver.opt_settings[
            'Print file'] = output_directory + 'SNOPT_print_multistart_%iturbs_%sWindRose_%idirs_%sModel_RunID%i.out' % (
                nTurbs, wind_rose_file, size, MODELS[model], run_number)
        prob.driver.opt_settings[
            'Summary file'] = output_directory + 'SNOPT_summary_multistart_%iturbs_%sWindRose_%idirs_%sModel_RunID%i.out' % (
                nTurbs, wind_rose_file, size, MODELS[model], run_number)

        prob.model.add_constraint('sc',
                                  lower=np.zeros(
                                      int(((nTurbs - 1.) * nTurbs / 2.))),
                                  scaler=1E-2)  # ,
        # active_tol=(2. * rotor_diameter) ** 2)
        prob.model.add_constraint('boundaryDistances',
                                  lower=(np.zeros(1 * turbineX.size)),
                                  scaler=1E-2)  # ,
        # active_tol=2. * rotor_diameter)

        prob.driver.options['dynamic_derivs_sparsity'] = True

    elif opt_algorithm == 'ga':

        prob.driver.options['optimizer'] = 'NSGA2'

        prob.driver.opt_settings['PrintOut'] = 1

        prob.driver.opt_settings['maxGen'] = 50000

        prob.driver.opt_settings['PopSize'] = 10 * nTurbines * 2

        # prob.driver.opt_settings['pMut_real'] = 0.001

        prob.driver.opt_settings['xinit'] = 1

        prob.driver.opt_settings['rtol'] = 1E-4

        prob.driver.opt_settings['atol'] = 1E-4

        prob.driver.opt_settings['min_tol_gens'] = 200

        prob.driver.opt_settings['file_number'] = run_number

        prob.model.add_constraint('sc',
                                  lower=np.zeros(
                                      int(((nTurbs - 1.) * nTurbs / 2.))),
                                  scaler=1E-2)
        prob.model.add_constraint('boundaryDistances',
                                  lower=(np.zeros(1 * turbineX.size)),
                                  scaler=1E-2)

    elif opt_algorithm == 'ps':

        prob.driver.options['optimizer'] = 'ALPSO'

        prob.driver.opt_settings['fileout'] = 1

        prob.driver.opt_settings[
            'filename'] = output_directory + 'ALPSO_summary_multistart_%iturbs_%sWindRose_%idirs_%sModel_RunID%i.out' % (
                nTurbs, wind_rose_file, size, MODELS[model], run_number)

        prob.driver.opt_settings['maxOuterIter'] = 10000

        prob.driver.opt_settings['SwarmSize'] = 24

        prob.driver.opt_settings[
            'xinit'] = 1  # Initial Position Flag (0 - no position, 1 - position given)

        prob.driver.opt_settings[
            'Scaling'] = 1  # Design Variables Scaling Flag (0 - no scaling, 1 - scaling between [-1, 1])

        # prob.driver.opt_settings['rtol'] = 1E-3  # Relative Tolerance for Lagrange Multipliers
        #
        # prob.driver.opt_settings['atol'] = 1E-2  # Absolute Tolerance for Lagrange Function
        #
        # prob.driver.opt_settings['dtol'] = 1E-1  # Relative Tolerance in Distance of All Particles to Terminate (GCPSO)
        #
        # prob.driver.opt_settings['itol'] = 1E-3  # Absolute Tolerance for Inequality constraints
        #
        # prob.driver.opt_settings['dynInnerIter'] = 1  # Dynamic Number of Inner Iterations Flag

        prob.model.add_constraint('sc',
                                  lower=np.zeros(
                                      int(((nTurbs - 1.) * nTurbs / 2.))),
                                  scaler=1E-2)
        prob.model.add_constraint('boundaryDistances',
                                  lower=(np.zeros(1 * turbineX.size)),
                                  scaler=1E-2)

        # prob.driver.add_objective('obj', scaler=1E0)
    prob.model.add_objective('obj', scaler=1E0)

    # select design variables
    prob.model.add_design_var('turbineX',
                              scaler=1E3,
                              lower=np.zeros(nTurbines),
                              upper=np.ones(nTurbines) * 3. * boundary_radius)
    prob.model.add_design_var('turbineY',
                              scaler=1E3,
                              lower=np.zeros(nTurbines),
                              upper=np.ones(nTurbines) * 3. * boundary_radius)

    # prob.model.ln_solver.options['single_voi_relevance_reduction'] = True
    # prob.model.ln_solver.options['mode'] = 'rev'

    # if run_number == 0:
    #     # set up recorder
    #     recorder = SqliteRecorder(output_directory+'recorder_database_run%i' % run_number)
    #     recorder.options['record_params'] = True
    #     recorder.options['record_metadata'] = False
    #     recorder.options['record_unknowns'] = True
    #     recorder.options['record_derivs'] = False
    #     recorder.options['includes'] = ['turbineX', 'turbineY', 'AEP']
    #     prob.driver.add_recorder(recorder)

    # recorder = om.SqliteRecorder(output_directory + 'recorded_data.sql')
    #     # prob.driver.add_recorder(recorder)
    #     # prob.driver.recording_options['includes'] = ['']
    #     # prob.driver.recording_options['excludes'] = ['*']
    #     # prob.driver.recording_options['record_constraints'] = False
    #     # prob.driver.recording_options['record_derivatives'] = False
    #     # prob.driver.recording_options['record_desvars'] = False
    #     # prob.driver.recording_options['record_inputs'] = False
    #     # prob.driver.recording_options['record_model_metadata'] = True
    #     # prob.driver.recording_options['record_objectives'] = False
    #     # prob.driver.recording_options['record_responses'] = False

    # set up profiling
    # from plantenergy.GeneralWindFarmComponents import WindFarmAEP
    # methods = [
    #     ('*', (WindFarmAEP,))
    # ]
    #
    # iprofile.setup(methods=methods)

    print("almost time for setup")
    tic = time.time()
    print("entering setup at time = ", tic)
    prob.setup(check=True)
    toc = time.time()
    print("setup complete at time = ", toc)

    # print the results
    print(('Problem setup took %.03f sec.' % (toc - tic)))

    # assign initial values to design variables
    prob['turbineX'] = np.copy(turbineX)
    prob['turbineY'] = np.copy(turbineY)
    for direction_id in range(0, windDirections.size):
        prob['yaw%i' % direction_id] = yaw

    # assign values to constant inputs (not design variables)
    prob['rotorDiameter'] = rotorDiameter
    prob['hubHeight'] = hubHeight
    prob['axialInduction'] = axialInduction
    prob['generatorEfficiency'] = generatorEfficiency
    prob['windSpeeds'] = windSpeeds
    prob['air_density'] = air_density
    prob['windDirections'] = windDirections
    prob['windFrequencies'] = windFrequencies
    prob['Ct_in'] = Ct
    prob['Cp_in'] = Cp
    prob['cp_curve_cp'] = cp_curve_cp
    prob['cp_curve_wind_speed'] = cp_curve_wind_speed
    cutInSpeeds = np.ones(nTurbines) * cut_in_speed
    prob['cut_in_speed'] = cutInSpeeds
    ratedPowers = np.ones(nTurbines) * rated_power
    prob['rated_power'] = ratedPowers

    # assign values to turbine states
    prob['cut_in_speed'] = np.ones(nTurbines) * cut_in_speed
    prob['cut_out_speed'] = np.ones(nTurbines) * cut_out_speed
    prob['rated_power'] = np.ones(nTurbines) * rated_power
    prob['rated_wind_speed'] = np.ones(nTurbines) * rated_wind_speed
    prob['use_power_curve_definition'] = True
    prob['gen_params:CTcorrected'] = True
    prob['gen_params:CPcorrected'] = True

    # assign boundary values
    prob['boundary_center'] = np.array([boundary_center_x, boundary_center_y])
    prob['boundary_radius'] = boundary_radius

    if MODELS[model] == 'BPA':
        prob['model_params:wake_combination_method'] = np.copy(
            wake_combination_method)
        prob['model_params:ti_calculation_method'] = np.copy(
            ti_calculation_method)
        prob['model_params:wake_model_version'] = np.copy(wake_model_version)
        prob['model_params:wec_factor'] = 1.0
        prob['model_params:wec_spreading_angle'] = 0.0
        prob['model_params:calc_k_star'] = np.copy(calc_k_star_calc)
        prob['model_params:sort'] = np.copy(sort_turbs)
        prob['model_params:z_ref'] = np.copy(z_ref)
        prob['model_params:z_0'] = np.copy(z_0)
        prob['model_params:ky'] = np.copy(k_calc)
        prob['model_params:kz'] = np.copy(k_calc)
        prob['model_params:print_ti'] = np.copy(print_ti)
        prob['model_params:shear_exp'] = np.copy(shear_exp)
        prob['model_params:I'] = np.copy(TI)
        prob['model_params:sm_smoothing'] = np.copy(sm_smoothing)
        prob['model_params:WECH'] = WECH
        if nRotorPoints > 1:
            prob['model_params:RotorPointsY'], prob[
                'model_params:RotorPointsZ'] = sunflower_points(nRotorPoints)

    elif MODELS[model] == 'JENSEN':
        prob['model_params:spread_angle'] = 20.0

    prob.run_model()
    AEP_init_calc = np.copy(prob['AEP'])
    print(AEP_init_calc * 1E-6)

    if MODELS[model] == 'BPA':
        prob['model_params:ti_calculation_method'] = np.copy(ti_opt_method)
        prob['model_params:calc_k_star'] = np.copy(calc_k_star_opt)

    prob.run_model()
    AEP_init_opt = np.copy(prob['AEP'])
    AEP_run_opt = np.copy(AEP_init_opt)
    print(AEP_init_opt * 1E-6)

    config.obj_func_calls_array[:] = 0.0
    config.sens_func_calls_array[:] = 0.0

    expansion_factor_last = 0.0

    tict = time.time()
    if relax:
        for expansion_factor, i in zip(
                expansion_factors,
                np.arange(0, expansion_factors.size)):  # best so far
            # print("func calls: ", config.obj_func_calls_array, np.sum(config.obj_func_calls_array))
            # print("grad func calls: ", config.sens_func_calls_array, np.sum(config.sens_func_calls_array))
            # AEP_init_run_opt = prob['AEP']

            if expansion_factor_last == expansion_factor:
                ti_opt_method = np.copy(final_ti_opt_method)

            print("starting run with exp. fac = ", expansion_factor)

            if opt_algorithm == 'snopt':
                prob.driver.opt_settings['Print file'] = output_directory + \
                                                         'SNOPT_print_multistart_%iturbs_%sWindRose_%idirs_%sModel_RunID%i_EF%.3f_TItype%i.out' % (
                                                             nTurbs, wind_rose_file, size, MODELS[model], run_number,
                                                             expansion_factor, ti_opt_method)

                prob.driver.opt_settings['Summary file'] = output_directory + \
                                                           'SNOPT_summary_multistart_%iturbs_%sWindRose_%idirs_%sModel_RunID%i_EF%.3f_TItype%i.out' % (
                                                               nTurbs, wind_rose_file, size, MODELS[model], run_number,
                                                               expansion_factor, ti_opt_method)
            elif opt_algorithm == 'ps':
                prob.driver.opt_settings[
                    'filename'] = output_directory + 'ALPSO_summary_multistart_%iturbs_%sWindRose_%idirs_%sModel_RunID%i.out' % (
                        nTurbs, wind_rose_file, size, MODELS[model],
                        run_number)

            turbineX = np.copy(prob['turbineX'])
            turbineY = np.copy(prob['turbineY'])
            prob['turbineX'] = np.copy(turbineX)
            prob['turbineY'] = np.copy(turbineY)

            if MODELS[model] == 'BPA':
                prob['model_params:ti_calculation_method'] = np.copy(
                    ti_opt_method)
                prob['model_params:calc_k_star'] = np.copy(calc_k_star_opt)
                if wec_method in ('diam', 'hybrid'):
                    prob['model_params:wec_factor'] = np.copy(expansion_factor)
                elif wec_method == 'angle':
                    prob['model_params:wec_spreading_angle'] = np.copy(
                        expansion_factor)

            # run the problem
            print('start %s run' % (MODELS[model]))
            tic = time.time()
            # iprofile.start()
            config.obj_func_calls_array[prob.comm.rank] = 0.0
            config.sens_func_calls_array[prob.comm.rank] = 0.0
            prob.run_driver()
            # quit()
            toc = time.time()
            obj_calls = np.copy(config.obj_func_calls_array[0])
            sens_calls = np.copy(config.sens_func_calls_array[0])
            # iprofile.stop()
            # print(np.sum(config.obj_func_calls_array))
            # print(np.sum(config.sens_func_calls_array))
            print('end %s run' % (MODELS[model]))

            run_time = toc - tic
            # print(run_time, expansion_factor)

            AEP_run_opt = np.copy(prob['AEP'])
            # print("AEP improvement = ", AEP_run_opt / AEP_init_opt)

            if MODELS[model] == 'BPA':
                prob['model_params:wec_factor'] = 1.0
                prob['model_params:wec_spreading_angle'] = 0.0
                prob['model_params:ti_calculation_method'] = np.copy(
                    ti_calculation_method)
                prob['model_params:calc_k_star'] = np.copy(calc_k_star_calc)

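            # re-evaluate AEP with the unrelaxed, physical model settings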
            prob.run_model()
            AEP_run_calc = np.copy(prob['AEP'])
            # print("compare: ", aep_run, prob['AEP'])
            print("AEP calc improvement = ", AEP_run_calc / AEP_init_calc)

            if prob.model.comm.rank == 0:
                # if save_aep:
                #     np.savetxt(output_directory + '%s_multistart_aep_results_%iturbs_%sWindRose_%idirs_%sModel_RunID%i_EF%.3f.txt' % (
                #         opt_algorithm, nTurbs, wind_rose_file, size, MODELS[model], run_number, expansion_factor),
                #                np.c_[AEP_init, prob['AEP']],
                #                header="Initial AEP, Final AEP")
                if save_locations:
                    np.savetxt(
                        output_directory +
                        '%s_multistart_locations_%iturbs_%sWindRose_%idirs_%s_run%i_EF%.3f_TItype%i.txt'
                        % (opt_algorithm, nTurbs, wind_rose_file, size,
                           MODELS[model], run_number, expansion_factor,
                           ti_opt_method),
                        np.c_[turbineX_init, turbineY_init, prob['turbineX'],
                              prob['turbineY']],
                        header=
                        "initial turbineX, initial turbineY, final turbineX, final turbineY"
                    )
                # if save_time:
                #     np.savetxt(output_directory + '%s_multistart_time_%iturbs_%sWindRose_%idirs_%s_run%i_EF%.3f.txt' % (
                #         opt_algorithm, nTurbs, wind_rose_file, size, MODELS[model], run_number, expansion_factor),
                #                np.c_[run_time],
                #                header="run time")
                if save_time and save_aep and rec_func_calls:
                    output_file = output_directory + '%s_multistart_rundata_%iturbs_%sWindRose_%idirs_%s_run%i.txt' \
                                  % (opt_algorithm, nTurbs, wind_rose_file, size, MODELS[model], run_number)
                    f = open(output_file, "a")

                    if i == 0:
                        header = "run number, exp fac, ti calc, ti opt, aep init calc (kW), aep init opt (kW), " \
                                 "aep run calc (kW), aep run opt (kW), run time (s), obj func calls, sens func calls"
                    else:
                        header = ''

                    np.savetxt(f,
                               np.c_[run_number, expansion_factor,
                                     ti_calculation_method, ti_opt_method,
                                     AEP_init_calc, AEP_init_opt, AEP_run_calc,
                                     AEP_run_opt, run_time, obj_calls,
                                     sens_calls],
                               header=header)
                    f.close()
            expansion_factor_last = expansion_factor
    else:
        # run the problem
        print('start %s run' % (MODELS[model]))
        # cProfile.run('prob.run_driver()')
        if MODELS[model] == 'BPA':
            # prob['model_params:wec_factor'] = 1.
            prob['model_params:ti_calculation_method'] = np.copy(ti_opt_method)
            prob['model_params:calc_k_star'] = np.copy(calc_k_star_opt)
        tic = time.time()
        # cProfile.run('prob.run_driver()')
        config.obj_func_calls_array[prob.comm.rank] = 0.0
        config.sens_func_calls_array[prob.comm.rank] = 0.0
        prob.run_driver()
        # quit()
        toc = time.time()
        obj_calls = np.copy(config.obj_func_calls_array[0])
        sens_calls = np.copy(config.sens_func_calls_array[0])

        run_time = toc - tic

        AEP_run_opt = np.copy(prob['AEP'])
        # print("AEP improvement = ", AEP_run_calc / AEP_init_calc)

        if MODELS[model] == 'BPA':
            prob['model_params:wec_factor'] = 1.0
            prob['model_params:wec_spreading_angle'] = 0.0
            prob['model_params:ti_calculation_method'] = np.copy(
                ti_calculation_method)
            prob['model_params:calc_k_star'] = np.copy(calc_k_star_calc)

        prob.run_model()
        AEP_run_calc = np.copy(prob['AEP'])

        if prob.model.comm.rank == 0:

            if save_locations:
                np.savetxt(
                    output_directory +
                    '%s_multistart_locations_%iturbs_%sWindRose_%idirs_%s_run%i.txt'
                    % (opt_algorithm, nTurbs, wind_rose_file, size,
                       MODELS[model], run_number),
                    np.c_[turbineX_init, turbineY_init, prob['turbineX'],
                          prob['turbineY']],
                    header=
                    "initial turbineX, initial turbineY, final turbineX, final turbineY"
                )

            if save_time and save_aep and rec_func_calls:
                output_file = output_directory + '%s_multistart_rundata_%iturbs_%sWindRose_%idirs_%s_run%i.txt' \
                              % (opt_algorithm, nTurbs, wind_rose_file, size, MODELS[model], run_number)
                f = open(output_file, "a")

                header = "run number, ti calc, ti opt, aep init calc (kW), aep init opt (kW), " \
                         "aep run calc (kW), aep run opt (kW), run time (s), obj func calls, sens func calls"

                np.savetxt(f,
                           np.c_[run_number, ti_calculation_method,
                                 ti_opt_method, AEP_init_calc, AEP_init_opt,
                                 AEP_run_calc, AEP_run_opt, run_time,
                                 obj_calls, sens_calls],
                           header=header)
                f.close()

    turbineX_end = np.copy(prob['turbineX'])
    turbineY_end = np.copy(prob['turbineY'])

    toct = time.time()
    total_time = toct - tict

    if prob.model.comm.rank == 0:

        # print the results
        print('Opt. calculation took %.03f sec.' % total_time)

        for direction_id in range(0, windDirections.size):
            print('yaw%i (deg) = ' % direction_id,
                  prob['yaw%i' % direction_id])

        print('turbine X positions in wind frame (m): %s' % prob['turbineX'])
        print('turbine Y positions in wind frame (m): %s' % prob['turbineY'])
        print('wind farm power in each direction (kW): %s' % prob['dirPowers'])
        print('Initial AEP (kWh): %s' % AEP_init_calc)
        print('Final AEP (kWh): %s' % AEP_run_calc)
        print('AEP improvement: %s' % (AEP_run_calc / AEP_init_calc))

    plot_round_farm(turbineX_end,
                    turbineY_end,
                    rotor_diameter, [boundary_center_x, boundary_center_y],
                    boundary_radius,
                    show_start=show_end)

    return 0
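The relaxed branch above is a continuation pattern: solve a sequence of smoothed
problems, tightening toward the physical model, and warm-start each solve from the
previous optimum. Below is a minimal runnable sketch of that idea on a toy 1-D
objective, with Gaussian smoothing standing in for the wake-expansion factor; all
names are illustrative and none come from the script above.

import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.optimize import minimize

xs = np.linspace(-10.0, 10.0, 2001)
base = np.sin(3.0 * xs) + 0.1 * (xs - 2.0) ** 2  # multimodal toy objective

x_opt = -8.0  # deliberately poor starting point
for sigma in [200, 50, 10, 0]:  # smoothing width in samples; 0 = original objective
    smooth = gaussian_filter1d(base, sigma) if sigma > 0 else base
    res = minimize(lambda x, s=smooth: float(np.interp(x[0], xs, s)),
                   [x_opt], method='Nelder-Mead')
    x_opt = float(res.x[0])  # warm-start the next, less-smoothed stage
print('final x:', x_opt)  # lands near the global minimum at x ~ 1.9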
Example #53
0
                        RA_line.append(temp_ra_line)

                else:
                    if abs(ny[p]*180/np.pi + 0.001 - dec) < 0.5*float(res):
                        powout_RA_line.append(float(nz[p]))
                        RA_line.append(180. - float(nx[p])*180/np.pi)

            # if ra_offset is set, re-sort the arrays because RA starts at 180 deg rather than 0
            if args.ra_offset:
                powout_RA_line = [x for _,x in sorted(zip(RA_line,powout_RA_line))]
                RA_line = sorted(RA_line)
                # work around the duplicated 360 deg value at the end of the sorted array
                RA_line = [0.] + RA_line[:-1]
                powout_RA_line = [powout_RA_line[-1]] + powout_RA_line[:-1]

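            # spline through (power - max/2): its roots bracket the half-power width in RA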
            spline = UnivariateSpline(RA_line, np.array(powout_RA_line) - np.max(powout_RA_line)/2., s=0)
            if len(spline.roots()) != 2:
                #print(spline.roots())
                #print(ra,dec)
                r1 = spline.roots()[-1]
                r2 = spline.roots()[0]
            else:
                r1 = spline.roots()[0]
                r2 = spline.roots()[1]

            diff = r2 - r1
            if diff > 180. and dec != -72.0:
                diff = r1 - (r2 -360)
                max_ra = r1 - (diff)/2.
            else:
                max_ra = r1 + (diff)/2.
Example #54
0
                                           plotting=(i == 2))
        print("histograms made")

        plt.figure(-1)
        #plt.scatter(redshift[id_ == True], colors[i][id_ == True], s = 0.01, color = 'b', label = "Blue")
        #plt.scatter(redshift[id_ == False], colors[i][id_ == False], s = 0.01, color = 'r', label = "Red")
        z_edges = np.linspace(min_z, max_z, num=z_step)
        z_middles = z_edges[:-1] + (z_edges[1] - z_edges[0]) / 2.

        idx = (cuts > 0.0) & (cuts < 1.5)
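        # keep only bins where the fitted cut takes a plausible value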
        z_middles = z_middles[idx]
        cuts = cuts[idx]
        red_con = red_con[idx]
        blue_con = blue_con[idx]
        if (i == 2):
            spl = UnivariateSpline(z_middles, cuts)
            rrange = np.linspace(0, 0.35, 1000)

            plt.plot(rrange, spl(rrange), 'k-', lw=0.3, label="spline")

            plt.ylim([0, 3])
            plt.xlim([0.03, 0.33])
            plt.legend()
            plt.xlabel("z")
            plt.ylabel(labels[i])
            plt.savefig("%s/%s_vs_z.pdf" % (output_dir, labels[i]))

        idx = (red_con > 0.0) & (blue_con > 0.0)
        red_con = red_con[idx]
        blue_con = blue_con[idx]
        contamination = np.sum(red_con) / np.sum(red_con + blue_con)
Example #55
0
def MC_variations(dummy,map_id=18388,padding_ratio=a.padding_ratio,map_size=a.map_size,\
	sep=a.sep,N_sims=a.N_sims,noise_power=a.noise_power,FWHM=a.FWHM,\
	slope=a.slope,lMin=a.lMin,lMax=a.lMax,KKmethod=a.KKmethod):
	"""Compute MC values of estimated quantities (yanked from PaddedPower.padded_estimates)"""
	
	print 'Starting %s' %dummy
	# First compute high-resolution B-mode map from padded-real space map with desired padding ratio
	from .PaddedPower import MakePaddedPower
	Bpow=MakePaddedPower(map_id,padding_ratio=padding_ratio,map_size=map_size,sep=sep)
	
	# Input directory:
	inDir=a.root_dir+'%sdeg%s/' %(map_size,sep)
	
	# Compute the noise power map using the B-mode map as a template
	from .NoisePower import noise_map
	noiseMap=noise_map(powMap=Bpow.copy(),noise_power=noise_power,FWHM=FWHM,windowFactor=Bpow.windowFactor)
	
	# Compute total map
	totMap=Bpow.copy()
	totMap.powerMap=Bpow.powerMap+noiseMap.powerMap
	
	# Apply the KK estimators
	from .KKtest import zero_estimator
	#A_est,fs_est,fc_est,Afs_est,Afc_est=zero_estimator(totMap.copy(),lMin=lMin,lMax=lMax,slope=slope,factor=1e-10,FWHM=FWHM,noise_power=noise_power,KKmethod=KKmethod)
	# (Factor is the expected monopole amplitude, to speed convergence)
	
	# Compute anisotropy fraction and angle
	#ang_est=0.25*180./np.pi*np.arctan(Afs_est/Afc_est) # in degrees
	#frac_est=np.sqrt(fs_est**2.+fc_est**2.)
		
	## Run MC Simulations	
	# First compute 1D power spectrum by binning in annuli
	from hades.PowerMap import oneD_binning
	l_cen,mean_pow = oneD_binning(totMap.copy(),0.8*a.lMin,1.*a.lMax,0.8*a.l_step,binErr=False,windowFactor=Bpow.windowFactor) 
	# gives central bin l and mean power in each annulus, using window-function corrections (from the unpadded map)
	
	# Compute univariate spline model fit to 1D power spectrum
	from scipy.interpolate import UnivariateSpline
	spline_fun = UnivariateSpline(np.log10(l_cen),np.log10(mean_pow),k=4) # compute spline of log data
	
	def model_power(ell):
		return 10.**spline_fun(np.log10(ell)) # this estimates 1D spectrum for any ell
	
	# Initialise arrays
	A_MC,fs_MC,fc_MC,Afs_MC,Afc_MC,epsilon_MC,ang_MC=[],[],[],[],[],[],[]
	
	from hades.NoisePower import single_MC
	
	for n in range(N_sims): # for each MC map
		MC_map=single_MC(totMap.copy(),model_power,windowFactor=Bpow.windowFactor) # create random map from isotropic spectrum
		output=zero_estimator(MC_map.copy(),lMin=lMin,lMax=lMax,slope=slope,factor=1e-10,FWHM=FWHM,noise_power=noise_power,KKmethod=KKmethod) 
		# compute MC anisotropy parameters  
		A_MC.append(output[0])
		fs_MC.append(output[1])
		fc_MC.append(output[2])
		Afs_MC.append(output[3])
		Afc_MC.append(output[4])
		epsilon_MC.append(np.sqrt(output[1]**2.+output[2]**2.))
		ang_MC.append(0.25*180./np.pi*np.arctan(output[3]/output[4]))
		
	return A_MC,fs_MC,fc_MC,Afs_MC,Afc_MC,epsilon_MC,ang_MC
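The log-log spline trick in MC_variations is worth isolating: fitting the spline to
log10 of the binned power and exponentiating on evaluation keeps the model positive
and handles power-law spectra well. A small self-contained sketch with synthetic bin
values (stand-ins, not data from the function above):

import numpy as np
from scipy.interpolate import UnivariateSpline

rng = np.random.default_rng(0)
l_cen = np.logspace(2.0, 3.5, 25)  # synthetic bin centres
mean_pow = 1e-3 * l_cen ** -2.4 * (1.0 + 0.05 * rng.standard_normal(25))

spline_fun = UnivariateSpline(np.log10(l_cen), np.log10(mean_pow), k=4)  # spline of log data

def model_power(ell):
    return 10. ** spline_fun(np.log10(ell))  # positive by construction

print(model_power(500.))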
Example #56
0
def lrs_distortion(input_model, reference_files):
    """
    The LRS-FIXEDSLIT and LRS-SLITLESS WCS pipeline.

    Transform from subarray (x, y) to (v2, v3, lambda) using
    the "specwcs" and "distortion" reference files.

    """

    # subarray to full array transform
    subarray2full = subarray_transform(input_model)

    # full array to v2v3 transform for the ordinary imager
    with DistortionModel(reference_files['distortion']) as dist:
        distortion = dist.model

    # Combine models to create subarray to v2v3 distortion
    if subarray2full is not None:
        subarray_dist = subarray2full | distortion
    else:
        subarray_dist = distortion

    # Read in the reference table data and get the zero point (SIAF reference point)
    # of the LRS in the subarray ref frame
    # We'd like to open this file as a DataModel, so we can consolidate
    # the S3 URI handling to one place.  The S3-related code here can
    # be removed once that has been changed.
    if s3_utils.is_s3_uri(reference_files['specwcs']):
        ref = fits.open(s3_utils.get_object(reference_files['specwcs']))
    else:
        ref = fits.open(reference_files['specwcs'])
    with ref:
        lrsdata = np.array([d for d in ref[1].data])
        # Get the zero point from the reference data.
        # The zero_point is X, Y  (which should be COLUMN, ROW)
        # These are 1-indexed in CDP-7 (i.e., SIAF convention) so must be converted to 0-indexed
        if input_model.meta.exposure.type.lower() == 'mir_lrs-fixedslit':
            zero_point = ref[0].header['imx'] - 1, ref[0].header['imy'] - 1
        elif input_model.meta.exposure.type.lower() == 'mir_lrs-slitless':
            zero_point = ref[0].header['imxsltl'] - 1, ref[0].header['imysltl'] - 1
            # Transform to slitless subarray from full array
            zero_point = subarray2full.inverse(zero_point[0], zero_point[1])

    # In the lrsdata reference table, x_center, y_center, wavelength describe the location of the
    # centroid trace along the detector in pixels relative to the nominal location.
    # (x0,y0)=UL, (x1,y1)=UR, (x2,y2)=LR, (x3,y3)=LL define the corners of the box within which
    # the distortion and wavelength calibration were derived.
    xcen = lrsdata[:, 0]
    ycen = lrsdata[:, 1]
    wavetab = lrsdata[:, 2]
    x0 = lrsdata[:, 3]
    y0 = lrsdata[:, 4]
    x1 = lrsdata[:, 5]
    y2 = lrsdata[:, 8]

    # If in fixed slit mode, define the bounding box using the corner locations provided in
    # the CDP reference file.
    if input_model.meta.exposure.type.lower() == 'mir_lrs-fixedslit':

        bb_sub = ((np.floor(x0.min() + zero_point[0]) - 0.5, np.ceil(x1.max() + zero_point[0]) + 0.5),
                  (np.floor(y2.min() + zero_point[1]) - 0.5, np.ceil(y0.max() + zero_point[1]) + 0.5))

    # If in slitless mode, define the bounding box X locations using the subarray x boundaries
    # and the y locations using the corner locations in the CDP reference file.  Make sure to
    # omit the 4 reference pixels on the left edge of slitless subarray.
    if input_model.meta.exposure.type.lower() == 'mir_lrs-slitless':
        bb_sub = ((input_model.meta.subarray.xstart - 1 + 4 - 0.5, input_model.meta.subarray.xsize - 1 + 0.5),
                  (np.floor(y2.min() + zero_point[1]) - 0.5, np.ceil(y0.max() + zero_point[1]) + 0.5))

    # Find the ROW of the zero point
    row_zero_point = zero_point[1]

    # The inputs to the "detector_to_v2v3" transform are
    # - the indices in x spanning the entire image row
    # - y is the y-value of the zero point
    # This is equivalent of making a vector of x, y locations for
    # every pixel in the reference row
    const1d = models.Const1D(row_zero_point)
    const1d.inverse = models.Const1D(row_zero_point)
    det_to_v2v3 = models.Identity(1) & const1d | subarray_dist

    # Now deal with the fact that the spectral trace isn't perfectly up and down along detector.
    # This information is contained in the xcenter/ycenter values in the CDP table, but we'll handle it
    # as a simple rotation using a linear fit to this relation provided by the CDP.

    z = np.polyfit(xcen, ycen, 1)
    slope = 1. / z[0]
    traceangle = np.arctan(slope) * 180. / np.pi  # trace angle in degrees
    rot = models.Rotation2D(traceangle)  # Rotation model

    # Now include this rotation in our overall transform
    # First shift to a frame relative to the trace zero point, then apply the rotation to
    # correct for the curved trace.  This ends in a rotated frame with its origin at the
    # reference point and with yrot aligned along the spectral trace.
    xysubtoxyrot = models.Shift(-zero_point[0]) & models.Shift(-zero_point[1]) | rot

    # Next shift back to the subarray frame, and then map to v2v3
    xyrottov2v3 = models.Shift(zero_point[0]) & models.Shift(zero_point[1]) | det_to_v2v3

    # The two models together
    xysubtov2v3 = xysubtoxyrot | xyrottov2v3

    # Work out the spectral component of the transform
    # First compute the reference trace in the rotated-Y frame
    xcenrot, ycenrot = rot(xcen, ycen)
    # The input table of wavelengths isn't perfect, and the delta-wavelength
    # steps show some unphysical behaviour
    # Therefore fit with a spline for the ycenrot->wavelength transform
    # Reverse vectors so that yinv is increasing (needed for spline fitting function)
    yrev = ycenrot[::-1]
    wrev = wavetab[::-1]
    # Spline fit with enforced smoothness
    spl = UnivariateSpline(yrev, wrev, s=0.002)
    # Evaluate the fit at the rotated-y reference points
    wavereference = spl(yrev)
    # wavereference now contains the smoothed wavelengths at the rotated-y reference points; create the model
    wavemodel = models.Tabular1D(lookup_table=wavereference, points=yrev, name='waveref',
                                 bounds_error=False, fill_value=np.nan)

    # Now construct the inverse spectral transform.
    # First we need to create a spline going from wavereference -> ycenrot
    spl2 = UnivariateSpline(wavereference[::-1], ycenrot, s=0.002)
    # Make a uniform grid of wavelength points from min to max, sampled according
    # to the minimum delta in the input table
    dw = np.amin(np.absolute(np.diff(wavereference)))
    wmin = np.amin(wavereference)
    wmax = np.amax(wavereference)
    wgrid = np.arange(wmin, wmax, dw)
    # Evaluate the rotated y locations of the grid
    ygrid = spl2(wgrid)
    # ygrid now contains the rotated y pixel locations corresponding to
    # regularly-sampled wavelengths, create the model
    wavemodel.inverse = models.Tabular1D(lookup_table=ygrid, points=wgrid, name='waverefinv',
                                         bounds_error=False, fill_value=np.nan)

    # Wavelength barycentric correction
    try:
        velosys = input_model.meta.wcsinfo.velosys
    except AttributeError:
        pass
    else:
        if velosys is not None:
            velocity_corr = velocity_correction(input_model.meta.wcsinfo.velosys)
            wavemodel = wavemodel | velocity_corr
            log.info("Applied Barycentric velocity correction : {}".format(velocity_corr[1].amplitude.value))

    # Construct the full distortion model (xsub,ysub -> v2,v3,wavelength)
    lrs_wav_model = xysubtoxyrot | models.Mapping([1], n_inputs=2) | wavemodel
    dettotel = models.Mapping((0, 1, 0, 1)) | xysubtov2v3 & lrs_wav_model

    # Construct the inverse distortion model (v2,v3,wavelength -> xsub,ysub)
    # Transform to get xrot from v2,v3
    v2v3toxrot = subarray_dist.inverse | xysubtoxyrot | models.Mapping([0], n_inputs=2)
    # wavemodel.inverse gives yrot from wavelength
    # v2,v3,lambda -> xrot,yrot
    xform1 = v2v3toxrot & wavemodel.inverse
    dettotel.inverse = xform1 | xysubtoxyrot.inverse

    # Bounding box is the subarray bounding box, because we're assuming subarray coordinates passed in
    dettotel.bounding_box = bb_sub[::-1]

    return dettotel
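The forward/inverse wavelength construction above follows a reusable pattern: smooth
the tabulated trace with a spline, wrap it in a Tabular1D model, and build the inverse
by tabulating the wavelength-to-y spline on a uniform wavelength grid. A sketch with
made-up trace values (not the CDP table):

import numpy as np
from scipy.interpolate import UnivariateSpline
from astropy.modeling import models

y = np.linspace(0.0, 100.0, 60)             # rotated-y samples along the trace
wave = 5.0 + 0.07 * y + 0.002 * np.sin(y)   # made-up monotonic y -> wavelength relation
waveref = UnivariateSpline(y, wave, s=0.002)(y)

wavemodel = models.Tabular1D(points=y, lookup_table=waveref,
                             bounds_error=False, fill_value=np.nan)

# inverse: spline from wavelength back to y, tabulated on a uniform wavelength grid
spl_inv = UnivariateSpline(waveref, y, s=0.002)
wgrid = np.arange(waveref.min(), waveref.max(), np.abs(np.diff(waveref)).min())
wavemodel.inverse = models.Tabular1D(points=wgrid, lookup_table=spl_inv(wgrid),
                                     bounds_error=False, fill_value=np.nan)

print(wavemodel(50.0), wavemodel.inverse(wavemodel(50.0)))  # round-trips to ~50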
Example #57
0
 def D_p(self, N):
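     # return the 5th solution column, interpolated at N with a cubic spline (s=0)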
     yn11 = self.sol()[:, 4]
     ynn11 = UnivariateSpline(self.n1, yn11, k=3, s=0)
     return ynn11(N)
Example #58
0
err = err2[:, 0]
nn_err = np.loadtxt('./NNerr.csv', delimiter=',')
nn_err = np.abs(nn_err)
print(np.mean(err))
print(np.max(err))
print(np.min(err))
print(np.mean(nn_err))
print(np.max(nn_err))
print(np.min(nn_err))
plt.plot(Y_pred[:, 0], label='PLS predicted octane number')
plt.plot(trainY[:, 0], label='actual octane number')

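# smooth each empirical CDF with a spline so it can be evaluated on a fine grid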
ecdf = sm.distributions.ECDF(err)
x = np.linspace(min(err), max(err))
y = ecdf(x)
func = UnivariateSpline(x, y)
xnew = np.arange(min(err), max(err), 0.000001)
ynew = func(xnew)**q
plt.plot(xnew, ynew, 'b-', label='PLS')

ecdf = sm.distributions.ECDF(nn_err)
x = np.linspace(min(nn_err), max(nn_err))
y = ecdf(x)
func = UnivariateSpline(x, y)
xnew = np.arange(min(nn_err), max(nn_err), 0.000001)
ynew = func(xnew)**q
plt.plot(xnew, ynew, 'r-', label='BP')

plt.xlabel('relative error')
plt.ylabel('cumulative probability')
plt.title('CDF')
Example #59
0
 while k < data_set[j][0][-1]:
     output_x.append(k)
     k += step_size
 output_y = t(output_x)
 for m in range(len(output_x)):
     line = str(output_x[m])
     line = line+"     "+str(output_y[m])+"\n"
     output.write(line)
 single_orb = []
 single_orb.append(output_x)
 single_orb.append(output_y)
 # store single_orb into data_set
 output_set.append(single_orb)
 # plot the final output for comparison
 # (comment out the block below to skip graphing)
 v = UnivariateSpline(output_x, output_y, k=5, s=0.0)
 psi_d   = v(output_x,1)
 psi_dd  = v(output_x,2)
 psi_ddd = v(output_x,3)
 pyl.close()
 pyl.figure(1)
 pyl.subplot(511)
 pyl.title('compare input (red) and output (blue)' )
 pyl.xlabel('r')
 pyl.ylabel('Psi(r)')
 pyl.semilogy(data_set[j][0], data_set[j][1],'-r')
 pyl.semilogy(output_set[j][0], output_set[j][1],'-b')
 pyl.subplot(512)
 pyl.xlabel('r')
 pyl.ylabel('Psi(r)')    
 pyl.plot(data_set[j][0], data_set[j][1],'-r')
Example #60
0
    def get_Paz(self, az_data, R_data, jns):
        """ 
        Computes probability of line of sight acceleration at projected R : P(az|R)  
        """

        # Under construction !!!

        # Return P(az|R)

        az_data = abs(az_data)  # Consider only positive values

        # Assume az is in [m/s^2] if self.G == 0.004302; otherwise keep the model's units
        # Conversion factor from [pc (km/s)^2/Msun] -> [m/s^2]
        az_fac = 1. / 3.0857e10 if (self.G == 0.004302) else 1

        if (R_data < self.rt):
            nz = self.nstep  # Number of z value equal to number of r values
            zt = sqrt(self.rt**2 - R_data**2)  # maximum z value at R

            z = numpy.logspace(log10(self.r[1]), log10(zt), nz)

            spl_Mr = UnivariateSpline(self.r, self.mc, s=0,
                                      ext=1)  # Spline for enclosed mass

            r = sqrt(R_data**2 + z**2)  # Local r array
            az = self.G * spl_Mr(r) * z / r**3  # Acceleration along los
            az[-1] = self.G * spl_Mr(
                self.rt) * zt / self.rt**3  # Ensure non-zero final data point

            az *= az_fac  # convert to [m/s^2]
            az_spl = UnivariateSpline(
                z, az, k=4, s=0,
                ext=1)  # 4th order needed to find max (can be done easier?)

            zmax = az_spl.derivative().roots()  # z where az = max(az); could this avoid the 4th-order spline?
            azt = az[-1]  # acceleration at the max(z) = sqrt(r_t**2 - R**2)

            # Setup spline for rho(z)
            if jns == 0 and self.nmbin == 1:
                rho = self.rho
            else:
                rho = self.rhoj[jns]

            rho_spl = UnivariateSpline(self.r, rho, ext=1, s=0)
            rhoz = rho_spl(sqrt(z**2 + R_data**2))
            rhoz_spl = UnivariateSpline(z, rhoz, ext=1, s=0)

            # Now compute P(a_z|R)
            # There are 2 possibilities depending on R:
            #  (1) the maximum acceleration occurs within the cluster boundary, or
            #  (2) max(a_z) = a_z,t (this happens when R ~ r_t)

            nr, k = nz, 3  # resolution and spline order, chosen empirically

            # Option (1): zmax < max(z)
            if len(zmax) > 0:
                zmax = zmax[0]  # take the first entry for the rare cases with multiple peaks
                # Set up 2 splines for the inverse z(a_z) for z < zmax and z > zmax
                z1 = numpy.linspace(z[0], zmax, nr)
                z2 = (numpy.linspace(zmax, z[-1],
                                     nr))[::-1]  # Reverse z for ascending az

                z1_spl = UnivariateSpline(az_spl(z1), z1, k=k, s=0, ext=1)
                z2_spl = UnivariateSpline(az_spl(z2), z2, k=k, s=0, ext=1)

            # Option 2: zmax = max(z)
            else:
                zmax = z[-1]
                z1 = numpy.linspace(z[0], zmax, nr)
                z1_spl = UnivariateSpline(az_spl(z1), z1, k=k, s=0, ext=1)

            # Maximum acceleration along this los
            azmax = az_spl(zmax)

            # Now determine P(az_data|R)
            if (az_data < azmax):
                z1 = max([z1_spl(az_data),
                          z[0]])  # first radius where az = az_data
                Paz = rhoz_spl(z1) / abs(az_spl.derivatives(z1)[1])

                if (az_data > azt):
                    # Find z where a_z = a_z,t
                    z2 = z2_spl(az_data)
                    Paz += rhoz_spl(z2) / abs(az_spl.derivatives(z2)[1])

                # Normalize to 1
                Paz /= rhoz_spl.integral(0, zt)
                self.z = z
                self.az = az
                self.Paz = Paz
                self.azmax = azmax
                self.zmax = zmax
            else:
                self.Paz = 0
        else:
            self.Paz = 0

        return