Example #1
def interpolate_coordinates(self, x_list, y_list, curve_extrusion):
    x_temp = []
    y_temp = []
    # Cubic interpolation for curved extrusions, linear otherwise.
    kind = "cubic" if curve_extrusion else "linear"
    interp_func = interpolate.interp1d(y_list,
                                       x_list,
                                       kind=kind,
                                       fill_value="extrapolate")
    delta = 0.01
    # Densify each interval [y_i, y_i+1] in steps of delta.
    # Iterating by index avoids the original list.index() lookup, which
    # breaks when y_list contains duplicate values, and removes the need
    # for the bare try/except that silenced the final IndexError.
    for i in range(len(y_list) - 1):
        index = y_list[i]
        while index < y_list[i + 1]:
            index += delta
            x_temp.append(float(interp_func(index)))
            y_temp.append(index)
    return x_temp, y_temp
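A hedged usage sketch (hypothetical values; the method never reads self, so passing None suffices here, and interpolate is assumed to be scipy.interpolate imported at module scope):

from scipy import interpolate

x_pts = [0.0, 1.0, 0.5, 0.2]
y_pts = [0.0, 1.0, 2.0, 3.0]
xs, ys = interpolate_coordinates(None, x_pts, y_pts, curve_extrusion=True)
# ys walks each [y_i, y_i+1] interval in 0.01 steps; xs holds the cubic
# interpolation of x at those y positions (cubic needs at least 4 points)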
Example #2
    def sanitize_data(self):
        """Fill the series via interpolation"""
        validx = None
        validy = None
        countx = None
        county = None
        if self.x is not None:
            validx = np.sum(np.isfinite(self.x))
            countx = float(self.x.size)
        else:
            raise ValueError(
                "The x-axis is not populated, calculate values before you interpolate."
            )
        if self.y is not None:
            validy = np.sum(np.isfinite(self.y))
            county = float(self.y.size)
        else:
            raise ValueError(
                "The y-axis is not populated, calculate values before you interpolate."
            )

        if min([validx / countx, validy / county]) < self.VALID_REQ:
            warnings.warn(
                "Poor data quality: there are not enough valid entries for x ({0:f}/{1:f}) or y ({2:f}/{3:f})."
                .format(validx, countx, validy, county), UserWarning)
        # TODO: use filter and cubic splines!
        #filter = np.logical_and(np.isfinite(self.x),np.isfinite(self.y))
        if validy > validx:
            y = self.y[np.isfinite(self.y)]
            self.x = interp1d(self.y, self.x, kind='linear')(y)
            self.y = y
        else:
            x = self.x[np.isfinite(self.x)]
            self.y = interp1d(self.x, self.y, kind='linear')(x)
            self.x = x
Example #4
 def __init__(self, redshift, absnap, hubble = 0.71, fbar=0.17, units=None, sf_neutral=True):
     if units is not None:
         self.units = units
     else:
         self.units = unitsystem.UnitSystem()
     self.absnap = absnap
     self.f_bar = fbar
     self.redshift = redshift
     self.sf_neutral = sf_neutral
     #Interpolate for opacity and gamma_UVB
     #Opacities for the FG09 UVB from Rahmati 2012.
     #IMPORTANT: The values given for z > 5 are calculated by fitting a power law and extrapolating.
     #Gray power law was: -1.12e-19*(zz-3.5)+2.1e-18 fit to z > 2.
     #gamma_UVB was: -8.66e-14*(zz-3.5)+4.84e-13
     #This is clearly wrong, but this model is equally a poor choice at these redshifts anyway.
     gray_opac = [2.59e-18,2.37e-18,2.27e-18, 2.15e-18, 2.02e-18, 1.94e-18, 1.82e-18, 1.71e-18, 1.60e-18]
     gamma_UVB = [3.99e-14, 3.03e-13, 6e-13, 5.53e-13, 4.31e-13, 3.52e-13, 2.678e-13,  1.81e-13, 9.43e-14]
     zz = [0, 1, 2, 3, 4, 5, 6, 7,8]
     self.redshift_coverage = True
     if redshift > zz[-1]:
         self.redshift_coverage = False
         print("Warning: no self-shielding at z=",redshift)
     else:
         gamma_inter = intp.interp1d(zz,gamma_UVB)
         gray_inter = intp.interp1d(zz,gray_opac)
         self.gray_opac = gray_inter(redshift)
         self.gamma_UVB = gamma_inter(redshift)
     #self.hy_mass = 0.76 # Hydrogen massfrac
     self.gamma=5./3
     #Boltzmann constant (cgs)
     self.boltzmann=1.38066e-16
     self.hubble = hubble
     #Physical density threshold for star formation in H atoms / cm^3
     self.PhysDensThresh = self._get_rho_thresh(hubble)
Example #5
	def __init__(self, a_sound, t_age):
		'''Initialize physical parameters and create interpolation objects'''
		from numpy import array
		from scipy import interpolate as interp  # scipy.interpolate.interpolate is a removed private module
		from math import log10

		# Init sound speed and collapse age
		self.a_sound = float(a_sound)
		self.t_age = float(t_age)

		# Dimensionless parameters given in table 2 of the paper.
		# These describe the "expansion-wave collapse solution" where A = 2+
		# (see section II-c for details)
		table2_x = array([0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1])
		table2_alpha = array([71.5, 27.8, 16.4, 11.5, 8.76, 7.09, 5.95, 5.14, 4.52, 4.04, 3.66, 3.35, 3.08, 2.86, 2.67, 2.5, 2.35, 2.22, 2.1, 2])
		table2_negv = array([5.44, 3.47, 2.58, 2.05, 1.68, 1.4, 1.18, 1.01, 0.861, 0.735, 0.625, 0.528, 0.442, 0.363, 0.291, 0.225, 0.163, 0.106, 0.051, 0])

		# Set interpolation limits
		self.xmin = table2_x[0]
		self.xmax = table2_x[-1]

		# Init interpolation objects:
		#   alpha is best interpolated linearly in log space
		log10_x = [log10(x) for x in table2_x]
		log10_alpha = [log10(alpha) for alpha in table2_alpha]
		self.alpha_interp = interp.interp1d(log10_x, log10_alpha, kind='linear')

		#   neg_v is best interpolated cubically
		self.negv_interp = interp.interp1d(table2_x, table2_negv, kind='cubic')
Example #6
    def __init__(self, redshift,hubble = 0.71, fbar=0.17, molec = True, UnitLength_in_cm=3.085678e21, UnitMass_in_g=1.989e43):
        self.f_bar = fbar
        self.redshift = redshift
        self.molec = molec
        #Some constants and unit systems
        #Internal gadget mass unit: 1e10 M_sun/h in g/h
        #UnitMass_in_g=1.989e43
        #Internal gadget length unit: 1 kpc/h in cm/h
        self.UnitLength_in_cm=UnitLength_in_cm
        self.UnitDensity_in_cgs = UnitMass_in_g/self.UnitLength_in_cm**3
        #Internal velocity unit : 1 km/s in cm/s
        self.UnitVelocity_in_cm_per_s=1e5
        #proton mass in g
        self.protonmass=1.67262178e-24
        #self.hy_mass = 0.76 # Hydrogen massfrac
        self.gamma=5./3
        #Boltzmann constant (cgs)
        self.boltzmann=1.38066e-16

        self.hubble = hubble

        self.star = StarFormation(hubble)
        #Physical density threshold for star formation in H atoms / cm^3
        self.PhysDensThresh = self.star.get_rho_thresh()
        #Opacity and gamma_UVB tables (FG09 UVB from Rahmati 2012), as in the
        #related snippet above; they were module-level globals in the source.
        gray_opac = [2.59e-18, 2.37e-18, 2.27e-18, 2.15e-18, 2.02e-18, 1.94e-18, 1.82e-18, 1.71e-18, 1.60e-18]
        gamma_UVB = [3.99e-14, 3.03e-13, 6e-13, 5.53e-13, 4.31e-13, 3.52e-13, 2.678e-13, 1.81e-13, 9.43e-14]
        zz = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        self.redshift_coverage = True
        if redshift > zz[-1]:
            self.redshift_coverage = False
            print("Warning: no self-shielding at z=", redshift)
        else:
            #Interpolate for opacity and gamma_UVB
            gamma_inter = intp.interp1d(zz, gamma_UVB)
            gray_inter = intp.interp1d(zz, gray_opac)
            self.gray_opac = gray_inter(redshift)
            self.gamma_UVB = gamma_inter(redshift)
Example #7
def get_ratios(H, t_pd, beta_d, Ls):
    # compute ratios of the estimated lake length (dL) and water volume change (dV)
    # relative to their true values given the true lake length (Ls),
    # dimensional friction (beta_d), and ice thickness (H)

    # discretization in frequency domain
    N = 2000
    x = np.linspace(-100, 100, num=N)
    d = np.abs(x[1] - x[0])
    k = fftfreq(N, d)  # frequency
    k[0] = 1e-10  # set zero frequency to small number due to (integrable) singularity
    k *= 2 * np.pi  # convert to the angular-frequency Fourier convention used in the notes

    w = w_base(x, Ls / H)  # compute basal velocity anomaly

    w_ft = fft(w)  # fourier transform for numerical method

    beta_nd = beta_d * H / (2 * eta)  # non-dimensional friction parameter
    # relative to viscosity/ice thickness

    tr = (4 * np.pi * eta) / (rho * g * H)  # relaxation time
    lamda = t_pd / tr  # ratio of oscillation time to relaxation time

    D1, D2 = get_Dj(lamda, beta_nd, w_ft, k)  # compute surface displacements

    T1, T2 = get_Tj(D1, D2, x, H)  # compute estimated highstand/lowstand times

    kappa1, kappa2 = get_kappaj(T1, T2)  # compute weights for displacements

    dH = kappa1 * D1 + kappa2 * D2  # compute surface elevation change anomaly

    dS = 2 * w  # elevation change at base

    # interpolate displacements for integration
    dSi = interpolate.interp1d(x, dS, fill_value="extrapolate")
    dHi = interpolate.interp1d(x, dH, fill_value="extrapolate")

    dVs = integrate.quad(dSi, -0.5 * Ls / H, 0.5 * Ls / H, full_output=1)[0]

    # compute estimated lake length
    if np.size(x[np.abs(dH) > delta]) > 0:
        x0 = x[np.abs(dH) > delta]
    else:
        x0 = 0 * x

    Lh = 2 * np.max(x0)  # (problem is symmetric with respect to x)

    if Lh > 1e-5:
        dVh = integrate.quad(dHi, -0.5 * Lh, 0.5 * Lh, full_output=1)[0]
        dV = dVh / dVs
        dL = Lh * H / Ls
        lag = (2 / np.pi) * (np.pi - T1)

    else:
        dV = 0
        dL = 0
        lag = 1.01

    return dV, dL, lag
Example #8
    def errfunc(p, plot_it=False, getSpec=False):

        uWarp = interp.interp1d([2500] + uOverLap, [abs(p[0]), 1., 1.],
                                bounds_error=False,
                                fill_value=1.)

        zWarp = interp.interp1d(zOverLap + [11000], [1., 1., abs(p[1])],
                                bounds_error=False,
                                fill_value=1.)

        specStitch_0 = (uSpec[:, 0].tolist() + specSDSS[:, 0].tolist() +
                        zSpec[:, 0].tolist())
        specStitch_1 = (uSpec[:, 1].tolist() + specSDSS[:, 1].tolist() +
                        zSpec[:, 1].tolist())

        # list(...) is needed under Python 3, where zip returns an iterator
        specStitch = scipy.array(
            list(zip(specStitch_0,
                     specStitch_1 * uWarp(specStitch_0) * zWarp(specStitch_0))))
        mags = synth([1., 0, 0, 0], [[specStitch]], filters)
        #print mags
        #raw_input()

        if False:  #getSpec: #plot_it:
            import pylab
            pylab.plot(pickleSpec[:, 0],
                       uWarp(pickleSpec[:, 0]) * zWarp(pickleSpec[:, 0]),
                       color='red')
            pylab.xlim([3000, 10000])
            pylab.show()

            #plot(specStitch,scipy.array(zip(pickleSpec[:,0].tolist(),(uWarp(pickleSpec[:,0])*zWarp(pickleSpec[:,0])*pickleSpec[:,1]).tolist())))

            #plot(specStitch,scipy.array(zip(specStitch[:,0].tolist(),(uWarp(specStitch[:,0])*zWarp(specStitch[:,0])*specStitch[:,1]).tolist())))

            plot(specStitch, specStitch[:, 1])

            pylab.show()

        #print mags
        ugdiff = (mags['USDSS'] - mags['GSDSS'] -
                  locus_list['USDSS_GSDSS'][locus_index])

        #urdiff = (mags['USDSS'] - mags['RSDSS'] - locus_list['USDSS_RSDSS'][locus_index])
        gzdiff = (mags['GSDSS'] - mags['ZSDSS'] -
                  locus_list['GSDSS_ZSDSS'][locus_index])
        izdiff = (mags['ISDSS'] - mags['ZSDSS'] -
                  locus_list['ISDSS_ZSDSS'][locus_index])
        ridiff = (mags['RSDSS'] - mags['ISDSS'] -
                  locus_list['RSDSS_ISDSS'][locus_index])
        stat = (ugdiff**2. + gzdiff**2. + izdiff**2. + ridiff**2.)

        print(locus_list['GSDSS_RSDSS'][locus_index],
              mags['GSDSS'] - mags['RSDSS'])

        print(ugdiff, gzdiff, izdiff, stat)

        if getSpec: return specStitch
        else:
            return stat
Example #9
def resample_sweepkey_by_curve(df_sweep_results,
                               sweep_key,
                               n_smp_rise=100,
                               n_smp_fall=100):
    # harmonic mean of Acc_tr, Acc_ts (the geometric-mean variant is left commented out)
    # curve = np.sqrt(df_sweep_results.Acc_tr.values * df_sweep_results.Acc_ts.values)
    curve = 2*(df_sweep_results.Acc_tr.values * df_sweep_results.Acc_ts.values) / (
            df_sweep_results.Acc_tr.values + df_sweep_results.Acc_ts.values +
            ml_utils.epsilon())
    amax = curve.argmax()
    if amax == 0 or amax == len(curve) - 1:
        tmin = df_sweep_results[sweep_key].min()
        tmax = df_sweep_results[sweep_key].max()

        warn(f'Argmax={amax} is on the edge of the threshold range. Please '
             f'increase that range [{tmin}, {tmax}]')
        if amax == 0:
            amax += 1
        elif amax == len(curve) - 1:
            amax -= 1

    x_rise = curve[:(amax + 1)]
    y_rise = df_sweep_results[sweep_key].iloc[:(amax + 1)]

    x_fall = curve[amax:]
    y_fall = df_sweep_results[sweep_key].iloc[amax:]

    f_th_vs_metric_rise = interpolate.interp1d(x_rise, y_rise)
    f_th_vs_metric_fall = interpolate.interp1d(x_fall, y_fall)

    # making most of the sampling around the peak
    rise_range = np.sort(
        np.unique(
            np.block([
                np.linspace(x_rise.min(),
                            x_rise.min() + (x_rise.max() - x_rise.min()) * 0.9,
                            int(n_smp_rise * 0.25)),
                np.linspace(x_rise.min() + (x_rise.max() - x_rise.min()) * 0.9,
                            x_rise.max(), int(n_smp_rise * 0.75))
            ])))
    fall_range = np.sort(
        np.unique(
            np.block([
                np.linspace(x_fall.min(),
                            x_fall.min() + (x_fall.max() - x_fall.min()) * 0.9,
                            int(n_smp_fall * 0.25)),
                # mirror the rise sampling: 75% of the points in the top 10%
                # of the range, next to the peak (the original swapped the
                # 0.9/0.75 factors here)
                np.linspace(x_fall.min() + (x_fall.max() - x_fall.min()) * 0.9,
                            x_fall.max(), int(n_smp_fall * 0.75))
            ])))
    sweep_key_range_rise = [f_th_vs_metric_rise(x) for x in rise_range]
    sweep_key_range_fall = [f_th_vs_metric_fall(x) for x in fall_range]
    sweep_key_equi_dist = np.sort(
        np.unique(sweep_key_range_rise + sweep_key_range_fall))
    return sweep_key_equi_dist
Example #10
def interpolate_mags_lbol(model_table,
                          filts=["u", "g", "r", "i", "z", "y", "J", "H", "K"],
                          magidxs=[0, 1, 2, 3, 4, 5, 6, 7, 8]):
    """
    """
    from scipy import interpolate as interp  # scipy.interpolate.interpolate is a removed private module
    tt = np.arange(model_table['tini'][0],
                   model_table['tmax'][0] + model_table['dt'][0],
                   model_table['dt'][0])
    mag_all = {}
    lbol_all = np.empty((0, len(tt)), float)

    for filt in filts:
        mag_all[filt] = np.empty((0, len(tt)))

    for row in model_table:
        t, lbol, mag = row["t"], row["lbol"], row["mag"]

        if np.sum(lbol) == 0.0:
            continue

        allfilts = True
        for filt, magidx in zip(filts, magidxs):
            idx = np.where(~np.isnan(mag[magidx]))[0]
            if len(idx) == 0:
                allfilts = False
                break

        if not allfilts: continue

        for filt, magidx in zip(filts, magidxs):
            idx = np.where(~np.isnan(mag[magidx]))[0]
            f = interp.interp1d(t[idx],
                                mag[magidx][idx],
                                fill_value='extrapolate')
            maginterp = f(tt)
            mag_all[filt] = np.append(mag_all[filt], [maginterp], axis=0)

        idx = np.where((~np.isnan(np.log10(lbol))) & ~(lbol == 0))[0]
        f = interp.interp1d(t[idx],
                            np.log10(lbol[idx]),
                            fill_value='extrapolate')
        lbolinterp = 10**f(tt)
        lbol_all = np.append(lbol_all, [lbolinterp], axis=0)

    # Add to model table; the original re-assigned model_table["lbol"] inside
    # the loop, clobbering lbol_all with the last row's raw lbol.
    model_table["lbol"] = lbol_all
    for filt in filts:
        model_table["mag_%s" % filt] = mag_all[filt]

    return model_table
Example #11
def _set_bounds(m, t,
                index_name='profile_index',
                up_profile_name='up_profile_value',
                low_profile_name='low_profile_value'):
    """
    Rule initializing variable bounds by interpolating two given profiles.

    :param m: Block
    :param t: Set time
    :param str index_name: name of the index set
    :param str up_profile_name: name of the upper bound profile parameter
    :param str low_profile_name: name of the lower bound profile parameter
    :return: None
    """

    import math
    import warnings

    from scipy.interpolate import interp1d  # scipy.interpolate.interpolate is a removed private module

    if not hasattr(m, index_name):
        raise AttributeError(f'{m} object has no attribute {index_name}.'
                             f' Cannot perform interpolation for initialization.')
    if not hasattr(m, up_profile_name):
        raise AttributeError(f'{m} object has no attribute {up_profile_name}.'
                             f' Cannot perform interpolation for initialization.')
    if not hasattr(m, low_profile_name):
        raise AttributeError(f'{m} object has no attribute {low_profile_name}.'
                             f' Cannot perform interpolation for initialization.')
    if not isinstance(m.component(index_name), Set):
        raise TypeError(f'{index_name} is not an instance of Set,'
                        f' but is actually: {type(m.component(index_name))}. Cannot proceed.')
    if not isinstance(m.component(up_profile_name), Param):
        raise TypeError(f'{up_profile_name} is not an instance of Param,'
                        f' but is actually: {type(m.component(up_profile_name))}. Cannot proceed.')
    if not isinstance(m.component(low_profile_name), Param):
        raise TypeError(f'{low_profile_name} is not an instance of Param,'
                        f' but is actually: {type(m.component(low_profile_name))}. Cannot proceed.')

    interp_x = list(m.component(index_name).value)
    interp_up = list(m.component(up_profile_name).extract_values().values())
    interp_low = list(m.component(low_profile_name).extract_values().values())

    funct_up = interp1d(interp_x, interp_up, kind='linear', fill_value='extrapolate')
    funct_low = interp1d(interp_x, interp_low, kind='linear', fill_value='extrapolate')

    bu = float(funct_up(t))
    bl = float(funct_low(t))

    # float() never returns None, so guard against NaN instead (the original
    # checked None, which could never trigger); also actually emit the warning
    # rather than constructing an unused Warning object.
    if math.isnan(bu) or math.isnan(bl):
        warnings.warn('Interpolation of the given profiles returned NaN. '
                      'It might be an error in the profiles or the interpolation function.')
        return 0, 0
    else:
        return bl, bu
Example #12
def get_envelope(lambdas, spec):

    lambdas_all = lambdas * 1.0

    idx = np.where(np.isfinite(np.log10(spec)))[0]
    lambdas = lambdas[idx]
    spec = spec[idx]

    if len(lambdas) == 0:
        return lambdas_all, np.nan * np.zeros(
            lambdas_all.shape), np.nan * np.zeros(lambdas_all.shape)

    lambdas_interp = np.arange(lambdas[0], lambdas[-1], 1.0)
    f = interp.interp1d(lambdas, np.log10(spec), fill_value='extrapolate')
    spec = 10**f(lambdas_interp)
    lambdas = lambdas_interp * 1.0

    spec_lowpass = butter_lowpass_filter(spec, 0.002 / 3.0, 1.0, order=5)

    idx = np.where((lambdas >= 10100) & (lambdas <= 10200))[0]  # original had the bounds reversed, selecting nothing
    spec_lowpass[idx] = np.nan
    idx = np.where((lambdas >= 13000) & (lambdas <= 15000))[0]
    spec_lowpass[idx] = np.nan
    idx = np.where((lambdas >= 17900) & (lambdas <= 19700))[0]
    spec_lowpass[idx] = np.nan
    idx = np.where(~np.isnan(spec_lowpass))[0]
    lambdas_lowpass = lambdas[idx]
    spec_lowpass = spec_lowpass[idx]

    f = interp.interp1d(lambdas_lowpass,
                        np.log10(spec_lowpass),
                        fill_value='extrapolate')
    spec_lowpass = 10**f(lambdas_all)
    lambdas = lambdas_all * 1.0

    idx = argrelextrema(spec_lowpass, np.greater)[0]
    idx = np.hstack((0, idx, len(spec_lowpass) - 1))
    spec_envelope = spec_lowpass[idx]

    try:
        f = interp.interp1d(lambdas[idx],
                            spec_lowpass[idx],
                            fill_value='extrapolate',
                            kind='quadratic')
        spec_envelope = f(lambdas)
        spec_envelope = np.max(np.vstack((spec_lowpass, spec_envelope)),
                               axis=0)
    except Exception:
        spec_envelope = np.nan * np.zeros(lambdas_all.shape)

    return lambdas, spec_lowpass, spec_envelope
Example #13
 def nllk(mts):
     # current estimates:
     tau = exp(mts[1:2])
     mu_sigma = [mts[0], exp(mts[2])]

     # parametrize solver
     lSolver = generateDefaultAdjointSolver(tau, mu_sigma, Tf=Tf)
     lSolver.refine(0.01, 0.5)

     # interpolate control:
     alphas_for_f = alphaF(lSolver._ts)

     # compute hitting time distribution:
     gs = lSolver.solve_hittime_distn_per_parameter(tau,
                                                    mu_sigma,
                                                    alphas_for_f,
                                                    force_positive=True)
     if visualize_gs:
         figure(); plot(lSolver._ts, gs, 'r')

     # form likelihood
     gs_interp = interp1d(lSolver._ts, gs)

     # form negative log-likelihood
     nllk = -sum(log(gs_interp(hts)))

     # diagnose:
     print('mts: %.3f,%.3f,%.3f,%.0f ' % (mu_sigma[0], tau, mu_sigma[1], nllk))

     return nllk
Example #14
def numerov(f, start, stop, dx, first, second):
    x = np.linspace(start, stop, int(abs(stop - start) / dx))  # num must be an int
    psi = np.empty(len(x))
    dx2 = dx**2
    f1 = f(start)
    psi[0], psi[1] = first, second
    q0, q1 = psi[0]/(1 - dx2*f1/12), psi[1]/(1 - dx2*f1/12)
    for (i, ix) in enumerate(x):
        if i == 0: 
            continue
        f1 = f(ix)
        q2 = 2*q1 - q0 + dx2*f1*psi[i-1]
        q0, q1 = q1, q2
        psi[i] = q1/(1 - dx2*f1/12)

    return interpolate.interp1d(x, psi)
       
# def V(r):
#     return - 1. / r
#        
# sol = numerov(lambda r: -1, 1e-10, 100, 0.01, 0, 1)
# 
# x = np.arange(1e-10,100,0.01)
# y = sol(x)
# 
# plt.plot(x,y)
# plt.show()
Example #15
    def physical_SED_model(self, bases_wave_rest, obs_wave, bases_flux, Av_star, z_star, sigma_star, Rv_coeff=3.4):

        # Calculate wavelength at object z
        wave_z = bases_wave_rest * (1 + z_star)

        # Kernel matrix
        box = int(np.ceil(max(3 * sigma_star)))
        kernel_len = 2 * box + 1
        kernel_range = np.arange(0, 2 * box + 1)
        kernel = np.empty((1, kernel_len))

        # Filling gaussian values (the norm factor is the sum of the gaussian)
        kernel[0, :] = np.exp(-0.5 * (np.square((kernel_range - box) / sigma_star)))
        kernel /= sum(kernel[0, :])

        # Convolve bases with the kernel for the dispersion-velocity calculation
        basesGridConvolved = convolve2d(bases_flux, kernel, mode='same', boundary='symm')

        # Interpolate bases to wavelength ranges
        basesGridInterp = (interp1d(wave_z, basesGridConvolved, axis=1, bounds_error=True)(obs_wave)).T

        # Generate final flux model including reddening
        Av_vector = Av_star * np.ones(basesGridInterp.shape[1])
        obs_wave_resam_rest = obs_wave / (1 + z_star)
        Xx_redd = CCM89_Bal07(Rv_coeff, obs_wave_resam_rest)
        dust_attenuation = np.power(10, -0.4 * np.outer(Xx_redd, Av_vector))
        bases_grid_redd = basesGridInterp * dust_attenuation

        return bases_grid_redd
Example #16
    def _inline_label(self, xv, yv, x=None, y=None):
        """
        This will give the coordinates and rotation required to align a label with
        a line on a plot in SI units.
        """
        if y is None and x is not None:
            trash = 0
            (xv, yv) = self._to_pixel_coords(xv, yv)
            #x is provided but y isn't
            (x, trash) = self._to_pixel_coords(x, trash)

            #Get the rotation angle and y-value
            x, y, dy_dx = BasePlot.get_x_y_dydx(xv, yv, x)
            rot = np.arctan(dy_dx) / np.pi * 180.

        elif x is None and y is not None:
            #y is provided, but x isn't
            _xv = xv[::-1]
            _yv = yv[::-1]
            #Find x by interpolation
            x = interp1d(yv, xv)(y)
            trash = 0
            (xv, yv) = self._to_pixel_coords(xv, yv)
            (x, trash) = self._to_pixel_coords(x, trash)

            #Get the rotation angle and y-value
            x, y, dy_dx = BasePlot.get_x_y_dydx(xv, yv, x)
            rot = np.arctan(dy_dx) / np.pi * 180.
        (x, y) = self._to_data_coords(x, y)
        return (x, y, rot)
Example #17
    def __getitem__(self, time):
        from scipy.interpolate import interp1d
        from morphforge.traces.tracetypes.tracefixeddt import TraceFixedDT

        if isinstance(time, tuple):
            assert len(time) == 2
            start = unit(time[0])
            stop = unit(time[1])

            if start < self._time[0]:
                assert False, 'Time out of bounds'
            if stop > self._time[-1]:
                assert False, 'Time out of bounds'

            mask = np.logical_and(start < self.time_pts, self._time < stop)

            if len(np.nonzero(mask)[0]) < 2:
                assert False
            return TraceFixedDT(time=self._time[np.nonzero(mask)[0]],
                                data=self.data_pts[np.nonzero(mask)[0]])

        assert isinstance(
            time,
            pq.quantity.Quantity), "Times should be quantity. Found: %s %s" % (
                time, type(time))
        # Rebase the time (Quantity.rescale returns a new object; it does not modify in place):
        time = time.rescale(self._time.units)
        interpolator = interp1d(self.time_pts_np, self.data_pts_np)
        d_mag = interpolator(time.magnitude)
        return d_mag * self.data_unit
Example #18
def main():
    import numpy
    from scipy.interpolate import interpolate

    ##### put monthly data here
    # e.g. northern hemisphere mountains:  (from the r.sun help page)
    #    [jan,feb,mar,...,dec]
    linke_data = numpy.array(
        [1.5, 1.6, 1.8, 1.9, 2.0, 2.3, 2.3, 2.3, 2.1, 1.8, 1.6, 1.5])
    ####

    linke_data_wrap = numpy.concatenate(
        (linke_data[9:12], linke_data, linke_data[0:3]))

    monthDays = numpy.array(
        [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    #init empty
    midmonth_day = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    for i in range(1, 12 + 1):
        midmonth_day[i - 1] = 15 + sum(monthDays[0:i])

    midmonth_day_wrap = numpy.concatenate((midmonth_day[9:12] - 365,
                                           midmonth_day,
                                           midmonth_day[0:3] + 365))

    linke = interpolate.interp1d(midmonth_day_wrap,
                                 linke_data_wrap,
                                 kind='cubic')
    # print data for full year:
    #for i in range(1,365+1):
    #    print("%d %.4f" % (i, linke(i)) )

    print("%.4f" % linke(day))
Example #19
    def train(self, images):
        r"""
        Train a standard intensity space and an associated transformation model.
        
        Note that the passed images should be masked to contain only the foreground.
        
        Parameters
        ----------
        images : sequence of array_likes
            A number of images.
        
        Returns
        -------
        IntensityRangeStandardization : IntensityRangeStandardization
            This instance of IntensityRangeStandardization
        """
        self.__stdrange = self.__compute_stdrange(images)
        
        lim = []
        for idx, i in enumerate(images):
            ci = numpy.array(numpy.percentile(i, self.__cutoffp))
            li = numpy.array(numpy.percentile(i, self.__landmarkp))
            ipf = interp1d(ci, self.__stdrange)
            lim.append(ipf(li))

            # treat single intensity accumulation error            
            if not len(numpy.unique(numpy.concatenate((ci, li)))) == len(ci) + len(li):
                raise SingleIntensityAccumulationError('Image no. {} shows an unusual single-intensity accumulation that leads to two equal percentile values. This is usually caused when the background has not been removed from the image. Alternatively, reduce the number of landmark percentiles landmarkp or change their distribution.'.format(idx))
            
        self.__model = [self.__stdrange[0]] + list(numpy.mean(lim, 0)) + [self.__stdrange[1]]
        self.__sc_umins = [self.__stdrange[0]] + list(numpy.min(lim, 0)) + [self.__stdrange[1]]
        self.__sc_umaxs = [self.__stdrange[0]] + list(numpy.max(lim, 0)) + [self.__stdrange[1]]
            
        return self
Example #20
 def calibrate(self, data_files, plot=1, fig=None):
     print('calibrating')
     # data structure: [m1,m2,focus,x,y,z]

     # if not a list, make it a list
     if not isinstance(data_files, list):
         print('not a list, making it a list')
         data_files = [data_files]
 
     # load data
     self.data = None
     for data_file in data_files:
         print(data_file)
         if self.data is None:
             self.data = np.loadtxt(data_file, delimiter=',')
         else:
             tmp = np.loadtxt(data_file, delimiter=',')
             self.data = np.vstack((self.data, tmp))
     
     self.calc_distc()
     focus = self.data[:,2]
     
     # fit a/x + c
     print('fitting using: ', self.interpolation)

     # string comparison with 'is' relies on interning and is a bug; use ==
     if self.interpolation == 'xinv':
         self.coeffs = np.polyfit(self.distc**(-1), focus, 1)
         ## now try fmin fit using coeffs as seed ##
         seed = np.zeros(3)
         seed[0] = self.coeffs[0]
         seed[2] = self.coeffs[1]

         tmp = scipy.optimize.fmin(self.fmin_func, seed, full_output=1, disp=0)
         self.coeffs = tmp[0]

     if self.interpolation == 'polyfit2':
         self.coeffs = np.polyfit(self.distc, focus, 2)
         print('linear coeffs: ', self.coeffs)

     if self.interpolation == 'interp1d':
         print('interpolating using interp1d')
         self.interp = interpolate.interp1d(self.distc, focus, kind='linear', fill_value=0)
  
         
     if plot == 1:
         if fig is None:    
             fig = plt.figure(1)
         plt.scatter(self.distc,focus)
         xi = np.linspace(min(self.distc),max(self.distc),50)
         yi = [self.get_focus_distc(x) for x in xi]
         
         plt.title('Calibration data for Pan Tilt Focus')
         plt.xlabel('distance to camera center, m')
         plt.ylabel('focus motor setting, radians')
         plt.plot(xi,yi)
         fig.show()
             
     
     
     return 1
Example #21
 def interpolate_wavelength(I, new_wavelength, fraction):
     new_theta = np.arcsin(range_stl * new_wavelength * 0.5)
     f = interp1d(new_theta,
                  I * fraction,
                  bounds_error=False,
                  fill_value=0.0)
     return f(specimen.range_theta)
Example #22
def fpr_at_tpr(y_true, y_pred, tpr_val=0.95, pos_label=1):
    """ Approximate (by interpolating the ROC curve) the False Positive Rate at a
    specific True-Positive Rate.
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=pos_label)
    fpr_vs_tpr = interpolate.interp1d(tpr, fpr)
    return float(fpr_vs_tpr(tpr_val))
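A quick sanity check with synthetic scores (a sketch; the function already assumes scikit-learn's roc_curve and scipy.interpolate are imported in its module):

import numpy as np

y_true = np.array([0, 0, 1, 1, 0, 1, 1, 0])
y_pred = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7, 0.6, 0.55])
print(fpr_at_tpr(y_true, y_pred, tpr_val=0.95))  # FPR where the interpolated ROC curve reaches TPR = 0.95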
Example #23
    def __cmap_discretise(self, cmap, N):
        """ Returns a discrete colormap from a continuous colormap 
        
            cmap: colormap instance, e.g. cm.jet
            N: Number of colors
            
            http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations
        """
        cdict = cmap._segmentdata.copy()
        # N colors
        colors_i = np.linspace(0, 1.0, N)
        # N + 1 indices
        indices = np.linspace(0, 1.0, N + 1)

        for key in ("red", "green", "blue"):
            # Find the N colors
            D = np.array(cdict[key])
            I = interpolate.interp1d(D[:, 0], D[:, 1])
            colors = I(colors_i)
            # Place those colors at the correct indices
            A = np.zeros((N + 1, 3), float)
            A[:, 0] = indices
            A[1:, 1] = colors
            A[:-1, 2] = colors
            # Create a tuple for the dictionary
            L = []
            for l in A:
                L.append(tuple(l))
            cdict[key] = tuple(L)

        return mpl.colors.LinearSegmentedColormap("colormap", cdict, 1024)
Example #24
def long_df_to_array(df,
                     groupby_cols,
                     meta_cols,
                     val_col,
                     idx_col,
                     min_idx=1,
                     min_length=5,
                     length=10):

    df = df.sort_values(by=groupby_cols)

    sequences = []
    meta_data = []
    og_length = []

    for name, group in df.groupby(groupby_cols):

        if group.shape[0] < min_length or group[idx_col].min() > min_idx:
            continue
        interpolater = interpolate.interp1d(group[idx_col],
                                            group[val_col],
                                            bounds_error=True)
        x_grid = np.linspace(1, group[idx_col].max(), length)

        og_length.append(len(group))
        sequences.append(interpolater(x_grid))
        meta_data.append(group[meta_cols].values[0, :])

    return np.vstack(sequences), np.vstack(meta_data), og_length
Example #25
def calculateTs_Kolmogorov_BVP(alpha, beta, tauchar=1.0, xth=1.0, xmin=-1.0):
    D = beta * beta / 2.

    def dS(S, x):
        return -((alpha - x / tauchar) / D) * S - 1.0 / D

    S_0 = 0.0

    xs = linspace(xmin, xth, 1000)
    dx = xs[1] - xs[0]
    Ss = odeint(dS, S_0, xs)
    if Ss.max() > 0.0:
        raise RuntimeError('Ss should be negative')

    T1s = -cumsum(Ss[-1::-1]) * dx
    T1s = T1s[-1::-1]

    T1_interpolant = interp1d(xs, T1s, bounds_error=False, fill_value=T1s[-1])

    def dS2(S, x):
        T1 = T1_interpolant(x)
        return -((alpha - x / tauchar) / D) * S - 2.0 * T1 / D

    Ss = odeint(dS2, S_0, xs)
    if Ss.max() > 0.0:
        raise RuntimeError('Ss should be negative')
    T2s = -cumsum(Ss[-1::-1]) * dx
    T2s = T2s[-1::-1]

    return xs, T1s, T2s
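A minimal way to run the snippet above, assuming the pylab-style names it relies on are imported explicitly (per the function's naming, T1s and T2s look like the first two moments of the hitting time on the grid xs):

from numpy import linspace, cumsum
from scipy.integrate import odeint
from scipy.interpolate import interp1d

xs, T1s, T2s = calculateTs_Kolmogorov_BVP(alpha=0.5, beta=1.0)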
Example #27
def interpolate_values_1d(x,y,x_points=None,kind='linear'):
    try:
        from scipy.interpolate import interp1d  # scipy.interpolate.interpolate is a removed private module
        if x_points is None:
            return interp1d(x, y, kind=kind)(x[np.isfinite(x)])
        else:
            return interp1d(x, y, kind=kind)(x_points)
    except ImportError:
        if kind != 'linear':
            warnings.warn(
              "You requested a non-linear interpolation, but SciPy is not available. Falling back to linear interpolation.",
              UserWarning)
        if x_points is None:
            return np.interp((x[np.isfinite(x)]), x, y)
        else:
            return np.interp(x_points, x, y)
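For example (a sketch with made-up data; with SciPy installed the cubic request is honored, otherwise the function warns and falls back to np.interp):

import numpy as np
import warnings

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 1.0, 4.0, 9.0])
print(interpolate_values_1d(x, y, x_points=np.array([1.5, 2.5]), kind='cubic'))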
Example #28
 def get_values(self, time_array):
     from scipy.interpolate import interp1d  # scipy.interpolate.interpolate is a removed private module
     time_units = self._time.units
     data_units = self._data.units
     interpolator = interp1d(self._time.magnitude, self._data.magnitude)
     return interpolator(time_array.rescale(time_units).magnitude) \
         * data_units
Example #29
 def RebaseTime(cls, trace, newTimebase, **kwargs):
     bounds_error = False
     
     interpolator = interp1d(trace._time, trace.rawdata, "linear", bounds_error=bounds_error)
     newData = interpolator(newTimebase)
     rebasedTrace = Trace(newTimebase, newData, timeUnit=trace.timeUnit, dataUnit=trace.dataUnit, **kwargs)
     return rebasedTrace
Example #31
def getTaylorCurveDiv():  #Taylor model middle curve
    xdata = [0.2464348739, 0.1295609244]
    y = [0.001275906, 0.1306574186]
    x = np.linspace(max(xdata), min(xdata), 60)
    f = interp1d(xdata, y)
    y = f(x)
    return ary([x, y])  # 'ary' is presumably an alias for np.array in the source module
Example #32
def linke_interp(day,turb_array):
    # put monthly data here
    # Angelo area LT from helios satellite data (http://www.soda-is.com/linke/linke_helioserve.html)
    # ltm1 and angelo-1 are identical, kept for backwards compatibility.  They are helios - 1
    if turb_array == 'helios':
        linke_data = numpy.array ([3.2,3.2,3.2,3.4,3.7,3.8,3.7,3.8,3.5,3.4,3.1,2.9])
    elif turb_array == 'angelo80':
        linke_data = numpy.array ([2.56,2.56,2.56,2.72,2.96,3.04,2.96,3.04,2.80,2.72,2.48,2.32])
    elif turb_array == 'angelo70':
        linke_data = numpy.array ([2.3,2.3,2.3,2.5,2.7,2.8,2.7,2.8,2.6,2.5,2.3,2.1])
    elif turb_array == 'angelo-1':
        linke_data = numpy.array ([2.2,2.2,2.2,2.4,2.7,2.8,2.7,2.8,2.5,2.4,2.1,1.9])
    elif turb_array == 'ltm1':
        linke_data = numpy.array ([2.2,2.2,2.2,2.4,2.7,2.8,2.7,2.8,2.5,2.4,2.1,1.9])
    else:
        linke_data = numpy.array ([1.5,1.6,1.8,1.9,2.0,2.3,2.3,2.3,2.1,1.8,1.6,1.5])
    
    linke_data_wrap = numpy.concatenate((linke_data[9:12],linke_data, linke_data[0:3]))
    monthDays = numpy.array ([0,31,28,31,30,31,30,31,31,30,31,30,31])
    midmonth_day = numpy.array ([0,0,0,0,0,0,0,0,0,0,0,0]) # create empty array to fill
    for i in range(1, 12+1):
        midmonth_day[i-1] = 15 + sum(monthDays[0:i])
    midmonth_day_wrap = numpy.concatenate((midmonth_day[9:12]-365, midmonth_day,midmonth_day[0:3]+365))
    linke = interpolate.interp1d(midmonth_day_wrap, linke_data_wrap, kind='cubic')
    lt = linke(day)
    return lt
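A hedged usage sketch (assumes numpy and scipy.interpolate are imported at module level, as the function body expects):

import numpy
from scipy import interpolate

# Linke turbidity on day-of-year 172 for the 'helios' data set
print(linke_interp(172, 'helios'))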
Example #33
    def calcRate(self):
        tt = numpy.arange(0, self.Tstim, self.dt)
        t = numpy.arange((-self.binwidth / 2), self.Tstim + self.binwidth,
                         self.binwidth)

        nSegment = int(numpy.round(self.Tstim / self.binwidth))  # cast: range() below needs an int
        ttseg = numpy.arange(0, self.binwidth - self.dt / 2, self.dt)
        r = []
        t = []

        for nS in range(nSegment + 1):
            if self.nRates == numpy.inf:
                f = numpy.random.uniform(0,
                                         (self.fmax - self.fmin)) + self.fmin
            else:
                f = numpy.floor(numpy.random.uniform(
                    0, self.nRates +
                    1)) / self.nRates * (self.fmax - self.fmin) + self.fmin

            pr = numpy.sin(f * ttseg * 2 * numpy.pi)
            r = numpy.r_[r, pr]
            t = numpy.r_[t, (ttseg + nS * self.binwidth)]

        r = interp1d(t, self.freq * r / 2 + self.freq / 2, 'linear')

        return r(tt)
Example #34
def cmap_discretize(cmap, N):
    """Return a discrete colormap from the continuous colormap cmap.
    
    cmap: colormap instance, eg. cm.jet. 
    N: Number of colors.
    
    Example
    x = resize(arange(100), (5,100))
    djet = cmap_discretize(cm.jet, 5)
    imshow(x, cmap=djet)
    
    from http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations
    """
    cdict = cmap._segmentdata.copy()
    # N colors
    colors_i = np.linspace(0,1.,N)
    # N+1 indices
    indices = np.linspace(0,1.,N+1)
    for key in ('red','green','blue'):
        # Find the N colors
        D = np.array(cdict[key])
        I = interpolate.interp1d(D[:,0], D[:,1])
        colors = I(colors_i)
        # Place these colors at the correct indices.
        A = np.zeros((N+1,3), float)
        A[:,0] = indices
        A[1:,1] = colors
        A[:-1,2] = colors
        # Create a tuple for the dictionary.
        L = []
        for l in A:
            L.append(tuple(l))
        cdict[key] = tuple(L)
    # Return colormap object.
    return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024)
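The docstring's example, made runnable (a sketch; assumes matplotlib is available and that np, interpolate, and mpl are the imports the function body expects):

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import interpolate

x = np.resize(np.arange(100), (5, 100))
djet = cmap_discretize(plt.cm.jet, 5)
plt.imshow(x, cmap=djet)
plt.show()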
Example #36
def calc_prob(wav1, flux1):

    wav2, flux2, error = data_out["lambda"], data_out["data"], data_out["error"]
    sigma = np.abs(error / (flux2 * np.log(10)))

    flux1 = np.log10(np.abs(flux1.value))
    flux2 = np.log10(np.abs(flux2))

    f = interp.interp1d(wav1, flux1)
    flux1new = f(wav2)

    chisquarevals = ((flux1new - flux2) / sigma)**2

    chisquaresum = np.sum(chisquarevals)
    chisquaresum = (1 / float(len(chisquarevals) - 1)) * chisquaresum
    chisquare = chisquaresum

    if np.isnan(chisquare):
        prob = -np.inf
    else:
        prob = scipy.stats.chi2.logpdf(chisquare, 1, loc=0, scale=1)

    if np.isnan(prob):
        prob = -np.inf

    if prob == 0.0:
        prob = -np.inf

    #if np.isfinite(prob):
    #    print T, F, prob

    return prob
Example #37
    def compute_flight_points(self, flight_points: Union[FlightPoint, pd.DataFrame]):
        # pylint: disable=too-many-arguments  # they define the trajectory
        sfc, thrust_rate, thrust = self.compute_flight_points_from_dt4(
            flight_points.mach,
            flight_points.altitude,
            self._get_delta_t4(flight_points.engine_setting),
            flight_points.thrust_is_regulated,
            flight_points.thrust_rate,
            flight_points.thrust,
        )
        # flight_points.sfc = sfc raises a warning if flight_points is a DataFrame that does not
        # already have this field, so we add the needed fields before setting values
        if isinstance(flight_points, pd.DataFrame):
            new_column_names = flight_points.columns.tolist()
            for name in ["sfc", "thrust_rate", "thrust"]:
                if name not in new_column_names:
                    flight_points.insert(len(flight_points.columns), name, value=np.nan)

        # SFC correction for NEO engines dependent on altitude.
        k_sfc_alt = interp1d(
            [-1000.0, 0.0, 13106.4, 20000.0],
            np.hstack((self.k_sfc_sl, self.k_sfc_sl, self.k_sfc_cr, self.k_sfc_cr)),
        )
        k_sfc = k_sfc_alt(flight_points.altitude)

        flight_points.sfc = sfc * k_sfc
        flight_points.thrust_rate = thrust_rate
        flight_points.thrust = thrust
Example #38
 def __init__(self, sim_params: dict, saved_data: xr.Dataset):
     start_time = np.datetime64(sim_params['start_time'].replace(
         '/', '-').replace(' ', 'T'))
     final_time = start_time + np.timedelta64(
         round(sim_params['duration'] * 1e9), 'ns')
     if start_time == saved_data.time.values[0]:
         start_index = 0
     else:
         start_index = np.where(saved_data.time.values < start_time)
         if len(start_index[0]) > 0:
             start_index = start_index[0][-1]
         else:
             raise ValueError(
                 'Simulation start time precedes orbit start time')
     final_index = np.where(saved_data.time.values > final_time)
     if len(final_index[0]) > 0:
         final_index = final_index[0][0] + 1
     else:
         raise ValueError('Simulation final time exceeds orbit final time')
     orbit_data = saved_data.isel(time=slice(start_index, final_index))
     t = orbit_data.time.values.astype('float')
     t = (t - t[0]) * 1e-9
     ab = np.concatenate(
         (orbit_data.sun.values, orbit_data.mag.values,
          orbit_data.atmos.values.reshape(
              -1, 1), orbit_data.lons.values.reshape(
                  -1, 1), orbit_data.lats.values.reshape(
                      -1, 1), orbit_data.alts.values.reshape(-1, 1),
          orbit_data.positions.values, orbit_data.velocities.values),
         axis=1)
     self._interp_data = interp1d(t, ab.T)
Example #39
def estimateHarness(simPs, fbkSoln,
                    Nblocks, Nhits,
                    refine_factor=2.,
                    fig_name=None, reestimate=True):

    # Load:
    simPaths = SimulationPaths.load([Nblocks, Nhits, simPs.mu, simPs.tau_char, simPs.sigma, ''])

    if reestimate:
        # Target data:
        thits_Blocks = simPaths.thits_blocks

        # FP solver:
        lSolver = deepcopy(fbkSoln._Solver)

        ts = simPaths.sim_ts
        alphasMap = simPaths.alphasDict
        Nalphas = len(alphasMap)
        print('Loaded %d Blocks of %d Hitting times for %d controls:' % (
            thits_Blocks.shape[1], thits_Blocks.shape[2], thits_Blocks.shape[0]))

        betaEsts = empty((Nalphas, Nblocks))

        alpha_max = fbkSoln._alpha_bounds[-1]

        # refine the solver:
        lSolver.rediscretize(lSolver._dx/refine_factor, lSolver._dt/refine_factor,
                             lSolver.getTf(), lSolver._xs[0])

        for adx, (alpha_tag, alphas) in enumerate(alphasMap.items()):

            print(adx, alpha_tag)

            alphaF = interp1d(ts, alphas, bounds_error=False, fill_value=alpha_max)

            for bdx in range(Nblocks):
    #        for bdx in [0]:
                hts = squeeze(thits_Blocks[adx, bdx, :])

    #            subplot(3, 1, adx)
    #            hist(hts, bins = 100)
    #            title('%s %d'%(alpha_tag, len(hts) ))

                betaEsts[adx, bdx] = calcBetaEstimate(lSolver,
                                                      hts,
                                                      alphaF,
                                                      [simPaths.simParams.mu,
                                                       simPaths.simParams.sigma])
                print('\n', betaEsts[adx, bdx])

        # Append estimates to the paths:
        simPaths.betaEsts = betaEsts

        # resave:
        simPaths.save()

    # Post analysis: visualize and latexify results
    postAnalysis(Nblocks=Nblocks,
                 Nhits=Nhits,
                 simPs=simPs)
Example #41
def apply_intensity_normalization_model(img, landmarks_lst):
    """Description: apply the learned intensity landmarks to the input image."""
    percent_decile_lst = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99]
    vals = list(img)
    landmarks_lst_cur = np.percentile(vals, q=percent_decile_lst)

    # create linear mapping models for the percentile segments to the learned standard intensity space
    linear_mapping = interp1d(landmarks_lst_cur,
                              landmarks_lst,
                              bounds_error=False)

    # transform the input image intensity values
    output = linear_mapping(img)

    # treat image intensity values outside of the cut-off percentiles range separately
    below_mapping = exp_model(landmarks_lst_cur[:2], landmarks_lst[:2],
                              landmarks_lst[0])
    output[img < landmarks_lst_cur[0]] = below_mapping(
        img[img < landmarks_lst_cur[0]])

    above_mapping = exp_model(landmarks_lst_cur[-3:-1], landmarks_lst[-3:-1],
                              landmarks_lst[-1])
    output[img > landmarks_lst_cur[-1]] = above_mapping(
        img[img > landmarks_lst_cur[-1]])

    return output.astype(np.float32)
Example #42
 def transform(self, image, surpress_mapping_check = False):
     r"""
     Transform an images intensity values to the learned standard intensity space.
     
     Note that the passed image should be masked to contain only the foreground.
     
     The transformation is guaranteed to be lossless i.e. a one-to-one mapping between
     old and new intensity values exists. In cases where this does not hold, an error
     is thrown. This can be suppressed by setting ``surpress_mapping_check`` to 'True'.
     Do this only if you know what you are doing.
     
     Parameters
     ----------
     image : array_like
         The image to transform.
     surpress_mapping_check : bool
         Whether to ensure a lossless transformation or not.
     
     Returns
     -------
     image : ndarray
         The transformed image
     
     Raises
     -------
     InformationLossException
         If a lossless transformation cannot be ensured
     Exception
         If no model has been trained before
     """
     if self.__model is None:
         raise UntrainedException('Model not trained. Call train() first.')
     
     image = numpy.asarray(image)
     
     # determine image intensity values at cut-off percentiles & landmark percentiles
     li = numpy.percentile(image, [self.__cutoffp[0]] + self.__landmarkp + [self.__cutoffp[1]])
     
     # treat single intensity accumulation error            
     if not len(numpy.unique(li)) == len(li):
         raise SingleIntensityAccumulationError('The image shows an unusual single-intensity accumulation that leads to two equal percentile values. This is usually caused when the background has not been removed from the image. The only other option is to re-train the model with a reduced number of landmark percentiles landmarkp or a changed distribution.')
     
     # create linear mapping models for the percentile segments to the learned standard intensity space  
     ipf = interp1d(li, self.__model, bounds_error = False)
     
     # transform the input image intensity values
     output = ipf(image)
     
     # treat image intensity values outside of the cut-off percentiles range separately
     llm = IntensityRangeStandardization.linear_model(li[:2], self.__model[:2])
     rlm = IntensityRangeStandardization.linear_model(li[-2:], self.__model[-2:])
     
     output[image < li[0]] = llm(image[image < li[0]])
     output[image > li[-1]] = rlm(image[image > li[-1]])
     
     if not surpress_mapping_check and not self.__check_mapping(li):
         raise InformationLossException('Image can not be transformed to the learned standard intensity space without loss of information. Please re-train.')
     
     return output
Example #43
    def calcRate(self):
        t = numpy.array([0, self.Tstim])
        r = numpy.array([self.f, self.f])

        tt = numpy.arange(0, self.Tstim, self.dt)
        r = interp1d(t, r, 'linear')  # nearest not implemented yet

        return r(tt)
Example #46
 def __init__(self, redshift, f_bar=0.17):
     """
     Sets the necessary parameters for the correction by interpolating
     according to the input redshift.
     
     Parameters
     ----------
     redshift : int or float
         redshift at which the data was drawn, to compute the 
         self-shielding correction.
     f_bar : float, optional
         Baryon fraction. The default value corresponds to the LambdaCDM cosmology.
         We adopt fiducial cosmological parameters consistent with the most recent
         WMAP 7-year results: Omega_m = 0.272 and Omega_b = 0.0455.
         f_bar = Omega_b / Omega_m.
     """    
     
     # Code flow:
     # =====================
     # > Assign the necessary parameters.
     # > Interpolate the values of sigma_vHI and gamma_UVB for the input redshift.
     self.f_bar = f_bar
     self.redshift = redshift
     
     # Boltzmann constant in (erg/K)
     self.boltzmann = ct.k_B.value
     
     # Redshift range where the correction can be applied
     z = [0, 1, 2, 3, 4, 5]
     
     # The hydrogen photoionization rate by the metagalactic ultraviolet
     # background radiation, in s**-1.
     gamma_UVB = [3.99e-14, 3.03e-13, 6e-13, 5.53e-13,
                  4.31e-13, 3.52e-13]
     
     # The gray absorption cross-section in (cm**2)
     sigma_vHI = [2.59e-18, 2.37e-18, 2.27e-18, 2.15e-18,
                  2.02e-18, 1.94e-18] 
             
     if redshift > 5:
         logging.warning("No self-shielding correction available at z = {}; select a redshift <= 5.".format(self.redshift))
     else:
         gamma_inter = intp.interp1d(z, gamma_UVB)
         self.gamma_UVB = gamma_inter(redshift)
         sigma_inter = intp.interp1d(z, sigma_vHI)
         self.sigma_vHI = sigma_inter(redshift)
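
Usage might look as follows; the enclosing class name is not visible in this fragment, so SelfShieldingCorrection below is a hypothetical stand-in:

corr = SelfShieldingCorrection(redshift=2.5)  # hypothetical class name
print(corr.gamma_UVB, corr.sigma_vHI)         # values interpolated between z = 2 and z = 3
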
Example #47
0
def same_ratio(kind='quadratic'):
    # start and end dates
    d2d = ['1964-09-29', '1964-10-13']
    cur.execute("SELECT q FROM qd WHERE dated BETWEEN ? AND ?;", d2d)
    data = np.array(cur.fetchall()).flatten()
    x = np.arange(1, data.size + 1)
    xnew = np.linspace(x.min(), x.max(), x.size * 100)

    #                5%       0.2%     0.1%
    qs15 = np.array([150.93, 250.72, 317.40])  # 15-day design values
    k15 = qs15 / 142.91  # K = Q_design / Q_typical
    q_des5 = data * k15[0]
    q_des02 = data * k15[1]
    q_des01 = data * k15[2]

    for d in q_des5:
        print(round(d, 0))

    print('\n')

    for d in q_des02:
        print(round(d, 0))

    print('\n')

    for d in q_des01:
        print(round(d, 0))

    f = interpolate.interp1d(x, data, kind=kind)
    f5 = interpolate.interp1d(x, q_des5, kind=kind)
    f02 = interpolate.interp1d(x, q_des02, kind=kind)
    f01 = interpolate.interp1d(x, q_des01, kind=kind)

    # plotting
    plt.plot(xnew, f(xnew), '-', label=r'$typical \quad year$')  # typical year
    plt.plot(xnew, f5(xnew), '--', label=r'$5\%$')  # design flood standard, 5%
    plt.plot(xnew, f02(xnew), '-.', label=r'$0.2\%$')  # design flood, 0.2%
    plt.plot(xnew, f01(xnew), ':', label=r'$0.1\%$')  # safety-check flood, 0.1%
    plt.grid()
    # plt.xticks(t, t)
    plt.xlabel('$day$')
    plt.ylabel('$Q(m^3/s)$')
    plt.legend()
    plt.show()
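
The same-ratio method scales every ordinate of the typical hydrograph by one peak ratio per design standard; a self-contained sketch without the database dependency:

import numpy as np

q_typical = np.array([100.0, 120.0, 142.91, 130.0, 110.0])  # made-up typical hydrograph
k = 250.72 / 142.91       # design peak (0.2%) / typical peak, as in k15[1]
q_design = q_typical * k  # every ordinate amplified by the same ratio
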
Example #48
0
 def _mtf(self, x=50):
     norm = max(roi.mtf for roi in self.hc_rois)
     ys = [roi.mtf / norm for roi in self.hc_rois]  # normalized MTF values
     xs = np.arange(len(ys))
     f = interp1d(ys, xs)  # invert the curve: position as a function of MTF level
     try:
         mtf = f(x / 100)
     except ValueError:  # requested level lies outside the sampled MTF range
         mtf = min(ys)
     return float(mtf)
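
The helper above reads off, e.g., the 50% MTF position by interpolating x as a function of y; a standalone sketch of the same inversion (interp1d sorts the abscissa internally, so a falling curve is fine):

import numpy as np
from scipy.interpolate import interp1d

ys = np.array([1.0, 0.8, 0.55, 0.3, 0.1])  # normalized MTF, monotonically falling
xs = np.arange(len(ys))                    # ROI index / spatial-frequency proxy
mtf50 = float(interp1d(ys, xs)(0.5))       # position where the MTF crosses 50%
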
Example #49
0
def interpolate_data(data):
    """
    Interpolates input data onto a uniformly spaced abscissa. Required for fast convolution.
    """
    L = data[:,0]
    Iexp = data[:,1]
    step = (L.max() - L.min()) / (len(L)-1)
    Linterp = arange(L.min(), L.max()+step, step)
    f = interpolate.interp1d(L, Iexp)
    Iinterp = f(Linterp)
    return column_stack((Linterp, Iinterp))
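
A usage sketch of the resample-then-convolve workflow the docstring alludes to, assuming the names used by interpolate_data (arange, column_stack, interpolate) are already imported in its module:

import numpy as np

data = np.column_stack(([0.0, 0.4, 1.1, 1.9, 3.0],
                        [1.0, 2.0, 1.5, 3.0, 2.0]))
resampled = interpolate_data(data)  # uniform step of 0.75 here
smoothed = np.convolve(resampled[:, 1], np.ones(3) / 3.0, mode='same')
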
Example #50
0
    def beta_nllk(beta):
        '''Root objective: negative log-likelihood of the hitting times for a given beta.'''
        Fs = squeeze(S.solve(alphas,
                             [beta]))
        dt = S._ts[1] - S._ts[0]
        gs = -diff(Fs[:, -1]) / dt

        gs_interp = interp1d(S._ts[1:], gs)
        nllk = -sum(log(gs_interp(hts)))
#        print(beta, nllk)
        return nllk
Example #51
0
def interp1d(value):
    if (isinstance(value, tuple)
        and len(value) == 2
        and value[0] == erlang.List("interp1d")):
        logger.debug(value)
        (x, y, kind, axis, copy, bounds_error) = value[1]
        kind = kind.decode("utf-8")
        value = interpolate.interp1d(
            x, y, kind=kind, axis=axis, copy=copy,
            bounds_error=bounds_error)
    return value
Example #52
0
def get_basic_OU_function(theta=0.001, total_time=1000, precision=0.1):
# -----------------------------------------------------------------------------
    from scipy.interpolate import interp1d
    import neurovivo.common as cmn

    times = precision*np.arange(int(total_time/precision))
    #theta = 1.*theta/precision
    OU_noise = cmn.OU_process_basic(theta, times)
    
    OU_interpolator = interp1d(times, OU_noise, "linear", bounds_error=False)
    
    return OU_interpolator
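
Usage sketch, assuming neurovivo is importable: the returned interpolator can be evaluated at arbitrary times, and queries outside [0, total_time) yield NaN because bounds_error=False with the default fill value:

ou = get_basic_OU_function(theta=0.001, total_time=1000, precision=0.1)
ou([0.05, 1.23, 500.0])  # linear interpolation of the sampled OU path
ou(2000.0)               # outside the grid -> nan
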
Example #53
0
    def __init__(self, nkdata):
        """
        initializes the material
        *nkdata* can be a single real or complex value or a file in an
        appropriate format (see below)
        """

        self.is_dispersive = None   # is n_c wavelength dependent?
        self.n_c = None             # complex refractive index

        # for saving the data given in the datafile "nkdata"
        self.wl_o = []      # original wavelength data (from file)
        self.n_c_o = []     # original complex n data (from file)

        # 1. material without dispersion:
        #    nkdata is a complex number, valid for all wavelengths
        # 2. material with dispersion:
        #    nkdata is a path to a text file listing (lambda n k)
        #    for all relevant wavelengths
        try:
            n_c = complex(nkdata)
        except ValueError:
            self.is_dispersive = True
        else:
            self.is_dispersive = False
            self.n_c = n_c.conjugate()
            if self.n_c.imag > 0:
                msg = ("Positive imaginary part of refractive"
                       " index {0} means gain material!").format(nkdata)
                raise Exception(msg)
            if self.n_c.real < 0:
                raise Exception("Negative real part of refractive "
                                "index is not supported.")

        if self.is_dispersive:
            # check file
            nk_path = expand_path(nkdata)
            #  read file
            wl_o, n_c_o = parse_nkfile(nk_path)
            # convert to N = n - i k convention
            self.n_c_o = np.array(n_c_o).conjugate()
            self.wl_o = np.array(wl_o)
            if (self.n_c_o.imag > 0).any():
                raise ValueError(
                    ("Positive imaginary part of refractive"
                     " index {0} means gain material!").format(self.n_c_o))
            if (self.n_c_o.real < 0).any():
                raise Exception("Negative real part of refractive "
                                "index is not supported.")

            self.interp_n = interp.interp1d(
                self.wl_o, self.n_c_o, kind='linear',
                copy=False, bounds_error=True)
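
On the sign convention handled by the conjugate() calls: the data file lists N = n + ik, while the class stores N = n - ik; a minimal illustration:

n, k = 1.5, 0.2
N_file = complex(n, k)       # n + ik, as read from the data file
N_used = N_file.conjugate()  # n - ik, the internal convention
assert N_used.imag <= 0      # absorbing (non-gain) material
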
Example #54
0
 def interpolate(fx, new_x, **kwargs):
     # SciPy's interp1d needs float values, so if we're given
     # integer values, convert them to the smallest possible
     # float dtype that can accurately preserve the values.
     if fx.dtype.kind == 'i':
         fx = fx.astype(np.promote_types(fx.dtype, np.float16))
     x = src_points.astype(fx.dtype)
     interpolator = interp1d(x, fx, kind='linear',
                             bounds_error=bounds_error, **kwargs)
     if extrapolation_mode == 'linear':
         interpolator = Linear1dExtrapolator(interpolator)
     new_fx = interpolator(np.array(new_x, dtype=fx.dtype))
     return new_fx
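
The promotion step picks the smallest float dtype that can represent the integer input exactly; for instance:

import numpy as np

print(np.promote_types(np.int8,  np.float16))  # float16
print(np.promote_types(np.int16, np.float16))  # float32
print(np.promote_types(np.int64, np.float16))  # float64
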
Example #55
0
 def getStimulation(self, particleEnsemble, Tf, init_ts_alpha=None):
     '''Main inner call: compute the optimal control and wrap it in an interpolator.'''
     ts, a_opt = self.calculateOptimalControl(particleEnsemble.taus,
                                              particleEnsemble.weights,
                                              Tf, init_ts_alpha=init_ts_alpha)

     # Archive:
     self.ts_aopts_Massif.append([ts, a_opt])

     # Return: hold the final control value for times beyond the computed horizon
     return interp1d(ts, a_opt,
                     bounds_error=False,
                     fill_value=a_opt[-1])
Example #56
0
def _error(value):
  '''Construct log-likelihood errors using the Poisson distribution'''
  # likelihood = P(value|lambda) using the underlying Poisson assumption
  n_samples = 1000
  lambdas, loglikelihoods = zip(*((x, -2*poisson.logpmf(value, x)) for x in np.linspace(0, 2*value+1, n_samples)))
  interpolated_ll = interpolate.interp1d(lambdas, loglikelihoods)
  # up/down errors: the lambdas above/below value at which interpolated_ll(lambda) = interpolated_ll(value) + 1
  ll_at_value, lambda_up, lambda_down, step_size = interpolated_ll(value), 1.1*value, 0.9*value, float(value)/10
  for i in range(5):
    # step back once, then refine each crossing with a 10x smaller step per pass
    lambda_up -= step_size; lambda_down += step_size; step_size /= 10
    while interpolated_ll(lambda_down) - ll_at_value < 1: lambda_down -= step_size
    while interpolated_ll(lambda_up) - ll_at_value < 1: lambda_up += step_size
  return (value - lambda_down, lambda_up - value)
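
The scan above finds the two lambdas at which -2*log L rises by 1 from its value at the observation, i.e. the usual ~1-sigma Poisson interval; a usage sketch, assuming np, scipy.stats.poisson and scipy.interpolate are imported as in the snippet:

err_down, err_up = _error(25)
# distances from 25 down/up to the Delta(-2 log L) = 1 crossings
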
Example #57
0
def fast_deeming(times, values, pad_n=None):
    """ Interpolate time values to an even grid then run an FFT

    returns (frequencies, amplitudes)

    Input
    -----
    times : numpy array containing time values
    values: numpy array containing measurements
    pad_n : (optional) Calculate fft of this size. If this is larger than
        the input data, it will be zero padded. See numpy.fft.fft's help
        for details.


    Output
    ------
    frequencies: numpy array containing frequencies
    amplitudes : numpy array containing amplitudes
    even_times : numpy array containing interpolated times
    even_values: numpy array containing interpolated values

    Details
    -------
    Time values are interpolated to an even grid from min(times) to max(times)
    containing times.size values. Interpolation is done using the linear
    spline method.

    NOTE: This may not give results as precise as deeming(); the interpolation
    may cause spurious effects in the fourier spectrum. This method is,
    however, very fast for large N compared to deeming().

    NOTE: This method strips nan from arrays first.
    """
    valid = find_nan(values)  # mask/indices selecting the non-NaN entries
    values = values[valid]
    times = times[valid]

    interpolator = interpolate.interp1d(times, values)

    even_times = np.linspace(times.min(), times.max(), times.size)
    even_vals = interpolator(even_times)
    if pad_n:
        amplitudes = np.abs(np.fft.fft(even_vals, pad_n))
    else:
        amplitudes = np.abs(np.fft.fft(even_vals, 2 * even_vals.size))

    amplitudes *= 2.0 / times.size
    frequencies = np.fft.fftfreq(amplitudes.size, d=even_times[1] - even_times[0])
    pos = frequencies >= 0

    return frequencies[pos], amplitudes[pos], even_times, even_vals
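
A usage sketch with an unevenly sampled sinusoid, assuming find_nan returns a mask selecting the finite entries as the snippet implies:

import numpy as np

rng = np.random.default_rng(0)
times = np.sort(rng.uniform(0.0, 100.0, 1000))
values = np.sin(2 * np.pi * 0.25 * times)  # 0.25 Hz signal
freqs, amps, et, ev = fast_deeming(times, values)
print(freqs[np.argmax(amps[1:]) + 1])      # peak frequency, near 0.25
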
Example #58
0
    def beta_nllk(beta):
        '''Main call: negative log-likelihood of the hitting times for a given beta.'''
        gs_dx = lSolver.solve_hittime_distn_per_parameter(1./beta,
                                                          mu_sigma,
                                                          alphas)
        while any(isnan(gs_dx)):
            print('WARNING: repeating calculation due to NaNs')
            gs_dx = lSolver.solve_hittime_distn_per_parameter(1./beta, mu_sigma, alphas)

#            if norm_const < 1.:
#                lSolver.setTf(lSolver.getTf() + 1)
#                alphas = alphaF(lSolver._ts)
        norm_const = sum(gs_dx[1:]*diff(lSolver._ts))
        if abs(norm_const - 1) > 1e-2:
            print('WARNING: retrying calculation due to non-normalized g')
            gs_dx = lSolver.solve_hittime_distn_per_parameter(1./beta, mu_sigma, alphas)
            norm_const = sum(gs_dx[1:]*diff(lSolver._ts))

        # interpolate the density and evaluate the log-likelihood:
        gs_interp = interp1d(lSolver._ts, gs_dx,
                             kind='nearest')

        gs_interpolated = gs_interp(thitsSet)

#        if contains(set(betas_sweep[0::4], beta)):
        nllk = -sum(log(gs_interpolated))

        visualize = isnan(nllk) or nllk > 1095 or nllk < 1000
        # False and in1d([beta], betas_sweep[0::4])[0];
        if visualize:
            figure()
            hist(thitsSet, bins=500, density=True, label='empirical')
            plot(lSolver._ts, gs_dx, label='FD g', linewidth=3)
            plot(thitsSet, gs_interpolated, label='interpolated g', linewidth=3)

            title_tag = ' beta_used = %.2f' % beta
            title(title_tag)
            xlabel(r'$t$', fontsize=xlabel_font_size)
            ylabel('$g(t)$', fontsize=xlabel_font_size)
            legend()

        # diagnostics:
        print('negative probs: finite-diffs = %d, interpolated = %d' % (sum(gs_dx <= 0), sum(gs_interpolated <= 0)))
        print('sum(g), beta, nllk:', norm_const, beta, nllk)

        return nllk