Example #1
def get_braganca_samples(df=None, size=1):
    # Get the pdf function for vsini (note: the df argument is currently unused)
    vsini_pdf = get_braganca_pdf()

    # Evaluate the pdf on a fine velocity grid (0-500 km/s)
    v = np.arange(0, 500, 0.1)
    P = vsini_pdf(v)

    # Convert to cdf
    cdf = Priors.get_cdf(v, P)

    # Remove duplicate cdf values so the inverse interpolation is well-defined
    tmp = pd.DataFrame(data=dict(cdf=cdf, velocity=v))
    tmp.drop_duplicates(subset=['cdf'], inplace=True)

    # Invert the cdf with a linear interpolation of velocity as a function of cdf
    inv_cdf = spline(tmp.cdf.values, tmp.velocity.values, k=1)

    # Draw uniform deviates and map them through the inverse cdf
    velocity_samples = inv_cdf(np.random.uniform(size=size))

    return velocity_samples
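
This snippet leans on helpers defined elsewhere in the project: get_braganca_pdf, spline, and Priors.get_cdf (the same helpers appear in Example #2 below). A minimal sketch of plausible definitions, assuming spline is scipy's InterpolatedUnivariateSpline and get_cdf is a trapezoid-rule cumulative integral normalized to 1; the actual project code may differ:

import numpy as np
import pandas as pd
from scipy.interpolate import InterpolatedUnivariateSpline as spline

class Priors:
    @staticmethod
    def get_cdf(x, pdf):
        # Cumulative trapezoid integral of the pdf, normalized so cdf[-1] == 1
        areas = np.diff(x) * 0.5 * (pdf[1:] + pdf[:-1])
        cdf = np.concatenate(([0.0], np.cumsum(areas)))
        return cdf / cdf[-1]

With those in scope, a call like get_braganca_samples(size=100) performs standard inverse-transform sampling: integrate the pdf to a cdf, drop duplicate cdf values from flat regions, interpolate the inverse, and evaluate it at uniform deviates.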
Example #2
def get_zr2011_velocity(mass, size=10000, reduce_arr=True, df=None):
    """ 
    Get samples from the velocity pdf as tabulated
    by Table 4 in Zorec & Royer (2011)
    
    Parameters:
    ============
    - mass:       The mass of the star (in solar masses)
    - size:       The number of samples
    - reduce_arr: Boolean flag. Should we use only the unique masses, or all of them?
    - df:         A dataframe with all of the necessary columns. Don't give this
                  unless you know what you are doing!

    Returns:
    ========
    Samples from the PDF for the equatorial velocity (in km/s)
    """

    # Read in and update the columns a bit.
    if df is None:
        df = pd.read_csv('data/velocity_pdfs.csv', header=1)
        df['mid_mass'] = (df.mass_high + df.mass_low) / 2.0
        df['slow_alpha'] = df.slow_mu / np.sqrt(2)
        df['fast_alpha'] = df.fast_mu / np.sqrt(2)
        df['slow_frac'] /= 100.0
        df['fast_frac'] /= 100.0

    # Reduce the size of the mass array, if requested
    if reduce_arr:
        mass = np.unique(mass)

    # Interpolate the parameters as a function of mass
    columns = ['slow_frac', 'slow_alpha', 'fast_frac', 'fast_alpha', 'fast_l']
    interpolators = {col: spline(df.mid_mass, df[col], k=1) for col in columns}
    pars = {col: interp(mass) for col, interp in interpolators.items()}

    # Make large arrays. This may be memory intensive!
    v_arr = np.arange(0, 500, 0.1)
    v, slow_frac = np.meshgrid(v_arr, pars['slow_frac'])
    v, slow_alpha = np.meshgrid(v_arr, pars['slow_alpha'])
    v, fast_frac = np.meshgrid(v_arr, pars['fast_frac'])
    v, fast_alpha = np.meshgrid(v_arr, pars['fast_alpha'])
    v, fast_l = np.meshgrid(v_arr, pars['fast_l'])

    # Get the probability for each mass point
    prob = (slow_frac*maxwellian(v=v, alpha=slow_alpha, l=0.0) + 
            fast_frac*maxwellian(v=v, alpha=fast_alpha, l=fast_l))

    # Marginalize over the mass
    P_avg = np.mean(prob, axis=0)

    # Convert to cdf
    cdf = Priors.get_cdf(v_arr, P_avg)

    # Remove duplicate cdf values so the inverse interpolation is well-defined
    tmp = pd.DataFrame(data=dict(cdf=cdf, velocity=v_arr))
    tmp.drop_duplicates(subset=['cdf'], inplace=True)

    # Invert the cdf with a linear interpolation of velocity as a function of cdf
    inv_cdf = spline(tmp.cdf.values, tmp.velocity.values, k=1)

    # Draw uniform deviates and map them through the inverse cdf
    velocity_samples = inv_cdf(np.random.uniform(size=size))

    return velocity_samples
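
This example additionally assumes a maxwellian helper. Zorec & Royer (2011) model the equatorial-velocity distribution as a mixture of Maxwellian components, so a plausible sketch is a Maxwellian pdf in (v - l) with scale parameter alpha; the exact form used in the original project may differ:

def maxwellian(v, alpha, l=0.0):
    # Assumed form: Maxwellian pdf shifted by l, zero below the offset
    x = v - l
    prob = np.sqrt(2.0 / np.pi) * x**2 / alpha**3 * np.exp(-x**2 / (2.0 * alpha**2))
    return np.where(x >= 0, prob, 0.0)

Example usage, assuming data/velocity_pdfs.csv is available: get_zr2011_velocity(mass=np.array([3.0, 5.0, 9.0]), size=1000) draws 1000 velocity samples from the pdf averaged (marginalized) over the three masses.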