import logging

import h5py
import numpy as np
import pandas as pd

# Project-specific modules (assumed importable) providing star data and priors.
import Priors
import StarData


def get_completeness(starname, date):
    """
    Get the completeness as a function of mass ratio for the
    given star and observation date.
    """

    # Read in the completeness vs. temperature
    with h5py.File('data/Completeness.h5', 'r') as comp:
        if starname in comp.keys() and date in comp[starname].keys():
            ds = comp[starname][date]['marginalized']
            comp_df = pd.DataFrame(data=ds[()], columns=ds.attrs['columns'])
            comp_df['Instrument'] = ds.attrs['Instrument']
        else:
            logging.warning('starname/date combination not found in dataset!')
            return None

    # Get the primary mass 
    spt = StarData.GetData(starname).spectype
    primary_mass_samples, _ = Priors.get_primary_mass(starname, spt)
    comp_df['M1'] = np.median(primary_mass_samples)

    # Convert the companion temperature to mass using spectral type relations 
    spt_int = Priors.SpectralTypeInterpolator()
    comp_df['M2'] = comp_df.Temperature.map(spt_int)

    # Make mass ratio and return
    comp_df['q'] = comp_df.M2 / comp_df.M1

    # Make sure all columns are floats
    for col in ['Temperature', 'Detection_Rate', 'vsini', 'M1', 'M2', 'q']:
        comp_df[col] = pd.to_numeric(comp_df[col], errors='coerce')
    return comp_df 
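# A minimal usage sketch. The star name and date below are hypothetical; any
# (starname, date) pair present in data/Completeness.h5 would work the same way.
comp = get_completeness('HIP 92855', '2014-10-15')
if comp is not None:
    # Average detection rate in coarse mass-ratio bins.
    bins = pd.cut(comp.q, np.arange(0.0, 1.1, 0.1))
    print(comp.groupby(bins)['Detection_Rate'].mean())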
Example #2
def PrintInfo():
    """ Function to write an info file, for use with plotting,
    and for saving MultiNest options, likelihood functions and
    priors.
    """
    # Import variables inside the function, to avoid circular module
    # imports.
    import MNOptions as MN
    import Priors
    import Likelihood

    infofile = MN.outputfiles_basename + '.info'
    with open(infofile, 'w') as info:

        info.write('''# SuperPy info file, with cube indices.
# This info file can be used for plot labels.\n''')

        for key, value in label.items():
            # Add 1 to the keys, so that labelling begins at 1, rather than 0.
            # This is identical to the SuperBayeS info file convention.
            key = str(int(key) + 1)
            info.write('''lab%s=%s\n''' % (key, value))

        # Write the prior types and arguments.
        info.write('''# Priors.\n''')
        priors = Priors.CMSSMModelTracker().param
        for key in priors:
            info.write('''%s %s %s\n''' % (key, priors[key].type, priors[key].arg))

        # Write the likelihood types and arguments.
        info.write('''# Likelihoods.\n''')
        constraints = Likelihood.CMSSMConstraintTracker().constraint
        for key in constraints:
            info.write('''%s %s %s\n''' %
                       (key, constraints[key].type, constraints[key].arg))

        # Write the MultiNest options.
        info.write('''# MultiNest options.\n''')
        for name in dir(MN):
            if not name.startswith("__"):
                info.write('''%s %s\n''' % (name, getattr(MN, name)))
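# A hedged sketch of reading the labN=value lines back for plot labels. The
# helper below is an assumption, not part of SuperPy; it simply inverts the
# convention written by PrintInfo above.
def read_labels(infofile):
    """ Parse 'labN=value' lines into a {cube index: label} dict (0-based). """
    labels = {}
    with open(infofile) as f:
        for line in f:
            if line.startswith('lab') and '=' in line:
                key, _, value = line.partition('=')
                labels[int(key[3:]) - 1] = value.strip()
    return labels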
def get_braganca_samples(df=None, size=1):
    """ Draw vsini samples from the Braganca et al. pdf by inverse-cdf sampling. """
    # Get the pdf function
    vsini_pdf = get_braganca_pdf()
    v = np.arange(0, 500, 0.1)
    P = vsini_pdf(v)

    # Convert to cdf
    cdf = Priors.get_cdf(v, P)

    # Remove duplicate cdf values
    tmp = pd.DataFrame(data=dict(cdf=cdf, velocity=v))
    tmp.drop_duplicates(subset=['cdf'], inplace=True)

    # Calculate the inverse cdf
    inv_cdf = spline(tmp.cdf.values, tmp.velocity.values, k=1)

    # Draw samples by evaluating the inverse cdf at uniform random numbers
    velocity_samples = inv_cdf(np.random.uniform(size=size))

    return velocity_samples
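# The steps above (pdf -> cdf -> interpolated inverse cdf -> samples) are
# generic inverse-transform sampling. A self-contained sketch of the same
# pattern, checked against an exponential pdf with unit mean (this assumes
# spline is scipy.interpolate.InterpolatedUnivariateSpline, as elsewhere here).
def sample_from_pdf(x, pdf, size=1000):
    # Numerically accumulate the pdf into a cdf, normalised to end at 1.
    cdf = np.cumsum(pdf)
    cdf /= cdf[-1]
    # Keep only strictly increasing cdf values so the spline is invertible.
    cdf, idx = np.unique(cdf, return_index=True)
    # A linear spline of x versus cdf is the (numerical) inverse cdf.
    inv_cdf = spline(cdf, x[idx], k=1)
    # Uniform deviates pushed through the inverse cdf follow the target pdf.
    return inv_cdf(np.random.uniform(size=size))

x = np.arange(0, 20, 0.01)
samples = sample_from_pdf(x, np.exp(-x), size=100000)
print(samples.mean())  # roughly 1 for an exponential with unit mean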
Example #4
def myloglike(cube, ndim, nparams):
    """ MultiNest callback function.
    Calculates the model's predictions (by calling external programs),
    saves the extra parameters in the cube, and returns the log likelihood
    to MultiNest.

    Arguments:
    cube -- Unit hypercube from which model parameters are mapped.
    ndim -- Number of model parameters.
    nparams -- Total number of parameters in the cube.

    Returns: The total log likelihood for the model parameter
    point under consideration.

    """
    # Set up constraints class.
    Constraints = CNMSSMConstraintTracker()

    # Copy cube to constraints, so it can work out predictions etc.
    for i, name in enumerate(
            sorted(Priors.CNMSSMModelTracker().param.keys(), key=str.lower)):
        Constraints.param[name] = cube[i]

    # Set predictions and loglikes.
    Constraints.SetPredictions()
    Constraints.SetLogLike()

    # Copy constraints to cube.
    for name in sorted(Constraints.constraint.keys(), key=str.lower):
        Cube.AddCube(cube, Constraints.constraint[name].theory, Cube.label,
                     name)

    # Copy the associated chi2s to the cube. Better to print chi2 than loglike,
    # because MultiNest prints chi2 and they can be treated in the
    # same way when plotting.
    for name in sorted(Constraints.constraint.keys(), key=str.lower):
        Cube.AddCube(cube, -2 * Constraints.constraint[name].loglike,
                     Cube.label, 'chi2:' + name)

    # Copy SLHA masses to the cube.
    for key in Constraints.masses:
        Cube.AddCube(cube, Constraints.masses[key], Cube.label,
                     'Mass:' + str(key))

    # Copy mu-parameter to the cube.
    Cube.AddCube(cube, Constraints.mu, Cube.label, 'mu')

    # Copy neutralino mixing to the cube.
    for key in Constraints.neutralino:
        Cube.AddCube(cube, Constraints.neutralino[key], Cube.label,
                     'NMIX:' + str(key))

    # Print the cube contents for debugging.
    print('Predictions:')
    for label, param in zip(Cube.label.values(), cube):
        print(label, param)
    print('Total loglike', Constraints.loglike)

    # Start cube count from 0 again.
    Cube.AddCube.reset()

    # Print an info file for the cube.
    # A decorator ensures this is printed only once.
    # The point must be physical, else the info will be incomplete.
    if Constraints.physical:
        Cube.PrintInfo()

    # Return the log likelihood to MultiNest.
    return Constraints.loglike
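# A hypothetical sketch of an AddCube helper, consistent with the calls above
# but NOT the project's actual Cube module: each derived quantity is written
# into the next free slot of the MultiNest cube (after the n_dims model
# parameters) and its name is recorded so PrintInfo can label that column.
def make_add_cube(n_dims):
    state = {'index': n_dims}

    def add_cube(cube, value, label, name):
        cube[state['index']] = value   # store the derived quantity
        label[state['index']] = name   # record its name for the .info file
        state['index'] += 1

    def reset():
        state['index'] = n_dims        # restart for the next parameter point

    add_cube.reset = reset
    return add_cube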
def get_zr2011_velocity(mass, size=10000, reduce_arr=True, df=None):
    """
    Get samples from the equatorial velocity pdf as tabulated
    in Table 4 of Zorec & Royer (2011).

    Parameters:
    ============
    - mass:       The mass of the star (in solar masses)
    - size:       The number of samples
    - reduce_arr: Boolean flag. Should we only take the unique masses, or all of them?
    - df:         A dataframe with all of the necessary columns. Don't give this
                  unless you know what you are doing!

    Returns:
    ========
    Samples from the PDF for the equatorial velocity (in km/s)
    """

    # Read in and update the columns a bit.
    if df is None:
        df = pd.read_csv('data/velocity_pdfs.csv', header=1)
        df['mid_mass'] = (df.mass_high + df.mass_low) / 2.0
        df['slow_alpha'] = df.slow_mu / np.sqrt(2)
        df['fast_alpha'] = df.fast_mu / np.sqrt(2)
        df['slow_frac'] /= 100.0
        df['fast_frac'] /= 100.0

    # Reduce the size of the mass array, if requested
    if reduce_arr:
        mass = np.unique(mass)

    # Interpolate the parameters as a function of mass
    columns = ['slow_frac', 'slow_alpha', 'fast_frac', 'fast_alpha', 'fast_l']
    interpolators = {col: spline(df.mid_mass, df[col], k=1) for col in columns}
    pars = {col: interpolators[col](mass) for col in interpolators.keys()}

    # Make large arrays. This may be memory intensive!
    v_arr = np.arange(0, 500, 0.1)
    v, slow_frac = np.meshgrid(v_arr, pars['slow_frac'])
    v, slow_alpha = np.meshgrid(v_arr, pars['slow_alpha'])
    v, fast_frac = np.meshgrid(v_arr, pars['fast_frac'])
    v, fast_alpha = np.meshgrid(v_arr, pars['fast_alpha'])
    v, fast_l = np.meshgrid(v_arr, pars['fast_l'])

    # Get the probability for each mass point
    prob = (slow_frac*maxwellian(v=v, alpha=slow_alpha, l=0.0) + 
            fast_frac*maxwellian(v=v, alpha=fast_alpha, l=fast_l))

    # Marginalize over the mass
    P_avg = np.mean(prob, axis=0)

    # Convert to cdf
    cdf = Priors.get_cdf(v_arr, P_avg)

    # Remove duplicate cdf values
    tmp = pd.DataFrame(data=dict(cdf=cdf, velocity=v_arr))
    tmp.drop_duplicates(subset=['cdf'], inplace=True)
            
    # Calculate the inverse cdf
    inv_cdf = spline(tmp.cdf.values, tmp.velocity.values, k=1)
            
    # Calculate samples from the inverse cdf
    velocity_samples = inv_cdf(np.random.uniform(size=size))

    return velocity_samples
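# A short usage sketch: draw equatorial-velocity samples for a set of
# primary-mass samples (the mass distribution below is made up for
# illustration; data/velocity_pdfs.csv must be present).
mass_samples = np.random.normal(loc=5.0, scale=0.2, size=100)
v_eq = get_zr2011_velocity(mass_samples, size=10000)
print(np.percentile(v_eq, [16, 50, 84]))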
Example #6
import os

import Priors  # For setting the prior callback function and number of model parameters.
import Likelihood  # For setting callback function and number of constraints.

# Make chains directory for MultiNest.
if not os.path.exists("chains"):
    os.mkdir("chains")

# File names - best to convert to absolute path here.
outputfiles_basename = os.path.abspath('./chains/CMSSM')

# These are the settings that are passed to MultiNest.
# Loglike callback function.
LogLikelihood = Likelihood.myloglike
# Prior callback function.
Prior = Priors.myprior
# Number of model parameters.
n_dims = len(Priors.CMSSMModelTracker().param)
# Total number of parameters in cube.
# This is model parameters + constraints + chi2 from constraints + 33
# sparticle/particle masses + mu + neutralino mixing.
n_params = n_dims + 2 * \
    len(Likelihood.CMSSMConstraintTracker().constraint) + 33 + 1 + 16
# Number of parameters to use for mode clustering (the first n).
n_clustering_params = 2
# Should be a vector of 1s and 0s - 1 = wrapped, 0 = unwrapped.
wrapped_params = None
# Whether to separate modes.
multimodal = True
const_efficiency_mode = False
n_live_points = 10
evidence_tolerance = 1.0
sampling_efficiency = 2.0
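# These module-level options are typically gathered up by a small driver
# script and handed to PyMultiNest. A hedged sketch of such a driver (the
# keyword names are pymultinest.run's; the driver itself is an assumption):
import pymultinest
import MNOptions as MN

pymultinest.run(LogLikelihood=MN.LogLikelihood,
                Prior=MN.Prior,
                n_dims=MN.n_dims,
                n_params=MN.n_params,
                n_clustering_params=MN.n_clustering_params,
                wrapped_params=MN.wrapped_params,
                multimodal=MN.multimodal,
                const_efficiency_mode=MN.const_efficiency_mode,
                n_live_points=MN.n_live_points,
                evidence_tolerance=MN.evidence_tolerance,
                sampling_efficiency=MN.sampling_efficiency,
                outputfiles_basename=MN.outputfiles_basename,
                verbose=True)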