Example #1
def scaling_printer(age, mass, radius):
    if not isnan(age):
        print('Age:', '{:.2u}'.format(age), '[Gyr]')

    if not isnan(mass):
        print('Mass:', '{:.2u}'.format(mass), '[solar masses]')

    if not isnan(radius):
        print('Radius:', '{:.2u}'.format(radius), '[solar radii]')
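
A minimal usage sketch (an assumption, not from the original project): isnan is taken to come from uncertainties.unumpy, and the arguments are ufloat values, with NaN marking a missing quantity.

import numpy as np
from uncertainties import ufloat
from uncertainties.unumpy import isnan  # assumed source of the bare isnan

age = ufloat(4.57, 0.11)         # Gyr
mass = ufloat(1.0, 0.04)         # solar masses
radius = ufloat(np.nan, np.nan)  # unknown, so it is skipped

scaling_printer(age, mass, radius)
# Age: 4.57+/-0.11 [Gyr]
# Mass: 1.000+/-0.040 [solar masses]
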
Example #2
def get_reactions_with_all_known_KM(self):
    reaction_list = []
    for r in self.reactions:
        if r.km_sparse:
            # list() so unumpy.isnan can vectorize over the dict values
            if not any(unumpy.isnan(list(r.km_sparse.values()))):
                reaction_list.append(r)
    return reaction_list
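
A stand-in sketch of the filter logic (the Reaction objects are replaced by a hypothetical SimpleNamespace): keep only reactions whose sparse KM dictionary exists and contains no NaN nominal values.

import numpy as np
from types import SimpleNamespace
from uncertainties import ufloat, unumpy

r1 = SimpleNamespace(km_sparse={'C00031': ufloat(0.2, 0.05)})
r2 = SimpleNamespace(km_sparse={'C00031': ufloat(np.nan, np.nan)})
known = [r for r in (r1, r2)
         if r.km_sparse and not any(unumpy.isnan(list(r.km_sparse.values())))]
print(len(known))  # 1
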
Example #3
def nonan(fv):
    """Removes spectrums with nans in them."""
    ret = []
    for f in fv.T:
        if np.any(unp.isnan(f)):
            continue
        ret.append(f)
    return np.array(ret)
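
A hedged demo of nonan: spectra are the columns of fv, and any column containing a NaN nominal value is dropped (note the kept columns come back as rows of the result).

import numpy as np
from uncertainties import unumpy as unp

fv = unp.uarray([[1.0, np.nan],
                 [2.0, 3.0]],
                [[0.1, 0.1],
                 [0.1, 0.1]])
print(nonan(fv))  # keeps only the first column, returned as a row
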
Example #4
def nanmed(fvals):
    """Does median of non-nan values.

    fvals(list): the input values -> rows will be averaged over
    """
    fvar_vals = []
    for fv in fvals:
        fvar_vals.append(np.median(fv[~unp.isnan(fv)]))
    return fvar_vals
Example #5
def nanmean(fvals):
    """Does nanmean ... numpy was giving a strange result.

    fvals(list): the input values -> rows will be averaged over
    """
    fvar_vals = []
    for fv in fvals:
        fvar_vals.append(np.mean(fv[~unp.isnan(fv)]))
    return fvar_vals
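
The original mask in both helpers, fv[unp.isnan(fv) is False], compared the mask array to False by identity, which never selects the intended elements; ~unp.isnan(fv) inverts the mask element-wise. A quick check of the corrected version:

import numpy as np
from uncertainties import unumpy as unp

fv = unp.uarray([1.0, np.nan, 3.0], [0.1, 0.0, 0.3])
print(fv[~unp.isnan(fv)])  # the two non-NaN entries survive
print(nanmean([fv]))       # mean 2.0 with propagated std dev ~0.16
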
Example #6
def substrate_saturation_effect(self):
    prod = self._get_prod_s_by_ks()
    s = prod / (1 + prod)
    for i, r in enumerate(self.reactions):
        if not unumpy.isnan(s)[0, i]:
            r.saturation = s[0, i]
        else:
            r.saturation = np.nan
    return s
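
The saturation term itself is prod / (1 + prod), a Michaelis-Menten-style factor between 0 and 1. A standalone sketch with an assumed s/Ks product:

from uncertainties import ufloat

prod = ufloat(2.0, 0.3)        # assumed product of s/Ks terms
saturation = prod / (1 + prod)
print(saturation)              # nominal 2/3, propagated std dev ~0.033
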
Example #7
def backward_flux_effects(self):
    tmp = self.dG_prime
    tmp = np.clip(tmp, -200, 200, out=tmp)
    t = -unumpy.expm1(tmp / (self.R * self.T))
    for i, r in enumerate(self.reactions):
        if not unumpy.isnan(t)[0, i]:
            r.backward = t[0, i]
        else:
            r.backward = np.nan
    return t
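
The backward-flux factor is t = -expm1(dG'/(R*T)) = 1 - exp(dG'/(R*T)). A scalar sketch, with units assumed to be kJ/mol and kJ/(mol*K):

from uncertainties import ufloat
from uncertainties.umath import expm1  # scalar counterpart of unumpy.expm1

R, T = 8.314e-3, 298.15        # kJ/(mol*K), K
dG_prime = ufloat(-10.0, 1.0)  # kJ/mol
t = -expm1(dG_prime / (R * T))
print(t)                       # nominal ~0.982, std dev ~0.007
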
Example #8
def calcGasMass(DMpc, FHI, FCO, FCOerr):
	# Calculate HI gas mass
	FHI = unumpy.uarray(FHI,(FHI*0.2))
	MHI = (2.356E5)*((DMpc)**2.)*(FHI)
	# Calculate H2 gas mass
	FCO = unumpy.uarray(FCO,FCOerr)
	MH2 = 7845*FCO*((DMpc)**2.)
	where_are_nans = unumpy.isnan(MH2)
	MH2[where_are_nans] = 0
	# Total gas mass
	Mgas = MHI + MH2
	return MHI, MH2, Mgas
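
A hedged usage sketch, assuming distances in Mpc and fluxes in Jy km/s: FHI is assigned a flat 20% uncertainty inside the function, and galaxies without a CO detection (NaN flux) have their H2 mass zeroed.

import numpy as np
from uncertainties import unumpy

FHI = np.array([25.0, 40.0])
FCO = np.array([3.0, np.nan])    # second galaxy: no CO detection
FCOerr = np.array([0.5, np.nan])
MHI, MH2, Mgas = calcGasMass(10.0, FHI, FCO, FCOerr)
print(unumpy.nominal_values(Mgas))  # MH2 contributes 0 where FCO was NaN
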
Example #9
def mag(lc: LightCurve) -> LightCurve:
    """
    Converts and normalizes a LightCurve object to magnitudes.

    :param lc: lightcurve object
    :return: reduced light curve object
    """

    lc = lc.remove_nans()

    flux = lc.flux + (np.abs(2 * np.amin(lc.flux)) if np.amin(lc.flux) < 0 else 100)
    flux = unp.uarray(flux, lc.flux_err)
    flux = -2.5 * unp.log10(flux)
    flux = flux[~unp.isnan(flux)]
    flux -= np.median(flux)

    lc.flux = unp.nominal_values(flux) * u.mag
    lc.flux_err = unp.std_devs(flux) * u.mag
    return lc
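
The core of the conversion in isolation: propagate the flux errors through m = -2.5 * log10(flux), then normalize to the median magnitude (a sketch with made-up fluxes):

import numpy as np
from uncertainties import unumpy as unp

flux = unp.uarray([980.0, 1000.0, 1020.0], [5.0, 5.0, 5.0])
mags = -2.5 * unp.log10(flux)
mags -= np.median(mags)
print(unp.nominal_values(mags))  # approximately [0.022, 0.0, -0.022]
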
Example #10
def calculate_emc(dimuon_df, spill_df, bin_edges=[], qie_correction=False,
                  truth_mc=False, tracked_mc=False):
    # We will be grouping everything into these bins, so let's get it binned

    if not truth_mc and not tracked_mc:
        # Keep track of target attributes
        live_p_df = get_livep_from_spill(spill_df)
        if live_p_df is None:
            print ("Error getting live proton df from spill_df. Exiting...")
            return None

    # Binning very important. If no bins specified, use default.
    if len(bin_edges) == 0:
        bin_edges = [0.08, 0.14, 0.16, 0.18, 0.21, 0.25, 0.31, 0.53]

    # Create names of indexes that specify bin ranges.
    xranges = []
    for i in range(0,len(bin_edges)-1):
        xranges.append(("(%.02f, %.02f]" % (bin_edges[i], bin_edges[i+1])))

    if truth_mc:
        group_cols = ['mass', 'xF', 'xT', 'xB', 'dz', 'dpx', 'dpy', 'dpz',
                      'phi_gam', 'phi_mu', 'theta_mu', 'weight', 'weight_sq']
    elif tracked_mc:
        group_cols = ['mass', 'dz', 'dpz', 'dpt', 'pz1', 'pz2', 'pt1', 'pt2',
                      'xF', 'xB', 'xT', 'costh', 'phi', 'trackSeparation',
                      'chisq_dimuon', 'weight', 'weight_sq']
    else:
        group_cols = ['mass', 'dz', 'dpz', 'dpt', 'pz1', 'pz2', 'pt1', 'pt2',
                      'xF', 'xB', 'xT', 'costh', 'phi', 'trackSeparation',
                      'chisq_dimuon', 'QIESum', 'weight', 'weight_sq']

    if not all(col in dimuon_df.columns for col in group_cols):
        print ("Not all of these columns are in the dimuon_df:")
        print group_cols
        print ("Please add them or alter this analysis code. Exiting...")
        return None


    groups = dimuon_df[group_cols].groupby(by=[dimuon_df.target,
                                               pd.cut(dimuon_df.xT,
                                                      bin_edges)])

    # Calculate the counts in each bin
    if qie_correction:
        dimuon_df_copy = dimuon_df[~unp.isnan(dimuon_df['weight'])].copy()
        counts = dimuon_df_copy[group_cols].groupby(
            by=[dimuon_df_copy.target, pd.cut(dimuon_df_copy.xT, bin_edges)]).weight.sum()
        unc_df = pd.DataFrame(dimuon_df_copy[group_cols].groupby(
            by=[dimuon_df_copy.target, pd.cut(dimuon_df_copy.xT, bin_edges)]).weight_sq.sum())
        unc_df = unc_df.apply(unp.sqrt, axis=0)
        del dimuon_df_copy
    else:
        if truth_mc or tracked_mc:
            counts = dimuon_df[group_cols].groupby(
                by=[dimuon_df.target, pd.cut(dimuon_df.xT, bin_edges)]).weight.sum()
            unc_df = pd.DataFrame(dimuon_df[group_cols].groupby(
                by=[dimuon_df.target, pd.cut(dimuon_df.xT, bin_edges)]).weight_sq.sum())
            unc_df = unc_df.apply(unp.sqrt, axis=0)
        else:
            counts = dimuon_df[group_cols].groupby(
                by=[dimuon_df.target, pd.cut(dimuon_df.xT, bin_edges)]).dpz.count()
    counts_df = pd.DataFrame(counts)    
    
    # Calculate the mean kinematic values in each bin for each target
    means_df = groups[group_cols].mean()
    std_dev_df = groups[group_cols].std()
    means_df = pd.DataFrame(unp.uarray(means_df, std_dev_df),
                            columns=means_df.columns,
                            index=means_df.index)


    # Add the counts to the means dataframe
    means_df['counts'] = counts_df

    if qie_correction or truth_mc or tracked_mc:
        means_df['unc'] = unc_df
        means_df['counts'] = unp.uarray(means_df['counts'],
                                        means_df['unc'])
        means_df.drop('unc', axis=1, inplace=True)
    else:
        means_df['counts'] = unp.uarray(means_df['counts'],
                                        np.sqrt(means_df['counts']))

    # When working with low-stat data, there may be some NaN's. Handle them.
    means_df = means_df.applymap(fix_nan)

    # Normalize to live proton count
    if truth_mc or tracked_mc:
        means_df['ncounts'] = means_df['counts']
    else:
        means_df['ncounts'] = means_df['counts'] / live_p_df['liveProton_x10^16']

    if not truth_mc and not tracked_mc:
        # Subtract Empty counts from LD2 and LH2 counts
        # Subtract None counts from C, Fe, and W counts
        means_df = subtract_empty_none_bg(means_df)
    else:
        means_df['ncounts_bg'] = means_df['ncounts']
   
    means_df['ncounts_bg_ctm'] = None
    if not truth_mc and not tracked_mc:
        # 5. Use LH2 and LD2 data, proportions, to get Deuterium counts
        # Mixture proportions: LH2 is taken as pure hydrogen (a=1, b=0),
        # while the LD2 target holds c parts H and d parts D.
        a = 1.00
        b = 0.00
        c = 0.0714
        d = 0.9152
        scale = 1.0 / (c + d)
        c = scale * c
        d = scale * d
        # Rounded, normalized proportions used in the correction below.
        c = 0.0724
        d = 0.9276

        contam_adjusted_vals = np.multiply(
            np.add(
                np.multiply(means_df.loc[['LD2']].ncounts_bg.values, a),
                np.negative(np.multiply(means_df.loc[['LH2']].ncounts_bg.values, c))
            ), (1 / (d * a - b * c))
        )

        means_df.loc['LD2', 'ncounts_bg_ctm'] = contam_adjusted_vals
    else:
        means_df.loc['LD2', 'ncounts_bg_ctm'] = means_df.loc[['LD2']]['ncounts_bg'].values

    if truth_mc or tracked_mc:
        tmp_target_df = mc_target_df.copy()
    else:
        tmp_target_df = target_df.copy()


    # 7. Calculate LD2/LH2 ratio values
    ratio_list = []
    ratio_label_list = []
    if all(target in means_df.index for target in ('LH2', 'LD2')):
        ratio = np.divide(
            means_df.loc[['LD2']]['ncounts_bg_ctm'].values / tmp_target_df.loc['LD2'].Scale,
            means_df.loc[['LH2']]['ncounts_bg'].values / tmp_target_df.loc['LH2'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('D/H')
    if all(target in means_df.index for target in ('C', 'LD2')):
        ratio = np.divide(means_df.loc[['C']]['ncounts_bg'].values / tmp_target_df.loc['C'].Scale,
                          means_df.loc[['LD2']]['ncounts_bg_ctm'].values / tmp_target_df.loc['LD2'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('C/D')
    if all(target in means_df.index for target in ('Fe', 'LD2')):
        ratio = np.divide(means_df.loc[['Fe']]['ncounts_bg'].values / tmp_target_df.loc['Fe'].Scale,
                          means_df.loc[['LD2']]['ncounts_bg_ctm'].values / tmp_target_df.loc['LD2'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('Fe/D')
    if all(target in means_df.index for target in ('W', 'LD2')):
        ratio = np.divide(means_df.loc[['W']]['ncounts_bg'].values / tmp_target_df.loc['W'].Scale,
                          means_df.loc[['LD2']]['ncounts_bg_ctm'].values / tmp_target_df.loc['LD2'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('W/D')
    if all(target in means_df.index for target in ('C', 'LH2')):
        ratio = np.divide(means_df.loc[['C']]['ncounts_bg'].values / tmp_target_df.loc['C'].Scale,
                          means_df.loc[['LH2']]['ncounts_bg'].values / tmp_target_df.loc['LH2'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('C/H')
    if all(target in means_df.index for target in ('Fe', 'LH2')):
        ratio = np.divide(means_df.loc[['Fe']]['ncounts_bg'].values / tmp_target_df.loc['Fe'].Scale,
                          means_df.loc[['LH2']]['ncounts_bg'].values / tmp_target_df.loc['LH2'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('Fe/H')
    if all(target in means_df.index for target in ('W', 'LH2')):
        ratio = np.divide(means_df.loc[['W']]['ncounts_bg'].values / tmp_target_df.loc['W'].Scale,
                          means_df.loc[['LH2']]['ncounts_bg'].values / tmp_target_df.loc['LH2'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('W/H')
    if all(target in means_df.index for target in ('Fe', 'C')):
        ratio = np.divide(means_df.loc[['Fe']]['ncounts_bg'].values / tmp_target_df.loc['Fe'].Scale,
                          means_df.loc[['C']]['ncounts_bg'].values / tmp_target_df.loc['C'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('Fe/C')
    if all(target in means_df.index for target in ('W', 'C')):
        ratio = np.divide(means_df.loc[['W']]['ncounts_bg'].values / tmp_target_df.loc['W'].Scale,
                          means_df.loc[['C']]['ncounts_bg'].values / tmp_target_df.loc['C'].Scale)
        ratio_list.append(ratio)
        ratio_label_list.append('W/C')
    
    bin_centers = means_df['xT']['LD2'].values

    emc_df = pd.DataFrame([bin_centers] + ratio_list,
                          columns=xranges,
                          index=['xT'] + ratio_label_list).T

    return emc_df, means_df
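
The contamination correction above amounts to solving a two-component mixture, assuming the LD2 target contains a hydrogen fraction: with measured_LH2 = a*H + b*D and measured_LD2 = c*H + d*D, the pure deuterium count is D = (a*measured_LD2 - c*measured_LH2) / (a*d - b*c). A quick numeric check under that assumed model:

a, b, c, d = 1.00, 0.00, 0.0724, 0.9276
H_true, D_true = 100.0, 80.0
measured_LH2 = a * H_true + b * D_true
measured_LD2 = c * H_true + d * D_true
D_rec = (a * measured_LD2 - c * measured_LH2) / (a * d - b * c)
print(D_rec)  # ~80.0, the true deuterium count is recovered
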
Example #11
barPlotYLower = -400
barPlotYUpper = 900

####################################
# Process data for Jan, Feb, March #
####################################
if plot123:
    xcoords, ycoords, eksOntoShelfVals, altiOntoShelfVals, skimOntoShelfVals, truthOntoShelfVals = get_mean_month_data(
        monthIndices123, data)
    eksOntoShelfLong = np.nanmean(eksOntoShelfVals, axis=1)
    altiOntoShelfLong = np.nanmean(altiOntoShelfVals, axis=1)
    skimOntoShelfLong = np.nanmean(skimOntoShelfVals, axis=1)
    truthOntoShelfLong = np.nanmean(truthOntoShelfVals, axis=1)

    # Keep points where at least one of the four series is non-NaN.
    toKeep = (~unp.isnan(eksOntoShelfLong)
              | ~unp.isnan(altiOntoShelfLong)
              | ~unp.isnan(skimOntoShelfLong)
              | ~unp.isnan(truthOntoShelfLong))
    xcoords = xcoords[toKeep]
    ycoords = ycoords[toKeep]
    eksOntoShelfLong = eksOntoShelfLong[toKeep]
    altiOntoShelfLong = altiOntoShelfLong[toKeep]
    skimOntoShelfLong = skimOntoShelfLong[toKeep]
    truthOntoShelfLong = truthOntoShelfLong[toKeep]

    #Converts coordinates into lon/lat.
    lats = []
    lons = []
    for i in range(len(xcoords)):
        lon, lat = su.convert_index_to_lonlat(xcoords[i],
Example #12
def get_fom(fnisotope,
            fname_exp,
            fname_bg,
            fwhm_pars,
            measure_time_exp,
            measure_time_bg,
            idets,
            Efit_low,
            Efit_high,
            do_plot=True,
            printout=False,
            hotfix_low=None):
    """ get figure of merrit

    fnisotope: str, like "60Co", or "152Eu"
    """
    fname_sims = []
    for file in os.listdir('mama_spectra/root_files'):
        if fnmatch.fnmatch(file, f'grid_9_*{fnisotope}*_all.m'):
            fname_sims.append(os.path.join("mama_spectra/root_files", file))
    fname_sims.sort()
    grid_points = np.full_like(fname_sims, np.nan, dtype=float)
    foms = np.zeros((len(fname_sims), 3))

    for i, fname_sim in enumerate(tqdm(fname_sims)):
        # if i > 2:
        #     break
        if printout:
            print("fitting: ", fname_sim)
        sc = SpectrumComparison()
        sc.get_data(fname_sim,
                    fname_exp,
                    fname_bg,
                    fwhm_pars,
                    measure_time_exp,
                    measure_time_bg,
                    idet=idets,
                    recalibrate=True)
        sc.scale_sim_to_exp_area(Efit_low, Efit_high)

        xsim = sc.xsim
        fexp = sc.fexp
        fsim_scaled = sc.fsim_scaled
        Emax_hotfix = 200
        if fnisotope == "60Co":
            denom = fsim_scaled(xsim)
            denom[denom == 0] = np.nan
            hotfix_low = fexp(xsim) / denom
            hotfix_low[unumpy.isnan(hotfix_low)] = 1
            hotfix_low[xsim > Emax_hotfix] = 1
            sc.uysim_scaled *= hotfix_low
            sc.fsim_scaled = uinterp1D(xsim, sc.uysim_scaled)
        else:
            sc.uysim_scaled *= hotfix_low
            sc.fsim_scaled = uinterp1D(xsim, sc.uysim_scaled)
            # denom = fsim_scaled(xsim)
            # denom[denom==0] = np.nan
            # hotfix_low = fexp(xsim)/denom
            # sc.uysim_scaled *= hotfix_low

        print("scale factor:", sc.scale_factor)
        chi2 = sc.get_chi2()
        rel_diff, rel_diff_smooth = sc.get_rel_diff(smooth_window_keV=20)

        foms[i, :] = sc.fom(Ecompare_low, Ecompare_high, printout=False)
        # print(sc.fom(Ecompare_low, Ecompare_high))
        grid_points[i] = int(re.search(r"grid_(\d*)_", fname_sim)[1])
        if do_plot:
            fig, _ = sc.plots(title=fname_sim, xmax=1400)
            fig.savefig(f"figs_hotfix_low/{fnisotope}_{grid_points[i]:.0f}.png")
            # plt.show()
            plt.close(fig)

    if printout:
        ltab = [[name, *foms[i, :]] for i, name in enumerate(fname_sims)]
        print("\nComparisons between {} and {} keV:".format(
            Ecompare_low, Ecompare_high))
        print(
            tabulate(ltab,
                     headers=[
                         "Name", "chi2", "rel_diff[%]", "rel_diff_smoothed[%]"
                     ],
                     floatfmt=".2f"))
    df = pd.DataFrame(foms,
                      columns=[
                          f"chi2_{fnisotope}", f"rel_diff_{fnisotope}",
                          f"rel_diff_smoothed_{fnisotope}"
                      ])
    df["grid_point"] = grid_points
    df = df[df.grid_point.notnull()]  # workaround if going through whole loop
    df = df.astype({"grid_point": 'int'}, copy=False)

    return df, hotfix_low
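
The grid index is recovered from the simulation filename by the regex above; a quick check with a hypothetical filename:

import re

fname_sim = "mama_spectra/root_files/grid_9_foo_60Co_all.m"  # hypothetical
print(int(re.search(r"grid_(\d*)_", fname_sim)[1]))  # 9
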
Example #13
    def get_reactions_with_all_known_KM(self):
        reaction_list = []
        for r in self.reactions:
            if r.km_sparse:
                if not any(unumpy.isnan(list(r.km_sparse.values()))):
                    reaction_list.append(r)
        return reaction_list

    def get_reactions_with_all_known_S(self):
        reaction_list = []        
        for r in self.reactions:
            cids = set(r.kegg_reaction.keys())
            if cids.issubset(set(self.measured_cids)):
                reaction_list.append(r)
        return reaction_list
        
if __name__ == "__main__":
    
    from catalytic_rates import rates
    R = rates()    
    
    index = R.kcat.index & R.kmax.index
    reactions = [R.rxns[r] for r in index]
    for gc in ['glc']:#, 'ac', 'glyc']:                
        out = pd.DataFrame(index=index, columns=['under saturation', 'backward flux'])
        mm = MM_kinetics(reactions, gc)
        for r in mm.reactions:
            if not unumpy.isnan(r.saturation) and not unumpy.isnan(r.backward):
                out.loc[r.id, 'under saturation'] = r.saturation.n
                out.loc[r.id, 'backward flux'] = r.backward.n
        out.dropna(inplace=True)
#        out.to_csv("../res/conc_dependant_effects_on_%s.csv" %gc, sep='\t')