import sys
import numpy as np
from scipy.interpolate import griddata
from scipy.stats import percentileofscore
from astropy.constants import R_jup, M_jup, M_sun
# mock_population_all, spatial_sampling, IMF_sampling, TS, heat and
# temperature_withDM are project-specific helpers imported from the
# package's own modules (not shown in this snippet)


def p_value_b(gamma_k, f, rs, nBDs, Tobs_R, robs_R, Mobs_R, heat_int_R, relT_R,
              relM_R, relR_R, relA_R, steps=300):
    """                                                                        
    Return p-value (in percent) of gamma_k at (f, rs) under the
    background-only (b) hypothesis
    """                                                                        
    # Compute TS pdf                                                           
    TS_k  = np.zeros(steps)                                                    
    # Load ATMO2020 model                                                      
    path   = "/home/mariacst/exoplanets/exoplanets/data/"                      
    data   = np.genfromtxt(path + "ATMO_CEQ_vega_MIRI.txt", unpack=True)
    points = np.transpose(data[0:2, :])                                        
    values = data[2]                                                           
                                                                               
    for i in range(steps):                                                     
        # Generate experiments under the b hypothesis (f = 0, no DM heating)
        robs, Tobs, Mobs, ages = mock_population_all(nBDs, relT_R, relM_R, relR_R,
                                         relA_R, 0., gamma_k, rs)              
        # Predicted intrinsic temperatures
        xi       = np.transpose(np.asarray([ages, Mobs]))
        Teff     = griddata(points, values, xi)
        heat_int = heat(Teff, np.ones(len(Teff))*R_jup.value)
        # TS
        TS_k[i] = TS(gamma_k, f, rs, Tobs, robs, Mobs, heat_int)
    # observed TS
    q_gamma_k_obs = TS(gamma_k, f, rs, Tobs_R, robs_R, Mobs_R, heat_int_R)
    # percentage of simulated TS values strictly above the observed one
    return (100 - percentileofscore(TS_k, q_gamma_k_obs, kind="strict"))
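# NOTE: UL() below also calls p_value_sb, which is not part of this snippet.
# Assuming it mirrors p_value_b but generates the mock populations under the
# signal-plus-background hypothesis (the tested f instead of f = 0) and uses
# the same tail convention, a minimal sketch could look as follows; the actual
# implementation may differ.
def p_value_sb(gamma_k, f, rs, nBDs, Tobs_R, robs_R, Mobs_R, heat_int_R, relT_R,
               relM_R, relR_R, relA_R, steps=300):
    """
    Sketch: p-value (in percent) of gamma_k at (f, rs) under the s+b hypothesis
    """
    TS_k   = np.zeros(steps)
    # Load ATMO2020 model (same grid as in p_value_b)
    path   = "/home/mariacst/exoplanets/exoplanets/data/"
    data   = np.genfromtxt(path + "ATMO_CEQ_vega_MIRI.txt", unpack=True)
    points = np.transpose(data[0:2, :])
    values = data[2]
    for i in range(steps):
        # mock populations now include the DM signal (f instead of 0.)
        robs, Tobs, Mobs, ages = mock_population_all(nBDs, relT_R, relM_R, relR_R,
                                         relA_R, f, gamma_k, rs)
        xi       = np.transpose(np.asarray([ages, Mobs]))
        Teff     = griddata(points, values, xi)
        heat_int = heat(Teff, np.ones(len(Teff))*R_jup.value)
        TS_k[i]  = TS(gamma_k, f, rs, Tobs, robs, Mobs, heat_int)
    q_gamma_k_obs = TS(gamma_k, f, rs, Tobs_R, robs_R, Mobs_R, heat_int_R)
    return (100 - percentileofscore(TS_k, q_gamma_k_obs, kind="strict"))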
Example 2
def mock_population(N,
                    rel_unc_Tobs,
                    rel_mass,
                    f_true,
                    gamma_true,
                    rs_true,
                    rho0_true=0.42,
                    Tmin=0.):
    """
    Generate N observed exoplanets

    Assumptions
    -----------
    1) N observed exoplanets distributed according to E2 bulge + BR disc
    2) All exoplanets have radius = Rjup
    3) BD evolution model taken from ATMO 2020
    4) BDs have masses chosen between 14-55 Mjup assuming a power-law IMF and
       a uniform age distribution between 1-10 Gyr
    5) Tobs has relative uncertainty rel_unc_Tobs
    6) Estimated masses have a relative uncertainty of rel_mass
    """
    #np.random.seed(42)
    _N = int(4.5 * N)  # oversample so enough objects survive the cuts below
    # galactocentric radius of simulated exoplanets
    r_obs = spatial_sampling(_N)
    # Age
    ages = np.random.uniform(1., 10., _N)  # [Gyr]
    # Mass
    mass = IMF_sampling(-0.6, _N, Mmin=6, Mmax=75)  # [Mjup]
    mass = mass * M_jup.value / M_sun.value  # [Msun]
    # load theoretical BD cooling model - ATMO 2020
    path = "./data/"
    data = np.genfromtxt(path + "ATMO_CEQ_vega_MIRI.txt", unpack=True)
    points = np.transpose(data[0:2, :])
    values = data[2]
    xi = np.transpose(np.asarray([ages, mass]))

    Teff = griddata(points, values, xi)
    heat_int = heat(Teff, np.ones(len(Teff)) * R_jup.value)
    #print(len(Teff), len(r_obs), len(heat_int), len(mass))
    # Observed temperature (internal heating + DM)
    Tobs = temperature_withDM(r_obs,
                              heat_int,
                              f=f_true,
                              R=R_jup.value,
                              M=mass * M_sun.value,
                              parameters=[gamma_true, rs_true, rho0_true])
    # add Gaussian noise
    Tobs_wn = Tobs + np.random.normal(
        loc=0, scale=(rel_unc_Tobs * Tobs), size=_N)
    mass_wn = mass + np.random.normal(loc=0, scale=(rel_mass * mass), size=_N)
    # keep only objects with masses between 14 and 55 Mjup (0.013-0.053 Msun)
    # and temperatures above Tmin
    pos = np.where((mass_wn > 0.013) & (mass_wn < 0.053) & (Tobs > Tmin)
                   & (Tobs_wn > Tmin))
    if len(pos[0]) < N:
        sys.exit("Fewer objects than required!")
    #return
    return r_obs[pos][:N], Tobs_wn[pos][:N], mass_wn[pos][:N], ages[pos][:N]
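# IMF_sampling is defined elsewhere in the package. As an illustration only,
# drawing masses from a truncated power-law IMF dN/dM ~ M**alpha (alpha != -1)
# can be done via inverse-transform sampling; the hypothetical sketch below is
# not necessarily the project's implementation.
def IMF_sampling_sketch(alpha, size, Mmin=6, Mmax=75):
    """Draw `size` masses [Mjup] from dN/dM ~ M**alpha between Mmin and Mmax."""
    u  = np.random.uniform(0., 1., size)
    a1 = alpha + 1.
    # inverse of the truncated power-law CDF
    return (Mmin**a1 + u * (Mmax**a1 - Mmin**a1))**(1. / a1)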
def mock_population_sens(N,
                         rel_unc_Tobs,
                         rel_mass,
                         points,
                         values,
                         f_true,
                         gamma_true,
                         rs_true,
                         rho0_true=0.42,
                         Tmin=0.):
    """
    Generate N observed exoplanets - intended to be run with sensitivity
    analysis

    Assumptions
    -----------
    1) N observed exoplanets distributed according to E2 bulge + BR disc
    2) All exoplanets have radius = Rjup
    3) BD evolution model taken from ATMO 2020
    4) BDs have masses chosen between 14-55 Mjup assuming a power-law IMF and
       a uniform age distribution between 1-10 Gyr
    5) Tobs has relative uncertainty rel_unc_Tobs
    6) Estimated masses have a relative uncertainty of rel_mass
    """
    #np.random.seed(42)
    _N = int(5.5 * N)  # oversample so enough objects survive the cuts below
    # galactocentric radius of simulated exoplanets
    r_obs = spatial_sampling(_N)
    # Ages and masses of simulated BDs
    ages = np.random.uniform(1., 10., _N)  # [Gyr]
    mass = IMF_sampling(-0.6, _N, Mmin=6, Mmax=75)  # [Mjup]
    mass = mass * M_jup.value / M_sun.value  # [Msun]
    xi = np.transpose(np.asarray([ages, mass]))
    Teff = griddata(points, values, xi)  # true Teff [K]
    heat_int = heat(Teff, np.ones(len(Teff)) * R_jup.value)

    # Observed temperature (internal heating + DM)
    Tobs = temperature_withDM(r_obs,
                              heat_int,
                              f=f_true,
                              R=R_jup.value,
                              M=mass * M_sun.value,
                              parameters=[gamma_true, rs_true, rho0_true])
    # add Gaussian noise
    Tobs_wn = Tobs + np.random.normal(
        loc=0, scale=(rel_unc_Tobs * Tobs), size=_N)
    mass_wn = mass + np.random.normal(loc=0, scale=(rel_mass * mass), size=_N)
    # keep only objects with masses between 14 and 55 Mjup (0.013-0.053 Msun)
    # and temperatures above Tmin
    pos = np.where((mass_wn > 0.013) & (mass_wn < 0.053) & (Tobs > Tmin)
                   & (Tobs_wn > Tmin))
    if len(pos[0]) < N:
        sys.exit("Fewer objects than required!")
    # estimated Teff [K]
    xi = np.transpose(np.asarray([ages[pos][:N], mass_wn[pos][:N]]))
    Teff = griddata(points, values, xi)
    #return
    return Tobs_wn[pos][:N], Teff
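# heat(Teff, R) is also defined elsewhere. Judging by how it is used here, it
# appears to convert an effective temperature and a radius into an intrinsic
# heat flow; assuming a Stefan-Boltzmann relation (an assumption, not the
# confirmed implementation), a minimal sketch is:
from astropy.constants import sigma_sb

def heat_sketch(Teff, R):
    """Intrinsic luminosity [W]: L = 4*pi*R^2 * sigma_SB * Teff^4 (R in m, Teff in K)."""
    return 4. * np.pi * R**2 * sigma_sb.value * Teff**4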
def UL(rs, f, nBDs, relT_R, relM_R, relR_R, relA_R, steps=300):
    """
    Return, for each scale radius in rs, the smallest gamma in the scan grid
    with CLs = p_sb/p_b < 0.05 (95% CL upper limit), given fraction f
    """
    # Generate "real" observation assuming only background (no DM)
    rho0 = 0.42
    # Load ATMO2020 model
    path   = "/home/mariacst/exoplanets/exoplanets/data/"
    data   = np.genfromtxt(path + "ATMO_CEQ_vega_MIRI.txt", unpack=True)
    points = np.transpose(data[0:2, :])
    values = data[2]
    robs_R, Tobs_R, mass_R, ages_R = mock_population_all(nBDs, 0., 0., 0., 0.,
                                                         0., 1., 1.,
                                                         rho0_true=rho0, v=None)
    xi         = np.transpose(np.asarray([ages_R, mass_R]))
    Teff       = griddata(points, values, xi)
    heat_int_R = heat(Teff, np.ones(len(Teff))*R_jup.value)

    gamma_up = np.ones(len(rs))*10
    for i in range(len(rs)):
        if rs[i] > 7.:
            gamma_k = np.linspace(0.4, 2.9, 35) # change this?
        else:
            gamma_k  = np.linspace(0, 1.5, 35) # change this?
        for g in gamma_k:
            _p_sb = p_value_sb(g, f, rs[i], nBDs, Tobs_R, robs_R, mass_R, 
                               heat_int_R, relT_R, relM_R, relR_R, relA_R, 
                               steps=steps)
            _p_b = p_value_b(g, f, rs[i], nBDs, Tobs_R, robs_R, mass_R, heat_int_R,
                             relT_R, relM_R, relR_R, relA_R, steps=steps)
            try:
                CL = _p_sb / _p_b
            except ZeroDivisionError:
                CL = 200.  # _p_b = 0: leave this gamma unconstrained
            if CL < 0.05:  # 95% CL exclusion (CLs criterion)
                print(rs[i], g)
                gamma_up[i] = g
                break
    #return
    return gamma_up
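# Illustrative call; the scale-radius grid, DM fraction, number of BDs and
# relative uncertainties below are placeholder values, not the original run.
rs_grid  = np.linspace(5., 20., 4)   # scale radii to scan [kpc]
gamma_up = UL(rs_grid, f=1., nBDs=100, relT_R=0.1, relM_R=0.1,
              relR_R=0.1, relA_R=0.1, steps=300)
print(gamma_up)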
Example 5
import sys
from keras.datasets import cifar10
from keras.utils import np_utils
# fgsm_it, cw, stats, stats_cari, heat, agu, build_network, loss_func and
# evaluation come from the project's attack/utility/model modules (not shown
# in this snippet)

ITERS = 100
ALPHA = 0.01
EPSI = 0.1

CW_ITERS = 100
BIN_STEPS = 20
NORM = 'l2'  #l0/l2/linf

(train_features, train_labels), (test_features,
                                 test_labels) = cifar10.load_data()
(test_features, test_labels) = agu(test_features, test_labels)
train_features = train_features.astype('float32')
test_features = test_features.astype('float32')
train_features /= 255
test_features /= 255
test_labels = np_utils.to_categorical(test_labels, 10)

if len(sys.argv) < 2 or sys.argv[1] == "fgsm_it":
    perturbed_accuracy = fgsm_it(test_features[:100], ITERS, EPSI, ALPHA,
                                 build_network, loss_func, evaluation,
                                 './tmp/original_cifar_model-8')
    print([s[0] for s in perturbed_accuracy])
    # slice the labels to match the 100 attacked samples
    stats([s[1] for s in perturbed_accuracy], test_labels[:100])
    heat([s[1] for s in perturbed_accuracy], test_labels[:100],
         "cifar10_fgsm_")
else:
    perturbed_norms, was = cw(test_features[:100], test_labels[:100], CW_ITERS,
                              BIN_STEPS, build_network, loss_func, evaluation,
                              NORM, './tmp/original_cifar_model-8')
    stats_cari(perturbed_norms, was)
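# fgsm_it above is the project's own implementation. For reference only, the
# core of iterative FGSM (a.k.a. BIM) can be written against tf.keras with a
# GradientTape; this hypothetical sketch is independent of the TF1-style
# checkpoint loading used in this script.
import tensorflow as tf

def fgsm_it_sketch(model, x, y_true, iters, eps, alpha):
    """Iterate x <- clip(x + alpha*sign(grad_x loss)) inside an eps-ball around x."""
    x0      = tf.convert_to_tensor(x, dtype=tf.float32)
    y       = tf.convert_to_tensor(y_true)
    x_adv   = tf.identity(x0)
    loss_fn = tf.keras.losses.CategoricalCrossentropy()
    for _ in range(iters):
        with tf.GradientTape() as tape:
            tape.watch(x_adv)
            loss = loss_fn(y, model(x_adv))
        grad  = tape.gradient(loss, x_adv)
        x_adv = x_adv + alpha * tf.sign(grad)
        # project back into the eps-ball and the valid pixel range
        x_adv = tf.clip_by_value(x_adv, x0 - eps, x0 + eps)
        x_adv = tf.clip_by_value(x_adv, 0., 1.)
    return x_adv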
# ATMO 2020 grid built from the (_age_i, _mass, _teff) arrays defined outside
# this snippet; emcee is needed for the reconstruction step below
import emcee

points = np.transpose(np.asarray([_age_i, _mass]))
values = np.asarray(_teff)

np.random.seed(42)
robs, Tobs, mass, ages = mock_population(nBDs,
                                         rel_unc_Tobs,
                                         rel_mass,
                                         f_true,
                                         gamma_true,
                                         rs_true,
                                         rho0_true=rho0)
print(Tobs[0], Tobs[467], robs[34])
## calculate predicted intrinsic heat flow for mock BDs
xi = np.transpose(np.asarray([ages, mass]))
Teff = griddata(points, values, xi)
heat_int = heat(Teff, np.ones(len(Teff)) * R_jup.value)
# ------------------ RECONSTRUCTION --------------------------------------
ndim = 3
nwalkers = 50
# first guess
p0 = [[-0.05, 0.9, 1.3] + 1e-4 * np.random.randn(ndim)
      for j in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# args=(r_obs, Tobs, rel_unc_T, heat_int, mass) could be passed to the sampler
# if lnprob did not pick these up from the enclosing scope
pos, prob, state = sampler.run_mcmc(p0, 300, progress=True)
sampler.reset()
pos, prob, state = sampler.run_mcmc(pos, 5000, progress=True)
like = sampler.flatlnprobability  # log-probability of each flattened sample
maxlike = sampler.flatchain[np.argmax(sampler.flatlnprobability)]
print("ML estimator : ", maxlike)
import sys

from attacks import fgsm_it, cw
from utils import stats, stats_cari, heat, heat_cari
from mnist_cnn import build_network, loss_func, evaluation
from tensorflow.examples.tutorials.mnist import input_data as mnist_data
from keras.datasets import cifar10
from tqdm import tqdm

ITERS = 100
ALPHA = 0.01
EPSI = 0.1

CW_ITERS = 100
BIN_STEPS = 20
NORM = 'l2'  #l0/l2/linf

mnist = mnist_data.read_data_sets('MNIST_data', one_hot=True)

if len(sys.argv) < 2 or sys.argv[1] == "fgsm_it":
    perturbed_accuracy = fgsm_it(mnist.test.images[:1000], ITERS, EPSI, ALPHA,
                                 build_network, loss_func, evaluation)
    print([s[0] for s in perturbed_accuracy])
    stats([s[1] for s in perturbed_accuracy], mnist.test.labels[:1000])
    heat([s[1] for s in perturbed_accuracy], mnist.test.labels[:1000],
         "mnist_fgsm_")
else:
    perturbed_norms, was = cw(mnist.test.images[:1000],
                              mnist.test.labels[:1000], CW_ITERS, BIN_STEPS,
                              build_network, loss_func, evaluation, NORM)
    stats_cari(perturbed_norms, was)
    heat_cari(perturbed_norms, was)
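# Usage note: the script picks the attack from the first command-line argument;
# running it with "fgsm_it" (or with no argument) performs the iterative FGSM
# sweep, any other value runs the Carlini-Wagner attack instead.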