def main(arguments):
    """Run ABC posterior sampling against the observed stream density.

    When ``--auto`` is set, the acceptance threshold is tuned first;
    accepted samples are written to ``<out>/samples.npy`` when an output
    directory is given.
    """
    global phi
    hypothesis.disable_gpu()
    observation = load_observation(arguments).reshape(-1)
    inputs, outputs = load_joint(arguments)
    # Start from the user-supplied threshold; optionally refine it.
    threshold = arguments.threshold
    if arguments.auto:
        print("Tuning acceptance threshold!")
        threshold = tune(inputs, outputs, observation, arguments.threshold)
    print("Threshold found at:", threshold,
          " Applying full posterior sampling!")
    accepted = sample(inputs, outputs, observation, threshold)
    if arguments.out is not None:
        np.save(arguments.out + "/samples.npy", np.array(accepted))
def main(arguments):
    """Simulate GD-1 stream densities with WDM subhalo impacts and store
    the simulated ages, masses, impacts, phis and densities as ``.npy``
    files under ``arguments.out``.

    Three input regimes:
      --mocks N        : pick one (age, mass) from an N-point linspace grid
                         at index --mock-index.
      --nominal        : pick one (age, mass) from a fixed 10-point grid,
                         tiled --size times.
      (default)        : draw a single age and --size masses from the priors.
    """
    hypothesis.disable_gpu()
    prior_age = PriorAge()
    prior_mass = PriorMass()
    if arguments.mocks is not None:
        # Single grid point selected by --mock-index; shaped (1, 1) tensors.
        ages = torch.linspace(prior_age.low, prior_age.high, arguments.mocks)
        masses = torch.linspace(prior_mass.low, prior_mass.high,
                                arguments.mocks)
        ages = ages[arguments.mock_index].view(1, 1)
        masses = masses[arguments.mock_index].view(1, 1)
    elif arguments.nominal:
        # Fixed 10-point grid, same value tiled --size times.
        ages = torch.linspace(prior_age.low, prior_age.high, 10)
        masses = torch.linspace(prior_mass.low, prior_mass.high, 10)
        ages = ages[arguments.mock_index].view(1, 1)
        # NOTE(review): np.vstack converts `ages`/`masses` to numpy arrays
        # here, but the tensor-only calls `.repeat(...)` / `.numpy()` further
        # down assume torch tensors — this branch looks like it would raise.
        # Confirm whether --nominal is actually exercised.
        ages = np.vstack([ages for _ in range(arguments.size)])
        masses = masses[arguments.mock_index].view(1, 1)
        masses = np.vstack([masses for _ in range(arguments.size)])
    else:
        # One age shared by all simulations, --size mass draws.
        ages = prior_age.sample().view(1, 1)
        masses = prior_mass.sample((arguments.size, ))
    # Simulate the stream
    simulator = GD1StreamSimulator()
    stream = simulator(ages)[0]
    # Simulate the densities with the subhalo impacts
    simulator = WDMSubhaloSimulator(stream, record_impacts=True)
    results = simulator(masses)
    # Prepare the results
    # Each result is indexed as (impact, phi, density) — rows are stacked
    # into 2D arrays below.
    densities = []
    phis = []
    impacts = []
    for result in results:
        impacts.append(np.array(result[0]).reshape(1, 1))
        phis.append(np.array(result[1]).reshape(1, -1))
        densities.append(np.array(result[2]).reshape(1, -1))
    # Broadcast the single age to one row per simulation.
    ages = ages.repeat(arguments.size, 1).view(-1, 1).numpy()
    masses = masses.numpy()
    densities = np.vstack(densities).reshape(arguments.size, -1)
    # Store the results
    np.save(arguments.out + "/ages.npy", ages)
    np.save(arguments.out + "/masses.npy", masses)
    np.save(arguments.out + "/phi.npy", phis)
    np.save(arguments.out + "/impacts.npy", impacts)
    np.save(arguments.out + "/densities.npy", densities)
# ---- Exemplo n.º 3 (scraped-snippet separator; the stray "0" was a vote count) ----
def main(arguments):
    """Rejection-ABC: keep joint samples whose summary statistics fall
    within ``arguments.thresholds`` of the observed summary, and save the
    accepted parameters to ``<out>/samples.npy``.
    """
    global phi
    hypothesis.disable_gpu()
    observation = load_observation(arguments)
    summary_observed = summary(observation)
    inputs, outputs = load_joint(arguments)
    # The stream length acts as an implicit part of the summary statistic.
    observed_length = compute_length(phi, observation)
    accepted = []
    for index in range(len(inputs)):
        density = outputs[index]
        # Streams shorter than the observed one are rejected outright.
        if compute_length(phi, density) < observed_length:
            continue
        difference = distance(summary(density), summary_observed)
        if np.all(difference <= arguments.thresholds):
            accepted.append(inputs[index])
    posterior_samples = np.array(accepted)
    np.save(arguments.out + "/samples.npy", np.array(posterior_samples))
import os
import sys
sys.path.insert(
    1,
    '/home/mvasist/constraining-dark-matter-with-stellar-streams-and-ml/notebooks/'
)

import argparse
import hypothesis
hypothesis.disable_gpu()  # You can change this
import matplotlib.pyplot as plt
import numpy as np
import torch
# from sbi import utils as utils

from hypothesis.stat import highest_density_level
from util import MarginalizedAgePrior
from util import Prior
from scipy.stats import chi2
from util import load_ratio_estimator

from tqdm import tqdm
from util import download
from util import load


@torch.no_grad()
def Prior():
    """Build the lower/upper bound tensors of the prior over the
    three model parameters.

    NOTE(review): this snippet appears truncated by the scrape — ``upper``
    is built but, unlike ``lower``, never moved to
    ``hypothesis.accelerator``, and the function falls off the end
    returning ``None``. The original presumably returned a distribution
    constructed from ``lower``/``upper``; confirm against the full source.
    It also shadows the ``Prior`` imported from ``util`` above.
    """
    lower = torch.tensor([0., -4, 2]).float()
    lower = lower.to(hypothesis.accelerator)
    upper = torch.tensor([2000., 0, 3.7]).float()
# ---- Exemplo n.º 5 (scraped-snippet separator; the stray "0" was a vote count) ----
def parse_arguments():
    """Parse the command-line options of the conditional ratio estimator
    training script and return the resulting namespace.

    Unknown options are ignored (``parse_known_args``). The integer
    ``--batchnorm`` flag is normalized to a boolean, and the GPU is
    disabled when ``--disable-gpu`` is passed.
    """
    p = argparse.ArgumentParser("Conditional ratio estimator training")
    add = p.add_argument
    add("--activation",
        type=str,
        default="relu",
        help="Activation function (default: relu).")
    add("--amsgrad",
        action="store_true",
        help="Use AMSGRAD version of Adam (default: false).")
    add("--batch-size",
        type=int,
        default=64,
        help="Batch size (default: 64).")
    add("--batchnorm",
        type=int,
        default=0,
        help="Batchnorm (default: false).")
    add("--beta",
        type=float,
        default=0.0,
        help="Conservative term (default: 0.0).")
    # Paths to the test and train splits (ages, impacts, masses, outputs).
    add("--data-test-ages", type=str, default=None)
    add("--data-test-impacts", type=str, default=None)
    add("--data-test-masses", type=str, default=None)
    add("--data-test-outputs", type=str, default=None)
    add("--data-train-ages", type=str, default=None)
    add("--data-train-impacts", type=str, default=None)
    add("--data-train-masses", type=str, default=None)
    add("--data-train-outputs", type=str, default=None)
    add("--disable-gpu",
        action="store_true",
        help="Disable the usage of the GPU, not recommended. (default: false)."
        )
    add("--dropout",
        type=float,
        default=0.0,
        help="Dropout rate (default: 0.0).")
    add("--epochs",
        type=int,
        default=1,
        help="Number of epochs (default: 1).")
    add("--lr",
        type=float,
        default=0.001,
        help="Learning rate (default: 0.001).")
    add("--lstm",
        action="store_true",
        help="Train the LSTM-based ratio estimator (default: false).")
    add("--mlp",
        action="store_true",
        help="Train the MLP ratio estimator (default: false).")
    add("--n-train",
        type=int,
        default=None,
        help="Number of training samples to select (default: none).")
    add("--normalize-inputs",
        action="store_true",
        help="Let the ratio estimator normalize the inputs (default: false).")
    add("--out",
        type=str,
        default=None,
        help="Output directory for the model.")
    add("--resnet-depth",
        type=int,
        default=161,
        help="ResNet depth (default: 161).")
    add("--weight-decay",
        type=float,
        default=0.0,
        help="Weight decay (default: 0.0).")
    add("--workers",
        type=int,
        default=1,
        help="Number of concurrent data loaders (default: 1).")
    arguments, _ = p.parse_known_args()
    # Normalize the integer flag to a proper boolean.
    arguments.batchnorm = arguments.batchnorm > 0
    if arguments.disable_gpu:
        hypothesis.disable_gpu()

    return arguments