Example no. 1
def test_distance_success():
    lens = mm.Lens()
    lens.distance = 5.*1000.*u.pc
    assert lens.distance == 5000.*u.pc
Example no. 2
import os
import sys
sys.path.append("/home/antares/poleski/WFIRST/MulensModel/source")
import MulensModel as mm


file_1 = "test_100.txt"
data_1 = mm.MulensData(file_name=file_1)
t_0 = 2456900.
u_0 = 0.01
t_E = 20.
model = mm.Model({'t_0': t_0, 'u_0': u_0, 't_E': t_E})
event = mm.Event(datasets=[data_1], model=model)
event.sum_function = 'numpy.sum'

Example no. 3
import matplotlib.pyplot as plt
import numpy as np

import MulensModel as mm


s = 1.2
q = 0.5
n_points = 200

color = np.linspace(0., 1., n_points)

# First, we use standard procedure to plot the caustic. You will see that
# the points are distributed non-uniformly, i.e., the density is higher
# near cusps. We also add color to indicate order of plotting.
# It turns out to be a complicated shape.
caustic = mm.Caustics(s=s, q=q)
caustic.plot(c=color, n_points=n_points)
plt.axis('equal')
plt.colorbar()
plt.title('standard plotting')

# Second, we use uniform sampling. The density of points is constant.
# Here the color scale indicates x_caustic values.
plt.figure()
sampling = mm.UniformCausticSampling(s=s, q=q)
points = [sampling.caustic_point(c) for c in color]
x = [p.real for p in points]
y = [p.imag for p in points]
plt.scatter(x, y, c=color)
plt.axis('equal')
plt.colorbar()
Example no. 4
def lnprob(theta, event, parameters_to_fit):
    """Combine the prior and the likelihood (the def line and docstring are
    inferred - the original excerpt starts inside the function)."""
    lp = lnprior(theta, parameters_to_fit)
    if not np.isfinite(lp):
        return -np.inf
    ln_like = lnlike(theta, event, parameters_to_fit)
    # If the source fluxes are negative, ln_like is NaN; treat such points
    # as if they were outside the priors:
    if np.isnan(ln_like):
        return -np.inf
    return lp + ln_like


# Initialize the model
parameters_to_fit = ["t_0", "u_0", "t_E"]
parameters_values = [2455400., 0.5, 30.]
parameters_steps = [1., 0.01, 1.]

model = mm.Model(
    {'t_0': parameters_values[0], 'u_0': parameters_values[1],
     't_E': parameters_values[2]})
print("Initial", model.parameters)

# Read in the data
file_ = os.path.join(mm.DATA_PATH, "photometry_files",
                     "OB08092", "phot_ob08092_O4.dat")
data = mm.MulensData(file_name=file_, add_2450000=True)

# Set up the Event
event = mm.Event(datasets=data, model=model)

# Baseline chi2 = # of data points
chi2_0 = len(data.time) * 1.

# Initializations for emcee
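# Hedged sketch of a typical emcee initialization (n_walkers, the scatter of
# the starting points, and the use of lnprob below are illustrative
# assumptions, not from the original excerpt):
import emcee

n_dim = len(parameters_to_fit)
n_walkers = 40
starting_points = [
    parameters_values + np.random.randn(n_dim) * parameters_steps
    for _ in range(n_walkers)]
sampler = emcee.EnsembleSampler(
    n_walkers, n_dim, lnprob, args=(event, parameters_to_fit))
# Example usage: sampler.run_mcmc(starting_points, 1000)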
Example no. 5
def jacobian(theta, event, parameters_to_fit):
    """
    Calculate the chi2 gradient (Jacobian). The function name, signature, and
    docstring opening are inferred - the excerpt starts mid-docstring.

    Note: this implementation is robust but possibly inefficient. If
    chi2_fun() is ALWAYS called before jacobian with the same parameters,
    there is no need to set the parameters in event.model; also,
    event.calculate_chi2_gradient() can be used instead (which avoids fitting
    for the fluxes twice).
    """
    for (index, parameter) in enumerate(parameters_to_fit):
        setattr(event.model.parameters, parameter, theta[index])

    return event.get_chi2_gradient(parameters_to_fit)
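

# A minimal sketch of the faster alternative mentioned in the docstring above,
# assuming chi2_fun() was already called with the same theta (so the model
# parameters and fitted fluxes are up to date); the function name is made up:
def jacobian_fast(theta, event, parameters_to_fit):
    """Return the chi2 gradient without re-setting the parameters."""
    return event.calculate_chi2_gradient(parameters_to_fit)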


# Read in the data file
file_ = os.path.join(mm.DATA_PATH, "photometry_files", "OB08092",
                     "phot_ob08092_O4.dat")
data = mm.MulensData(file_name=file_)

# Initialize the fit
parameters_to_fit = ["t_0", "u_0", "t_E"]
t_0 = 5380.
u_0 = 0.1
t_E = 18.
model = mm.Model({'t_0': t_0, 'u_0': u_0, 't_E': t_E})

# Link the data and the model
ev = mm.Event(datasets=data, model=model)
(source_flux_init, blend_flux_init) = ev.get_flux_for_dataset(data)
print('Initial Trial\n{0}'.format(ev.model.parameters))
print('Chi2 = {0}\n'.format(ev.get_chi2()))

# Find the best-fit parameters
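# Hedged sketch of the gradient-based minimization (chi2_fun() is referenced
# in the docstring above but not shown in this excerpt, so its signature and
# the argument order below are assumptions):
# import scipy.optimize as op
# result = op.minimize(
#     chi2_fun, x0=[t_0, u_0, t_E], args=(ev, parameters_to_fit),
#     jac=jacobian, method='Newton-CG')
# print('Best fit:', result.x)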
Example no. 6
    def test_event_init_1(self):
        with self.assertRaises(TypeError):
            ev = mm.Event(model=3.14)
Example no. 7
    def generate_dataset(self, f_source, f_blend, times):
        """generate a single dataset without noise"""
        flux = f_source * self.model.get_magnification(times) + f_blend
        err = np.zeros(len(times)) + self.flux_uncertainty
        data = mm.MulensData([times, flux, err], phot_fmt='flux')
        return data
Example no. 8
#        FSPL: 't_0', 'u_0', 't_E', 'rho'
#        PSPL w/ parallax: 't_0', 'u_0', 't_E', 'pi_E_N', 'pi_E_E'
#            (OPTIONAL: 't_0_par')
#        FSBL: 't_0', 'u_0', 't_E', 'rho', 's', 'q', 'alpha'
#        BSPL: 't_0_1', 'u_0_1', 't_0_2', 'u_0_2', 't_E'
#    By Effect:
#        parallax: 'pi_E_N', 'pi_E_E' OR 'pi_E' (OPTIONAL: 't_0_par')
#        xallarap: 'xi_E_N', 'xi_E_E', 'period'
#        finite source: 'rho' (1 source) OR 'rho_1', 'rho_2' (if 2 sources)
#        lens orbital motion: 'dsdt', 'dalphadt' (OPTIONAL: 'z', 'dzdt')
#    Alternative parameterizations:
#        any two of 'u_0', 't_E', 't_eff' (t_eff = u_0 * t_E)
#        any two of 't_E', 'rho', 't_star' (t_star = rho * t_E)
#        FSBL: 't_1', 'u_0', 't_2', 'rho', 's', 'q', 'alpha' (Cassan)

PSPL_params = mm.ModelParameters({'t_0': 2458060., 'u_0': 0.2, 't_E': 30.5})

my_PSPL_model = mm.Model({'t_0': 2458060., 'u_0': 0.2, 't_E': 30.5})
print(my_PSPL_model.parameters)
print(my_PSPL_model.parameters.t_eff)
print('rho:', my_PSPL_model.parameters.rho)
# Returns: None

FSPL_params = dict(my_PSPL_model.parameters.parameters)
FSPL_params['rho'] = 0.001

my_FSPL_model = mm.Model(FSPL_params)
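
# A minimal sketch of one of the alternative parameterizations listed in the
# comments above: 't_eff' (= u_0 * t_E) can replace 't_E'. The value 6.1
# matches u_0 = 0.2 and t_E = 30.5 used above.
t_eff_params = mm.ModelParameters(
    {'t_0': 2458060., 'u_0': 0.2, 't_eff': 6.1})
print(t_eff_params.t_E)  # t_E is derived internally from u_0 and t_eff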

try:
    my_PSPL_model = mm.Model(parameters={
        't_0': 2458060.,
Example no. 9
import matplotlib.pyplot as plt
import numpy as np

import MulensModel as mm

# Needs commentary

raise NotImplementedError(
    'Most of this use case is not implemented. Need to reconsider ' +
    'estimate_model_params() given that Model cannot be defined this way.')

data = mm.MulensData(file_name="my_data.dat")
model = mm.Model(n_components=1)  # This is not allowed

data.bad[np.isnan(data.err)] = True

event = mm.Event(datasets=data, model=model)
event.estimate_model_params()  # aspirational
event.get_chi2()
event.clean_data()
event.clean_data(sigma=3.)
"""
If sigma is None, set sigma based on the number of data points.
clean_data() requires fitting a model...
"""
event.get_chi2()  # should mask bad data
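
# A hedged, manual sketch of what a sigma-based clean could look like with the
# current API (clean_data() above is aspirational; sigma = 3 is illustrative):
chi2_per_point = event.get_chi2_per_point()[0]
data.bad = data.bad | (chi2_per_point > 3.**2)
event.get_chi2()  # recompute with the newly flagged points masked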

plt.scatter(data.time, data.mag, marker="o", facecolor=None)
plt.scatter(data.time[data.good],
            data.mag[data.good],
            marker="o",
            facecolor="black")
Example no. 10
import matplotlib.pyplot as plt
import os
from astropy.coordinates import SkyCoord
from astropy import units as u

import MulensModel as mm


data_dir = os.path.join(mm.DATA_PATH, 'photometry_files', 'OB140939')
ephemeris_dir = os.path.join(mm.DATA_PATH, 'ephemeris_files')

ra = '17:47:12.25'
dec = '-21:22:58.2'
ra_dec = ra + " " + dec

# Specifying coordinates to calculate HJD from JD
data_1 = mm.MulensData(
    file_name=os.path.join(data_dir, 'ob140939_OGLE.dat'),
    coords=ra_dec)

data_2 = mm.MulensData(
    file_name=os.path.join(data_dir, 'ob140939_OGLE.dat'),
    ra=ra, dec=dec)

coords = SkyCoord(ra_dec, unit=(u.hourangle, u.deg))
data_3 = mm.MulensData(
    file_name=os.path.join(data_dir, 'ob140939_OGLE.dat'), coords=coords)

# Specifying coordinates to calculate a model with parallax
t_0 = 2456836.22
u_0 = 0.922
t_E = 22.87
pi_E_N = -0.248
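
pi_E_E = 0.22  # placeholder value - the original excerpt is truncated here

# A hedged sketch of how the coordinates enter a model with parallax (the
# parameter values above this point are from the excerpt; pi_E_E is not):
model = mm.Model(
    {'t_0': t_0, 'u_0': u_0, 't_E': t_E,
     'pi_E_N': pi_E_N, 'pi_E_E': pi_E_E},
    coords=ra_dec)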
Example no. 11
- the magnification of the model (???)
"""
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt

import MulensModel as mm

# Initialize the model
t_0 = 2455747.049357
t_E = 21.6796
model = mm.Model({
    't_0': t_0,
    'u_0': 0.00352,
    't_E': t_E,
    'rho': 0.001647,
    'alpha': 41.35 * u.deg,
    's': 0.5486,
    'q': 0.00532
})

# times to calculate the magnification
times = np.arange(t_0 - 1., t_0 + 1., 0.001)

# Set method that is used when no other method is specified
# (default value is 'point_source'):
model.set_default_magnification_method('point_source')

# Calculate the magnification using different magnification calculations
default_magnification = model.get_magnification(times)
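
# A hedged sketch of switching to a finite-source method near the peak for
# comparison (the time window and method choice are illustrative):
model.set_magnification_methods([t_0 - 0.3, 'VBBL', t_0 + 0.3])
vbbl_magnification = model.get_magnification(times)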
Example no. 12
def test_a_proj_success():
    lens = mm.Lens(mass_1=1.0*u.solMass, mass_2=0.1*u.solMass, a_proj=1.0*u.au,
                   distance=6.*u.kpc)
    assert lens.total_mass == 1.1*u.solMass
    assert lens.q == 0.1
Example no. 13
def test_q_success():
    lens = mm.Lens()
    lens.mass_1 = 1.0*u.solMass
    lens.q = 10.**3
    assert lens.mass_2 == 10.**3*u.solMass
    assert lens.total_mass == (1.0+10.**3)*u.solMass
Example no. 14
def test_q_total_mass():
    lens = mm.Lens()
    lens.q = 0.25
    lens.total_mass = 0.8*u.solMass
    np.testing.assert_almost_equal(lens.mass_1.value, 0.64)
    np.testing.assert_almost_equal(lens.mass_2.value, 0.16)
Example no. 15
    's': s,
    'q': q
})
params_3 = mm.modelparameters.ModelParameters({
    't_0': t_0,
    'u_0': u_0,
    't_E': t_E,
    'pi_E_N': 0.2,
    'pi_E_E': 0,
    'rho': rho,
    'alpha': alpha,
    's': s,
    'q': q
})
# Get the trajectory of the source
traj_1 = mm.Trajectory(times, params_1, coords=coord)
traj_2 = mm.Trajectory(times, params_2, coords=coord)
traj_3 = mm.Trajectory(times, params_3, parallax=paral, coords=coord)

# Get the magnification of the model
light_1 = []
light_2 = []
light_3 = []
model_1 = mm.BinaryLens(m_1 / 11, m_2 / 11, s)

for i in range(len(traj_1.x)):
    light_1.append(model_1.vbbl_magnification(traj_1.x[i], traj_1.y[i], rho))
    light_2.append(model_1.vbbl_magnification(traj_2.x[i], traj_2.y[i], rho))
    light_3.append(model_1.vbbl_magnification(traj_3.x[i], traj_3.y[i], rho))

# Plot the light curve
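# Hedged sketch of the plotting step (the original excerpt is truncated here;
# it assumes matplotlib.pyplot was imported as plt and the labels simply
# describe the three trajectories defined above):
plt.plot(times, light_1, label='trajectory 1')
plt.plot(times, light_2, label='trajectory 2')
plt.plot(times, light_3, label='trajectory 3 (with parallax)')
plt.xlabel('time')
plt.ylabel('magnification')
plt.legend()
plt.show()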
Example no. 16
import os
import sys
import numpy as np
import MulensModel as mm
import scipy.optimize as op
import matplotlib.pyplot as plt

# Loading main files
event_dir = 'ev03'
Mag = np.loadtxt(os.path.join(sys.path[0], event_dir, 'files/magni.txt'))
Disk = np.loadtxt(os.path.join(sys.path[0], event_dir, 'files/disk.txt'))
Params = np.loadtxt(os.path.join(sys.path[0], event_dir, 'files/param.txt'))
mag_file = os.path.join(sys.path[0], event_dir, 'files/W149.out')
mag_data = mm.MulensData(file_name=mag_file)

# Building model and event with initial parameters
t0 = 0
u0 = 0.02
tE = 100
dt = 70 / 3000

pspl_model = mm.Model({'t_0': t0, 'u_0': u0, 't_E': tE})
# Datasets are linked via Event (Model.set_datasets() is not needed in
# current MulensModel versions):
event = mm.Event(datasets=mag_data, model=pspl_model)

# Fitting
parameters_to_fit = ["t_0", "u_0", "t_E"]
initial_guess = [t0, u0, tE]


def chi2_for_model(theta, event, parameters_to_fit):
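    # Hedged sketch of the function body and its use (the original excerpt
    # ends at the signature above): set the trial parameters, return chi2.
    for (parameter, value) in zip(parameters_to_fit, theta):
        setattr(event.model.parameters, parameter, value)
    return event.get_chi2()


# Usage sketch (the choice of method is illustrative):
result = op.minimize(
    chi2_for_model, x0=initial_guess, args=(event, parameters_to_fit),
    method='Nelder-Mead')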
Example no. 17
# Set up the time array from t_0 - 2*t_E to t_0 + 2*t_E
t_gc = julian.from_jd(t_0, 'jd')
t_start = julian.to_jd(t_gc - datetime.timedelta(days=2*t_E), fmt='jd')
t_end = julian.to_jd(t_gc + datetime.timedelta(days=2*t_E), fmt='jd')
times = np.linspace(t_start, t_end, 100)


# Change the unit of the times array to t_E
times_plot = (times - t_0) / t_E


# Set up PSPL models and get light-curve data for different u_0 values.
# Note: the same params object is reused, so each curve is computed right
# after its u_0 is set (before params is changed again).
model_PSPL_1 = mm.MagnificationCurve(times, params,
                                     coords='18:00:00 -30:00:00')
curve_1 = model_PSPL_1.get_point_lens_magnification()

params.u_0 = 0.3
model_PSPL_2 = mm.MagnificationCurve(times, params,
                                     coords='18:00:00 -30:00:00')
curve_2 = model_PSPL_2.get_point_lens_magnification()

params.u_0 = 1.0
model_PSPL_3 = mm.MagnificationCurve(times, params,
                                     coords='18:00:00 -30:00:00')
curve_3 = model_PSPL_3.get_point_lens_magnification()
"""
model_PSPL.parameters.u_0 = 0.3
print(model_PSPL.parameters)
curve_2 = model_PSPL.get_point_lens_magnification()
Example no. 18
        return -np.inf
    ln_like_ = ln_like(theta, event, parameters_to_fit)

    # In the cases that source fluxes are negative we want to return
    # these as if they were not in priors.
    if np.isnan(ln_like_):
        return -np.inf

    return ln_prior_ + ln_like_


# Read the data (note that we do not rescale errorbars here):
dir_ = join(mm.DATA_PATH, "photometry_files", "OB140939")
file_ground = join(dir_, "ob140939_OGLE.dat")
file_spitzer = join(dir_, "ob140939_Spitzer.dat")
data_ground = mm.MulensData(file_name=file_ground,
                            plot_properties={'label': 'OGLE'})

# Here is the main difference - we provide the ephemeris for Spitzer:
file_spitzer_eph = join(
    mm.DATA_PATH, 'ephemeris_files', 'Spitzer_ephemeris_01.dat')
data_spitzer = mm.MulensData(file_name=file_spitzer,
                             ephemerides_file=file_spitzer_eph,
                             plot_properties={'label': 'Spitzer'})

# For parallax calculations we need event coordinates:
coords = "17:47:12.25 -21:22:58.7"

# Starting parameters:
params = {
    't_0': 2456830., 'u_0': 0.8, 't_E': 25.,
    'pi_E_N': 0., 'pi_E_E': 0.,
Example no. 19
    def test_event_init_2(self):
        with self.assertRaises(TypeError):
            ev = mm.Event(datasets='some_string')
Example no. 20
import os
import sys
sys.path.append("/home/antares/poleski/WFIRST/MulensModel/source")
import MulensModel as mm


file_1 = "test_1000_piE.txt"
data_1 = mm.MulensData(file_name=file_1)
t_0 = 2456900.
u_0 = 0.1
t_E = 50.
pi_E_N = 0.6
pi_E_E = 0.8
param = {'t_0': t_0, 'u_0': u_0, 't_E': t_E,
         'pi_E_N': pi_E_N, 'pi_E_E': pi_E_E, 't_0_par': t_0}
model = mm.Model(param, coords="18:00:00.00 -30:00:00.0")
event = mm.Event(datasets=[data_1], model=model)
event.sum_function = 'numpy.sum'

event.get_chi2()
Example no. 21
    def generate_fake_datasets(self):
        """
        Generate perfect datasets with different source and blend fluxes
        """
        model_1 = mm.Model(self.model.parameters.source_1_parameters)
        model_2 = mm.Model(self.model.parameters.source_2_parameters)
        model_2.set_magnification_methods(self.model._methods[2])
        model_2.set_limb_coeff_gamma('I', self.gamma['I'])
        model_2.set_limb_coeff_gamma('V', self.gamma['V'])

        def gen_data(f_source_1,
                     f_blend,
                     q_flux,
                     times,
                     bandpass=None,
                     **kwargs):
            """generate perfect data for a given set of properties"""

            if bandpass == 'I':
                gamma = self.gamma['I']
            elif bandpass == 'V':
                gamma = self.gamma['V']
            else:
                gamma = None

            mag_1 = model_1.get_magnification(times)
            mag_2 = model_2.get_magnification(times, gamma=gamma)
            f_source_2 = f_source_1 * q_flux
            flux = f_source_1 * mag_1 + f_source_2 * mag_2 + f_blend
            err = np.zeros(len(times)) + 0.01
            data = mm.MulensData([times, flux, err],
                                 phot_fmt='flux',
                                 bandpass=bandpass,
                                 **kwargs)

            return data

        def add_data(properties, label=None):
            """create data in two bands for each fake observatory"""
            times = np.arange(properties['t_start'], properties['t_stop'],
                              properties['dt_I'])
            data_I = gen_data(properties['f_source_I'],
                              properties['f_blend_I'],
                              self.q_I,
                              times,
                              bandpass='I',
                              plot_properties={'label': '{0} I'.format(label)})
            self.datasets.append(data_I)
            times = np.arange(properties['t_start'], properties['t_stop'],
                              properties['dt_V'])
            data_V = gen_data(properties['f_source_V'],
                              properties['f_blend_V'],
                              self.q_V,
                              times,
                              bandpass='V',
                              plot_properties={'label': '{0} V'.format(label)})
            self.datasets.append(data_V)

        self.datasets = []
        self.expected_fluxes = []
        n_tE = 10
        self.data_properties = {
            # dense "Survey" data
            'survey_1': {
                't_start': (self.model.parameters.t_0_1 -
                            n_tE * self.model.parameters.t_E),
                't_stop': (self.model.parameters.t_0_1 +
                           n_tE * self.model.parameters.t_E),
                'dt_I': 0.04,
                'dt_V': 0.4,
                'f_source_I': 1.,
                'f_blend_I': 0.2,
                'f_source_V': 0.8,
                'f_blend_V': 0.3
            },
            # sparse "Survey" data
            'survey_2': {
                't_start': (self.model.parameters.t_0_1 -
                            n_tE * self.model.parameters.t_E + 1.01),
                't_stop': (self.model.parameters.t_0_1 +
                           n_tE * self.model.parameters.t_E + 1.01),
                'dt_I': 1.0,
                'dt_V': 2.0,
                'f_source_I': 1.1,
                'f_blend_I': 0.15,
                'f_source_V': 0.81,
                'f_blend_V': 0.2
            },
            # "FollowUp" data
            'followup_1': {
                't_start': (self.model.parameters.t_0_1 - 1.1),
                't_stop': (self.model.parameters.t_0_2 +
                           self.model.parameters.t_E),
                'dt_I': 0.001,
                'dt_V': 0.01,
                'f_source_I': 2.3,
                'f_blend_I': 0.5,
                'f_source_V': 1.8,
                'f_blend_V': 0.65
            }
        }

        # The data_keys list is intentional to ensure that survey_1, I is the
        # first (and reference) dataset
        self.data_keys = ['survey_1', 'survey_2', 'followup_1']
        for key in self.data_keys:
            add_data(self.data_properties[key], label=key)
            self.expected_fluxes.append(([
                self.data_properties[key]['f_source_I'],
                self.q_I * self.data_properties[key]['f_source_I']
            ], self.data_properties[key]['f_blend_I']))
            self.expected_fluxes.append(([
                self.data_properties[key]['f_source_V'],
                self.q_V * self.data_properties[key]['f_source_V']
            ], self.data_properties[key]['f_blend_V']))
Example no. 22
"""
import glob
import os
import matplotlib.pyplot as plt
from matplotlib import gridspec

import MulensModel as mm

# Read in MB08310 data files (see data/MB08310) as MulensData objects.
# Grabbing all data files in the MB08310 folder
files = glob.glob(
    os.path.join(mm.DATA_PATH, "photometry_files", "MB08310", "*.tbl"))

datasets_default = []
for file_ in sorted(files):
    data = mm.MulensData(file_name=file_, comments=["\\", "|"])
    datasets_default.append(data)

# Define basic point lens model
t_0 = 2454656.39975
u_0 = 0.00300
t_E = 11.14
t_star = 0.05487
plens_model = mm.Model({'t_0': t_0, 'u_0': u_0, 't_E': t_E, 't_star': t_star})
method = 'finite_source_uniform_Gould94'
plens_model.set_magnification_methods([t_0 - .05, method, t_0 + .05])

# Combine the data and model into an event
event_default = mm.Event(datasets=datasets_default, model=plens_model)
event_default.data_ref = 6
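
# A hedged sketch of how such an event is typically inspected (the keyword
# values are illustrative):
event_default.plot_model(subtract_2450000=True, color='black')
event_default.plot_data(subtract_2450000=True)
plt.legend(loc='best')
plt.show()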
Example no. 23
config = configparser.ConfigParser()
config.optionxform = str  # So that "t_E" is not changed to "t_e".
config.read(config_file)
files = read.read_files_from_config(config)
model_settings = read.read_model_settings(config)
(parameters, starting) = read.read_parameters_start(config)
fixed_parameters = read.read_fix_parameters(config)
(min_values, max_values) = read.read_min_max(config)
ln_prior.min = min_values
ln_prior.max = max_values
emcee_settings = read.read_emcee_settings(config)
other_settings = read.read_other(config)

# Read photometric data.
datasets = [mm.MulensData(file_name=f[0], phot_fmt=f[1]) for f in files]

# Generate starting values of parameters.
start = generate_random_parameters(parameters, starting,
                                   emcee_settings['n_walkers'])

# Setup Event instance that combines model and data.
par = dict(zip(parameters, start[0]))
par = {**par, **fixed_parameters}
my_model = mm.Model(par, coords=model_settings['coords'])
if 'methods' in model_settings:
    my_model.set_magnification_methods(model_settings['methods'])
if 'default_method' in model_settings:
    my_model.set_default_magnification_method(model_settings['default_method'])
my_event = mm.Event(datasets=datasets, model=my_model)
Example no. 24
        })
        plot.append(BL(**param[-1]))
        plot[-1].get_position_arrays(region=region, region_lim=region_lim)
        plot[-1].get_magnification_array()

x_array = plot[0].x_array
y_array = plot[0].y_array
mag_BL = [None] * len(plot)
for (i, p) in enumerate(plot):
    mag_BL[i] = p.magn_array

mag_MM = []

# Assuming the total system's mass is 1 unit:
blens = mm.BinaryLens(
    mass_1=1. / (1. + q), mass_2=q / (1. + q), separation=s)
"""
# Assuming the star's mass is 1 unit:
blens = (mm.BinaryLens(mass_1=1, mass_2=q, separation=s))
"""

for i in range(len(x_array)):
    mag_MM.append(
        blens._point_source_WM95(source_x=-x_array[i], source_y=y_array[i]))
"""
plt.scatter(x_array, y_array, c = mag_BL[0])
plt.show()
plt.scatter(x_com, y_com, c = mag_MM[0])
plt.show()
"""
Example no. 25
# Imports needed by this excerpt (not shown in the original snippet):
import configparser
import sys
from os.path import isfile

import MulensModel as mm

if len(sys.argv) != 2:
    raise ValueError('Exactly one argument needed - cfg file')
config_file = sys.argv[1]
if not isfile(config_file):
    raise FileNotFoundError('File: {:}'.format(config_file))
config = configparser.ConfigParser()
config.optionxform = str
config.read(config_file)

# Read the data
section = "photometry files"
if section not in config:
    raise KeyError('Sorry, no photometry files specified in config.')
file_names = [config.get(section, var) for var in config[section]]
kwargs = {'comments': ["\\", "|"]}
data = [mm.MulensData(file_name=name, **kwargs) for name in file_names]

# Read parameters
section = "parameters to fit"
info = [[var, config.get(section, var).split()] for var in config[section]]
for info_ in info:
    if len(info_[1]) != 2:
        msg = 'Wrong input in cfg file:\n{:}'
        raise ValueError(msg.format(config.get(section, info_[0])))
parameters_to_fit = [x[0] for x in info]
starting_mean = [float(x[1][0]) for x in info]
starting_sigma = [float(x[1][1]) for x in info]
parameters = {key: va for (key, va) in zip(parameters_to_fit, starting_mean)}
n_parameters = len(parameters)

model = mm.Model(parameters)
Example no. 26
MCPM_options = read_config.read_MCPM_options(config)

# other constraints:
other_constraints = read_config.read_other_constraints(config)

# End of settings.
###################################################################
n_params = len(parameters_to_fit)
config_file_root = os.path.splitext(config_file)[0]
if file_all_models is None:
    file_all_models = config_file_root + ".models"

# read datasets
datasets = []
if skycoord is not None:
    coords = MM.Coordinates(skycoord)
else:
    coords = None
if files is not None:
    for (file_, fmt, kwargs) in zip(files, files_formats, files_kwargs):
        data = MM.MulensData(file_name=file_,
                             add_2450000=True,
                             phot_fmt=fmt,
                             coords=coords,
                             **kwargs)
        datasets.append(data)

# satellite datasets
cpm_sources = []
for campaign in MCPM_options['campaigns']:
    cpm_source = CpmFitSource(ra=skycoord.ra.deg,
Example no. 27
    if np.isnan(ln_like_):
        return -np.inf

    return ln_prior_ + ln_like_


# Add data from OB161195
datasets = []
file_names = [
    'KCT01I.dat', 'KCT41I.dat', 'KCT42I.dat', 'KSA01I.dat', 'KSA41I.dat',
    'KSA42I.dat', 'KSS01I.dat', 'KSS41I.dat', 'KSS42I.dat', 'spitzer_b12.dat'
]
dir_ = os.path.join(MulensModel.DATA_PATH, "photometry_files", "OB161195")
for file_name in file_names:
    file_ = os.path.join(dir_, file_name)
    datasets.append(MulensModel.MulensData(file_name=file_, add_2450000=True))

# Close-- model
params = {
    't_0': 2457568.7692,
    'u_0': 0.05321,
    't_E': 9.96,
    'rho': 0.00290,
    'pi_E_N': -0.2154,
    'pi_E_E': -0.380,
    'alpha': np.rad2deg(-0.9684),
    's': 0.9842,
    'q': 0.0000543
}
model = MulensModel.Model(params)
Example no. 28
        plot_properties['show_errorbars'] = False
    elif 'CTIO_I' in filename:
        plot_properties['color'] = 'green'

    return plot_properties


data_path = os.path.join(mm.DATA_PATH, 'photometry_files')
comments = ['\\', '|']

# Basic: Two datasets with specified data properties
ob03235_ogle_data = mm.MulensData(file_name=os.path.join(
    data_path, 'OB03235', 'OB03235_OGLE.tbl.txt'),
                                  phot_fmt='mag',
                                  comments=comments,
                                  plot_properties={
                                      'color': 'black',
                                      'zorder': 10,
                                      'show_bad': True
                                  })
ob03235_moa_data = mm.MulensData(file_name=os.path.join(
    data_path, 'OB03235', 'OB03235_MOA.tbl.txt'),
                                 phot_fmt='flux',
                                 comments=comments,
                                 plot_properties={
                                     'marker': 's',
                                     'markersize': 2,
                                     'color': 'red',
                                     'zorder': 2,
                                     'show_errorbars': False
                                 })
Example no. 29
"""
This is a use case for Spitzer + Kepler. The goal is to partially reproduce
Figure 3 from Zhu et al. 2017 ApJL 849 L31 (= MOA-2016-BLG-290).

"""
import numpy as np
import matplotlib.pyplot as plt

import MulensModel as mm

raise NotImplementedError('satellite keyword for MulensData not supported')
# Needs a new trial event, preferably a point lens from Zhu?.

# Import Data
ogle_data = mm.MulensData(file_name='OGLE File Name',
                          plot_properties={
                              'label': 'OGLE',
                              'color': 'black'
                          })
moa_data = mm.MulensData(file_name='MOA File Name',
                         plot_properties={
                             'label': 'MOA',
                             'color': 'orange'
                         })
spitzer_data = mm.MulensData(
    file_name='Spitzer File Name',
    satellite='Spitzer',  # this keyword does not work.
    plot_properties={
        'label': 'Spitzer',
        'color': 'red'
    })
kepler_data = mm.MulensData(file_name='Kepler File Name',
Example no. 30
def test_single_mass_success():
    lens_single = mm.Lens()
    lens_single.mass = 0.5*u.solMass
    assert lens_single.mass == 0.5*u.solMass
    assert lens_single.mass_1 == 0.5*u.solMass
    assert lens_single.total_mass == 0.5*u.solMass