Example #1
def fit_MM_MCPM_EMCEE(
        files, files_formats, files_kwargs, skycoord, methods, MCPM_options,
        starting_settings, parameters_to_fit, parameters_fixed,
        min_values, max_values, emcee_settings, priors_gauss, priors_tabulated,
        other_constraints, file_all_models, config_file_root, gamma_LD,
        model_type=None, data_add_245=True):
    """
    Fit the microlensing model (MulensModel) and the K2 photometry (MCPM)
    using the EMCEE method. The input is complicated; please see the code
    below and read_config.py for details.

    emcee_settings['PTSampler'] == True means no blobs (i.e., ground-based
        fluxes are not passed).
    emcee_settings['file_posterior'] ending in ".npy" means a 3D or 4D array
        is saved; any other extension means a text file with the flattened
        posterior is saved.
    model_type: *None* or *str*
        Can be *None* (i.e., MM parameters are used), 'wide', 'close_A',
        or 'close_B'. If not None, then 't_0_pl', 'u_0_pl', and 't_E_pl'
        parameters are translated to s, q, alpha.
    """
    print("MM version: " + MM.__version__)
    print("MCPM version: " + MCPM_version)
    print("EMCEE version: " + emcee.__version__)
    print("script version: " + __version__, flush=True)

    utils.get_standard_parameters.model_type = model_type

    n_params = len(parameters_to_fit)
    if file_all_models is None:
        file_all_models = config_file_root + ".models"

    # read datasets
    datasets = []
    if skycoord is not None:
        coords = MM.Coordinates(skycoord)
    else:
        coords = None
    if files is not None:
        for (file_, fmt, kwargs) in zip(files, files_formats, files_kwargs):
            data = MM.MulensData(file_name=file_, add_2450000=data_add_245,
                                 phot_fmt=fmt, coords=coords, **kwargs)
            datasets.append(data)

    # satellite datasets
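    # MCPM_options keys used here: 'campaigns', 'channel', 'predictor_matrix',
    # 'l2', 'l2_per_pixel', 'half_size', and 'n_select'; further optional keys
    # (e.g., 'ephemeris_file', 'sat_sigma_scale', 'mask_model_epochs',
    # 'color_constraint', 'coeffs_fits_in'/'coeffs_fits_out',
    # 'no_blending_files') are read further below.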
    cpm_sources = []
    for campaign in MCPM_options['campaigns']:
        cpm_source = CpmFitSource(
            ra=skycoord.ra.deg, dec=skycoord.dec.deg,
            campaign=campaign, channel=MCPM_options['channel'])
        cpm_source.get_predictor_matrix(**MCPM_options['predictor_matrix'])
        cpm_source.set_l2_l2_per_pixel(
            l2=MCPM_options['l2'], l2_per_pixel=MCPM_options['l2_per_pixel'])
        cpm_source.set_pixels_square(MCPM_options['half_size'])
        cpm_source.select_highest_prf_sum_pixels(MCPM_options['n_select'])

        cpm_sources.append(cpm_source)

    # initiate model
    starting = utils.generate_random_points(
        starting_settings, parameters_to_fit,
        emcee_settings['n_walkers'] * emcee_settings['n_temps'])
    zip_ = zip(parameters_to_fit, starting[0])
    parameters = {key: value for (key, value) in zip_}
    parameters.update(parameters_fixed)
    parameters_ = {**parameters}
    for param in list(parameters_.keys()):
        pop_keys = ['f_s_sat', 'f_s_sat_over_u_0']
        if param in pop_keys or param[:3] == 'q_f' or param[:7] == 'log_q_f':
            parameters_.pop(param)
    if 't_0_pl' in parameters_:
        parameters_ = utils.get_standard_parameters(parameters_)
    try:
        model = MM.Model(parameters_, coords=coords)
    except KeyError:
        model = PixelLensingModel(parameters_, coords=coords)
    for (m_key, m_value) in methods.items():
        model.set_magnification_methods(m_value, m_key)
    for (band, gamma) in gamma_LD.items():
        model.set_limb_coeff_gamma(band, gamma)
    if isinstance(model, MM.Model):
        if 'f_s_sat' in parameters:
            f_s_sat = parameters['f_s_sat']
        else:
            f_s_sat = parameters['f_s_sat_over_u_0'] * model.parameters.u_0

    for cpm_source in cpm_sources:
        times = cpm_source.pixel_time + 2450000.
        times[np.isnan(times)] = np.mean(times[~np.isnan(times)])
        if model.n_sources == 1:
            if not isinstance(model, MM.Model):
                model_flux = model.flux_difference(times)
            else:
                model_flux = f_s_sat * (_get_magnification(model, times) - 1.)
        else:
            if not isinstance(model, MM.Model):
                raise NotImplementedError('not yet coded for pixel lensing')
            if ('log_q_f' in parameters) or ('q_f' in parameters):
                if 'log_q_f' in parameters:
                    q_f = 10**parameters['log_q_f']
                else:
                    q_f = parameters['q_f']
                model.set_source_flux_ratio(q_f)
                model_magnification = _get_magnification(model, times)
            else:  # This is a very simple solution.
                model_magnification = _get_magnification(
                    model, times, separate=True)[0]
            model_flux = f_s_sat * (model_magnification - 1.)
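        # The flux passed to run_cpm() is the model's flux increase above
        # baseline: f_s_sat * (A(t) - 1) for a MulensModel model, or the
        # flux_difference() output directly for pixel lensing.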
        cpm_source.run_cpm(model_flux)

        utils.apply_limit_time(cpm_source, MCPM_options)

        mask = cpm_source.residuals_mask
        if 'mask_model_epochs' in MCPM_options:
            mask *= utils.mask_nearest_epochs(
                cpm_source.pixel_time+2450000.,
                MCPM_options['mask_model_epochs'])
        sat_time = cpm_source.pixel_time[mask] + 2450000.
        # sat_sigma = sat_time * 0. + MCPM_options['sat_sigma']
        sat_sigma = np.sqrt(np.sum(
            np.array([err[mask] for err in cpm_source.pixel_flux_err])**2,
            axis=0))
        if 'sat_sigma_scale' in MCPM_options:
            sat_sigma *= MCPM_options['sat_sigma_scale']
        data = MM.MulensData(
            [sat_time, 0.*sat_time, sat_sigma],
            phot_fmt='flux', ephemerides_file=MCPM_options['ephemeris_file'],
            bandpass="******")
        datasets.append(data)
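        # Note that the satellite dataset is created with placeholder fluxes
        # equal to zero; the epochs, uncertainties, and the ephemerides file
        # are what matter here.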

    # initiate event and minimizer
    if isinstance(model, MM.Model):
        event = MM.Event(datasets=datasets, model=model)
    else:
        event = PixelLensingEvent(datasets=datasets, model=model)
    params = parameters_to_fit[:]
    minimizer = Minimizer(event, params, cpm_sources)
    if emcee_settings['PTSampler']:
        minimizer.save_fluxes = False
        # Somehow blobs are not allowed for PTSampler.
    minimizer.file_all_models = file_all_models
    minimizer.set_chi2_0()
    if 'f_s_sat' in parameters_fixed or 'f_s_sat_over_u_0' in parameters_fixed:
        minimizer.set_satellite_source_flux(f_s_sat)
    if 'coeffs_fits_in' in MCPM_options:
        minimizer.read_coeffs_from_fits(MCPM_options['coeffs_fits_in'])
    if 'coeffs_fits_out' in MCPM_options:
        minimizer.start_coeffs_cache()
    if 'sat_sigma_scale' in MCPM_options:
        minimizer.sigma_scale = MCPM_options['sat_sigma_scale']
    if 'color_constraint' in MCPM_options:
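        # Accepted layouts of MCPM_options['color_constraint'], as parsed
        # below: 3 or 4 items = [ref_file, (ref_mag,) <two values passed to
        # add_color_constraint()>]; 5-8 items = [ref_file, file_2, file_3,
        # <remaining values passed to add_full_color_constraint()>].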
        cc = 'color_constraint'
        ref_dataset = files.index(MCPM_options[cc][0])
        if len(MCPM_options[cc]) == 3:
            ref_mag = MM.utils.MAG_ZEROPOINT
        else:
            ref_mag = MCPM_options[cc][1]

        if len(MCPM_options[cc]) in [3, 4]:
            minimizer.add_color_constraint(
                ref_dataset, ref_mag,
                MCPM_options[cc][-2], MCPM_options[cc][-1])
        elif len(MCPM_options[cc]) in [5, 6, 7, 8]:
            minimizer.add_full_color_constraint(
                ref_dataset,
                files.index(MCPM_options[cc][1]),
                files.index(MCPM_options[cc][2]),
                *MCPM_options[cc][3:])
        else:
            raise ValueError('wrong size of "color_constraint" option')
    key = 'min_blending_flux'
    if key in other_constraints:
        index = files.index(other_constraints[key][0])
        other_constraints[key] = [datasets[index], other_constraints[key][1]]
    minimizer.other_constraints = other_constraints

    key = 'no_blending_files'
    if key in MCPM_options:
        indexes = [files.index(f) for f in MCPM_options['no_blending_files']]
        for ind in indexes:
            minimizer.fit_blending[ind] = False

    if 'mask_model_epochs' in MCPM_options:
        for i in range(minimizer.n_sat):
            minimizer.model_masks[i] = utils.mask_nearest_epochs(
                    cpm_sources[i].pixel_time+2450000.,
                    MCPM_options['mask_model_epochs'])

    # EMCEE fit:
    if emcee_settings['PTSampler']:
        print("EMCEE temps, walkers, steps, burn: {:} {:} {:} {:}".format(
            emcee_settings['n_temps'], emcee_settings['n_walkers'],
            emcee_settings['n_steps'], emcee_settings['n_burn']))
    else:
        print("EMCEE walkers, steps, burn: {:} {:} {:}".format(
            emcee_settings['n_walkers'], emcee_settings['n_steps'],
            emcee_settings['n_burn']))
    minimizer.set_prior_boundaries(min_values, max_values)
    for start_ in starting:
        if minimizer.ln_prior(start_) <= -float('inf'):
            raise ValueError('starting point is not in prior:\n' + str(start_))
    minimizer.set_prior_gaussian(priors_gauss)
    for (parameter, file_name) in priors_tabulated.items():
        minimizer.set_prior_tabulated(parameter, file_name)
    if emcee_settings['PTSampler']:
        sampler = emcee.PTSampler(
            emcee_settings['n_temps'], emcee_settings['n_walkers'], n_params,
            minimizer.ln_like, minimizer.ln_prior)
        shape = (emcee_settings['n_temps'], emcee_settings['n_walkers'],
                 n_params)
        starting = np.array(starting).reshape(shape)
    else:
        sampler = emcee.EnsembleSampler(
            emcee_settings['n_walkers'], n_params, minimizer.ln_prob)
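    # Note: PTSampler exists only in emcee < 3 (it was later split off into
    # the separate ptemcee package), while the 'initial_state' keyword used
    # below matches the emcee 3 API of EnsembleSampler.sample(); for the old
    # PTSampler the starting positions would be passed as 'p0' instead.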
    acceptance_fractions = []
    # run:
    kwargs = {'initial_state': starting,
              'iterations': emcee_settings['n_steps']}
    for _ in tqdm(sampler.sample(**kwargs), total=emcee_settings['n_steps']):
        acceptance_fractions.append(np.mean(sampler.acceptance_fraction))

    # cleanup and close minimizer:
    out_name = emcee_settings.get('file_acceptance_fractions', None)
    if out_name is not None:
        if len(out_name) == 0:
            out_name = config_file_root + ".accept"
        data_save = [str(i+1) + " " + str(af)
                     for (i, af) in enumerate(acceptance_fractions)]
        with open(out_name, 'w') as file_out:
            file_out.write('\n'.join(data_save))
    n_burn = emcee_settings['n_burn']
    if emcee_settings['PTSampler']:
        samples = sampler.chain[0, :, n_burn:, :].reshape((-1, n_params))
        n_fluxes = 0
    else:
        samples = sampler.chain[:, n_burn:, :].reshape((-1, n_params))
        blob_sampler = np.transpose(np.array(sampler.blobs), axes=(1, 0, 2))
        n_fluxes = blob_sampler.shape[-1]
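        # The blobs hold the source (S) and blending (B) fluxes of the fitted
        # datasets; they are summarized as flux_S_i / flux_B_i further below.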
    if 'coeffs_fits_out' in MCPM_options:
        minimizer.set_pixel_coeffs_from_samples(samples)
        minimizer.save_coeffs_to_fits(MCPM_options['coeffs_fits_out'])
        minimizer.stop_coeffs_cache()
    minimizer.close_file_all_models()

    # output
    text = "Mean acceptance fraction"
    fmt = ": {:.4f} +- {:.4f}"
    print(text + fmt.format(
        np.mean(sampler.acceptance_fraction),
        np.std(sampler.acceptance_fraction)))
    if emcee_settings['PTSampler']:
        print(text + " of the lowest temperature walkers" + fmt.format(
            np.mean(sampler.acceptance_fraction[0, :]),
            np.std(sampler.acceptance_fraction[0, :])))
    zip_ = zip(*np.percentile(samples, [16, 50, 84], axis=0))
    results = map(lambda v: (v[1], v[2]-v[1], v[0]-v[1]), zip_)
    for (param, r) in zip(parameters_to_fit, results):
        if r[1] < 1.e-3 or r[2] > -1.e-3:
            fmt = '{:7s} : {:.5f} {:+.4g} {:+.4g}'
        else:
            fmt = '{:7s} : {:.4f} {:+.4f} {:+.4f}'
        print(fmt.format(param, *r))
    if n_fluxes > 0:
        blob_samples = blob_sampler[:, n_burn:, :].reshape((-1, n_fluxes))
        percentiles = np.percentile(blob_samples, [16, 50, 84], axis=0)
        blob_results = map(
            lambda v: (v[1], v[2]-v[1], v[1]-v[0]), zip(*percentiles))
        msg = 'flux_{:}_{:} : {:.4f} {:.4f} {:.4f}'
        for (i, r) in zip(range(n_fluxes), blob_results):
            print(msg.format(['S', 'B'][i % 2], i//2+1, *r))
    if 'file_posterior' in emcee_settings:
        if len(emcee_settings['file_posterior']) == 0:
            emcee_settings['file_posterior'] = config_file_root + ".posterior"
        if emcee_settings['file_posterior'][-4:] == '.npy':
            if emcee_settings['PTSampler']:
                all_samples = sampler.chain[:, :, n_burn:, :]
            else:  # XXX blobs here
                all_samples = sampler.chain[:, n_burn:, :]
            np.save(emcee_settings['file_posterior'], all_samples)
        else:
            all_samples = samples
            if n_fluxes > 0:
                all_samples = np.concatenate(
                    (all_samples, blob_samples), axis=1)
            np.savetxt(emcee_settings['file_posterior'], all_samples)
    print('Best model:')
    minimizer.print_min_chi2()
    # Plots are below
    if 'file_corner' in emcee_settings or 'file_trace' in emcee_settings:
        data = sampler.chain[:, n_burn:, :]
        if 't_0' in parameters_to_fit:
            index = parameters_to_fit.index('t_0')
            data[:, :, index] -= int(np.median(data[:, :, index]))
            # The above line doesn't work properly for PTSampler XXX
    if 'file_trace' in emcee_settings:
        if len(emcee_settings['file_trace']) == 0:
            emcee_settings['file_trace'] = config_file_root + "_trace.png"
        alpha = 0.5
        grid = gridspec.GridSpec(n_params, 1, hspace=0)
        plt.figure(figsize=(7.5, 10.5))
        plt.subplots_adjust(left=0.13, right=0.97, top=0.99, bottom=0.05)
        plt.rcParams['font.size'] = 12
        plt.rcParams['axes.linewidth'] = 1.4
        for i in range(n_params):
            if i == 0:
                ax0 = plt.subplot(grid[i])
            else:
                plt.gcf().add_subplot(grid[i], sharex=ax0)
            plt.ylabel(parameters_to_fit[i])
            for j in range(data.shape[0]):
                vector = data[j, :, i]
                plt.plot(np.arange(len(vector)), vector, alpha=alpha)
            plt.xlim(0, len(vector))
            plt.gca().tick_params(axis='both', which='both', direction='in',
                                  top=True, right=True)
            if i != n_params - 1:
                plt.setp(plt.gca().get_xticklabels(), visible=False)
            plt.gca().set_prop_cycle(None)
        plt.xlabel('step count')
        plt.savefig(emcee_settings['file_trace'])
        plt.rcParams['font.size'] = 10  # resetting to defaults
        plt.rcParams['axes.linewidth'] = 0.8
    if 'file_corner' in emcee_settings:
        if len(emcee_settings['file_corner']) == 0:
            emcee_settings['file_corner'] = config_file_root + "_corner.png"
        kwargs = {'quantiles': [0.16, 0.50, 0.84], 'bins': 40,
                  'show_titles': True, 'labels': parameters_to_fit}
        corner.corner(data.reshape(-1, n_params), **kwargs)
        plt.savefig(emcee_settings['file_corner'])
Example #2
            model.set_source_flux_ratio(q_f)
            model_magnification = model.magnification(times)
        else:
            model_magnification = model.magnification(
                times, separate=True)[0]  # This is a very simple solution.
    if 'f_s_sat' in parameters:
        f_s_sat = parameters['f_s_sat']
    else:
        f_s_sat = parameters['f_s_sat_over_u_0'] * model.parameters.u_0
    cpm_source.run_cpm(f_s_sat * (model_magnification - 1.))

    utils.apply_limit_time(cpm_source, MCPM_options)

    mask = cpm_source.residuals_mask
    if 'mask_model_epochs' in MCPM_options:
        mask *= utils.mask_nearest_epochs(cpm_source.pixel_time + 2450000.,
                                          MCPM_options['mask_model_epochs'])
    sat_time = cpm_source.pixel_time[mask] + 2450000.
    # sat_sigma = sat_time * 0. + MCPM_options['sat_sigma']
    sat_sigma = np.sqrt(
        np.sum(np.array([err[mask] for err in cpm_source.pixel_flux_err])**2,
               axis=0))
    if 'sat_sigma_scale' in MCPM_options:
        sat_sigma *= MCPM_options['sat_sigma_scale']
    data = MM.MulensData([sat_time, 0. * sat_time, sat_sigma],
                         phot_fmt='flux',
                         ephemerides_file=MCPM_options['ephemeris_file'],
                         bandpass="******")
    datasets.append(data)

# initiate event and minimizer
event = MM.Event(datasets=datasets, model=model)
Example #3
def evaluate_MM_MCPM(
        files, files_formats, files_kwargs, skycoord, methods, MCPM_options,
        parameters_fixed, parameter_values, model_ids, plot_files, txt_files,
        txt_files_prf_phot, txt_models, parameters_to_fit,
        plot_epochs, plot_epochs_type, plot_settings, gamma_LD,
        model_type=None, data_add_245=True):
    """
    Evaluate the MCPM model for given parameter values and produce the
    requested plots and text output.

    model_type: *None* or *str*
        Can be *None* (i.e., MM parameters are used), 'wide', 'close_A',
        or 'close_B'. If not None, then 't_0_pl', 'u_0_pl', and 't_E_pl'
        parameters are translated to s, q, alpha.
    """
    utils.get_standard_parameters.model_type = model_type

    # read datasets
    datasets = []
    if skycoord is not None:
        coords = MM.Coordinates(skycoord)
    else:
        coords = None
    if files is not None:
        for (file_, fmt, kwargs) in zip(files, files_formats, files_kwargs):
            data = MM.MulensData(file_name=file_, add_2450000=data_add_245,
                                 phot_fmt=fmt, coords=coords, **kwargs)
            datasets.append(data)

    # satellite datasets
    cpm_sources = []
    for campaign in MCPM_options['campaigns']:
        cpm_source = CpmFitSource(ra=skycoord.ra.deg, dec=skycoord.dec.deg,
                                  campaign=campaign,
                                  channel=MCPM_options['channel'])
        cpm_source.get_predictor_matrix(**MCPM_options['predictor_matrix'])
        cpm_source.set_l2_l2_per_pixel(
            l2=MCPM_options['l2'], l2_per_pixel=MCPM_options['l2_per_pixel'])
        cpm_source.set_pixels_square(MCPM_options['half_size'])
        if 'n_select' in MCPM_options:
            cpm_source.select_highest_prf_sum_pixels(MCPM_options['n_select'])

        cpm_sources.append(cpm_source)

    # initiate model
    model_begin = parameter_values[0]
    parameters = {
        key: value for (key, value) in zip(parameters_to_fit, model_begin)}
    parameters.update(parameters_fixed)
    parameters_ = {**parameters}
    for param in list(parameters_.keys()):
        pop_keys = ['f_s_sat', 'f_s_sat_over_u_0']
        if param in pop_keys or param[:3] == 'q_f' or param[:7] == 'log_q_f':
            parameters_.pop(param)
    if 't_0_pl' in parameters_:
        parameters_ = utils.get_standard_parameters(parameters_)
    try:
        model = MM.Model(parameters_, coords=coords)
    except KeyError:
        model = PixelLensingModel(parameters_, coords=coords)
    for (m_key, m_value) in methods.items():
        model.set_magnification_methods(m_value, m_key)
    for (band, gamma) in gamma_LD.items():
        model.set_limb_coeff_gamma(band, gamma)
    if isinstance(model, MM.Model):
        if 'f_s_sat' in parameters:
            f_s_sat = parameters['f_s_sat']
        else:
            f_s_sat = parameters['f_s_sat_over_u_0'] * model.parameters.u_0

    for cpm_source in cpm_sources:
        times = cpm_source.pixel_time + 2450000.
        times[np.isnan(times)] = np.mean(times[~np.isnan(times)])
        if model.n_sources == 1:
            if not isinstance(model, MM.Model):
                model_flux = model.flux_difference(times)
            else:
                model_flux = f_s_sat * (_get_magnification(model, times) - 1.)
        else:
            if not isinstance(model, MM.Model):
                raise NotImplementedError('not yet coded for pixel lensing')
            if ('log_q_f' in parameters) or ('q_f' in parameters):
                if 'log_q_f' in parameters:
                    q_f = 10**parameters['log_q_f']
                else:
                    q_f = parameters['q_f']
                model.set_source_flux_ratio(q_f)
                model_magnification = _get_magnification(model, times)
            else:  # This is a very simple solution.
                model_magnification = _get_magnification(
                    model, times, separate=True)[0]
            model_flux = f_s_sat * (model_magnification - 1.)
        cpm_source.run_cpm(model_flux)

        utils.apply_limit_time(cpm_source, MCPM_options)

        mask = cpm_source.residuals_mask
        if 'mask_model_epochs' in MCPM_options:
            mask *= utils.mask_nearest_epochs(
                cpm_source.pixel_time+2450000.,
                MCPM_options['mask_model_epochs'])
        sat_time = cpm_source.pixel_time[mask] + 2450000.
        # sat_sigma = sat_time * 0. + MCPM_options['sat_sigma']
        err_masked = [err[mask] for err in cpm_source.pixel_flux_err]
        sat_sigma = np.sqrt(np.sum(np.array(err_masked)**2, axis=0))
        if 'sat_sigma_scale' in MCPM_options:
            sat_sigma *= MCPM_options['sat_sigma_scale']
        data = MM.MulensData(
            [sat_time, 0.*sat_time, sat_sigma],
            phot_fmt='flux', ephemerides_file=MCPM_options['ephemeris_file'],
            bandpass="******", coords=coords)
        datasets.append(data)

    # initiate event
    if isinstance(model, MM.Model):
        event = MM.Event(datasets=datasets, model=model)
    else:
        event = PixelLensingEvent(datasets=datasets, model=model)
    params = parameters_to_fit[:]
    minimizer = Minimizer(event, params, cpm_sources)
    if 'f_s_sat' in parameters_fixed or 'f_s_sat_over_u_0' in parameters_fixed:
        minimizer.set_satellite_source_flux(f_s_sat)
    if 'coeffs_fits_in' in MCPM_options:
        minimizer.read_coeffs_from_fits(MCPM_options['coeffs_fits_in'])
    if 'coeffs_fits_out' in MCPM_options:
        raise ValueError("coeffs_fits_out cannot be set in this program")
    if 'sat_sigma_scale' in MCPM_options:
        minimizer.sigma_scale = MCPM_options['sat_sigma_scale']
    if 'color_constraint' in MCPM_options:
        cc = 'color_constraint'
        ref_dataset = files.index(MCPM_options[cc][0])
        if len(MCPM_options[cc]) == 3:
            ref_mag = MM.utils.MAG_ZEROPOINT
        else:
            ref_mag = MCPM_options[cc][1]

        if len(MCPM_options[cc]) in [3, 4]:
            minimizer.add_color_constraint(
                ref_dataset, ref_mag,
                MCPM_options[cc][-2], MCPM_options[cc][-1])
        elif len(MCPM_options[cc]) in [5, 6, 7, 8]:
            minimizer.add_full_color_constraint(
                ref_dataset, files.index(MCPM_options[cc][1]),
                files.index(MCPM_options[cc][2]), *MCPM_options[cc][3:])

    key = 'no_blending_files'
    if key in MCPM_options:
        indexes = [files.index(f) for f in MCPM_options['no_blending_files']]
        for ind in indexes:
            minimizer.fit_blending[ind] = False

    if 'mask_model_epochs' in MCPM_options:
        minimizer.model_masks[0] = utils.mask_nearest_epochs(
            cpm_sources[0].pixel_time+2450000.,
            MCPM_options['mask_model_epochs'])

    # main loop:
    zipped = zip(parameter_values, model_ids, plot_files, txt_files,
                 txt_files_prf_phot, txt_models, plot_epochs)
    for zip_single in zipped:
        (values, name, plot_file) = zip_single[:3]
        (txt_file, txt_file_prf_phot, txt_model, plot_epochs_) = zip_single[3:]
        minimizer.set_parameters(values)
        chi2 = minimizer.chi2_fun(values)
        print(name, chi2)
        minimizer.set_satellite_data(values)

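        # Optional text outputs written below (columns as in np.savetxt):
        # txt_file_prf_phot -> time, PRF flux, error, residual;
        # txt_file -> time, satellite flux, error, residual;
        # txt_model -> time, model flux.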
        if txt_file_prf_phot is not None:
            (y, y_mask) = cpm_source.prf_photometry()
            x = cpm_source.pixel_time[y_mask]
            err = (cpm_source.all_pixels_flux_err *
                   MCPM_options['sat_sigma_scale'])
            y_model = minimizer._sat_models[0][y_mask]
            # XXX We should not use private property above.
            out = [x, y[y_mask], err[y_mask], y[y_mask]-y_model]
            np.savetxt(txt_file_prf_phot, np.array(out).T)
        if txt_file is not None:
            y_mask = cpm_source.residuals_mask
            x = cpm_source.pixel_time[y_mask]
            y = minimizer.event.datasets[-1].flux
            y_err = cpm_source.all_pixels_flux_err[y_mask]
            y_err *= MCPM_options['sat_sigma_scale']
            y_model = minimizer._sat_models[0][y_mask]
            # XXX We should not use private property above.
            np.savetxt(txt_file, np.array([x, y, y_err, y-y_model]).T)
        if txt_model is not None:
            y_mask = cpm_source.residuals_mask
            x = cpm_source.pixel_time[y_mask]
            y_model = minimizer._sat_models[0][y_mask]
            # XXX We should not use private property above.
            np.savetxt(txt_model, np.array([x, y_model]).T)
        if plot_file is not None:
            minimizer.set_satellite_data(values)
            campaigns = MCPM_options['campaigns']
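            # The default time ranges below correspond to the two halves of
            # K2 Campaign 9 (91 = C9a, 92 = C9b), in BJD-2450000.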
            if 'xlim' in plot_settings:
                (t_beg, t_end) = plot_settings.pop('xlim')
            elif 91 in campaigns and 92 in campaigns:
                (t_beg, t_end) = (7500.3, 7573.5)
            elif 91 in campaigns:
                (t_beg, t_end) = (7500.3, 7528.0)
            elif 92 in campaigns:
                (t_beg, t_end) = (7530., 7573.5)
            else:
                (t_beg, t_end) = (7425., 7670.)
            ylim = plot_settings.pop('ylim', None)
            ylim_residuals = plot_settings.pop('ylim_residuals', None)
            adjust = dict(left=0.09, right=0.995, bottom=0.08, top=0.995)
            if len(plot_settings) == 0:
                minimizer.very_standard_plot(t_beg, t_end, ylim, title=name)
            else:
                minimizer.standard_plot(t_beg, t_end, ylim, title=name,
                                        **plot_settings)
                if 'fluxes_y_axis' in plot_settings:
                    adjust['right'] = 0.895
            if ylim_residuals is not None:
                plt.ylim(*ylim_residuals)
                if ylim_residuals[0] > 0.1 and -ylim_residuals[1] > 0.1:
                    fmt = ticker.FormatStrFormatter('%0.1f')
                    plt.gca().yaxis.set_major_formatter(fmt)
            plt.xlabel("BJD-2450000")
            plt.subplots_adjust(**adjust)
            if len(plot_file) == 0:
                plt.show()
            else:
                plt.savefig(plot_file, dpi=400)
                print("{:} file saved".format(plot_file))
            plt.close()
        if plot_epochs_ is not None:
            if minimizer.n_sat != 1:
                raise ValueError('.n_sat != 1 is not implemented')
            for epoch in plot_epochs_:
                args = [epoch - 2450000.]
                if plot_epochs_type is not None:
                    args.append(plot_epochs_type)
                minimizer.cpm_sources[0].plot_image(*args)
                plt.show()
        if len(datasets) > 1:
            print("Non-K2 datasets (i, chi2, F_s, F_b):")
            zip_ = zip(datasets, minimizer.fit_blending)
            for (i, (dat, fb)) in enumerate(zip_):
                chi2_data = event.get_chi2_for_dataset(i, fit_blending=fb)
                print(i, chi2_data, event.fit.flux_of_sources(dat)[0],
                      event.fit.blending_flux(dat))
                if i < len(files):
                    chi2 -= chi2_data
            print("-----")
            print("K2 chi2: ", chi2)
        if len(cpm_sources) > 0:
            if isinstance(model, MM.Model):
                print("Satellite t_0, u_0, A_max:")
            else:
                print("Satellite t_0, delta_flux_max:")
            print(*minimizer.satellite_maximum())
        print()