Example #1
def run_minimize(obs={},
                 model=None,
                 sps=None,
                 noise=None,
                 lnprobfn=lnprobfn,
                 min_method='lm',
                 min_opts={},
                 nmin=1,
                 pool=None,
                 **kwargs):
    """Run a minimization
    """
    initial = model.theta.copy()

    lsq = ['lm']
    scalar = ['powell']

    # --- Set some options based on minimization method ---
    if min_method in lsq:
        algorithm = least_squares
        residuals = True
        min_opts["x_scale"] = "jac"
    elif min_method in scalar:
        algorithm = minimize
        residuals = False

    args = []
    loss = argfix(lnprobfn,
                  obs=obs,
                  model=model,
                  sps=sps,
                  noise=noise,
                  residuals=residuals)
    minimizer = minimize_wrapper(algorithm, loss, [], min_method, min_opts)
    qinit = minimizer_ball(initial, nmin, model)

    if pool is not None:
        M = pool.map
    else:
        M = map

    t = time.time()
    results = list(M(minimizer, [np.array(q) for q in qinit]))
    tm = time.time() - t

    if min_method in lsq:
        chisq = [np.sum(r.fun**2) for r in results]
        best = np.argmin(chisq)
    elif min_method in scalar:
        best = np.argmin([p.fun for p in results])

    return results, tm, best
Example #2
def run_minimize(obs=None,
                 model=None,
                 sps=None,
                 noise=None,
                 lnprobfn=lnprobfn,
                 min_method='lm',
                 min_opts={},
                 nmin=1,
                 pool=None,
                 **extras):
    """Run a minimization.  This wraps the lnprobfn fixing the ``obs``,
    ``model``, ``noise``, and ``sps`` objects, and then runs a minimization of
    -lnP using scipy.optimize methods.
    
    :param obs:
        The ``obs`` dictionary containing the data to fit to, which will be
        passed to ``lnprobfn``.

    :param model:
        An instance of the :py:class:`prospect.models.SedModel` class
        containing the model parameterization and parameter state.  It will be
        passed to ``lnprobfn``.

    :param sps:
        An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
        Alternatively, anything with a compatible :py:meth:`get_spectrum` can
        be used here. It will be passed to ``lnprobfn``.

    :param noise: (optional) 
        If given, a tuple of :py:class:`NoiseModel` objects passed to
        ``lnprobfn``.

    :param lnprobfn: (optional, default: lnprobfn)
        A posterior probability function that can take ``obs``, ``model``,
        ``sps``, and ``noise`` as keywords. By default use the
        :py:meth:`lnprobfn` defined above.

    :param min_method: (optional, default: 'lm')
        Method to use for minimization
        * 'lm': Levenberg-Marquardt
        * 'powell': Powell line search method
    
    :param nmin: (optional, default: 1)
        Number of minimizations to do.  Beyond the first, minimizations will be
        started from draws from the prior.

    :param min_opts: (optional, default: {})
        Dictionary of minimization options passed to the scipy.optimize method.
        These include things like 'xtol', 'ftol', etc.

    :param pool: (optional, default: None)
        A pool to use for parallel optimization from multiple initial positions.

    :returns results:
        A list of `scipy.optimize.OptimizeResult` objects.

    :returns tm:
        Wall time used for the minimization, in seconds.

    :returns best:
        The index of the results list containing the lowest chi-square result.
    """
    initial = model.theta.copy()

    lsq = ["lm"]
    scalar = ["powell"]

    # --- Set some options based on minimization method ---
    if min_method in lsq:
        algorithm = least_squares
        residuals = True
        min_opts["x_scale"] = "jac"
    elif min_method in scalar:
        algorithm = minimize
        residuals = False

    args = []
    loss = argfix(lnprobfn,
                  obs=obs,
                  model=model,
                  sps=sps,
                  noise=noise,
                  residuals=residuals)
    minimizer = minimize_wrapper(algorithm, loss, [], min_method, min_opts)
    qinit = minimizer_ball(initial, nmin, model)

    if pool is not None:
        M = pool.map
    else:
        M = map

    t = time.time()
    results = list(M(minimizer, [np.array(q) for q in qinit]))
    tm = time.time() - t

    if min_method in lsq:
        chisq = [np.sum(r.fun**2) for r in results]
        best = np.argmin(chisq)
    elif min_method in scalar:
        best = np.argmin([p.fun for p in results])

    return results, tm, best
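
A minimal usage sketch for the function above. The ``build_obs``, ``build_model``, and ``build_sps`` helpers are hypothetical placeholders for whatever assembles the data dictionary, the SedModel, and the SSPBasis object in a given setup; only the call signature of ``run_minimize`` comes from the code above.

# Hypothetical usage sketch: build_obs/build_model/build_sps are placeholders,
# not part of the module above.
obs = build_obs()                      # the data dictionary passed to lnprobfn
model = build_model()                  # a prospect.models.SedModel instance
sps = build_sps()                      # a prospect.sources.SSPBasis (sub-)class

results, tm, best = run_minimize(obs=obs, model=model, sps=sps,
                                 min_method="lm", nmin=4)
print("best of {} minimizations, {:.1f}s of wall time".format(len(results), tm))
print(results[best].x)                 # parameter vector of the best run
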
Example #3
def wrap_lnp(lnpfn, obs, model, sps, **lnp_kwargs):
    return argfix(lnpfn, obs=obs, model=model, sps=sps, **lnp_kwargs)
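
``wrap_lnp`` simply partially applies the probability function. Below is a small sketch of how the returned callable might be used; the ``obs``, ``model``, and ``sps`` objects are assumed to have been built elsewhere.

# Placeholder objects assumed to exist; the wrapped function then only needs a
# parameter vector.
lnp = wrap_lnp(lnprobfn, obs, model, sps)
value = lnp(model.theta)  # same as lnprobfn(model.theta, obs=obs, model=model, sps=sps)
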
Example #4
        result.scene = scene
        result.truths = ptrue.copy()
        result.metric = np.copy(metric.variance)
        result.step_size = sampler.step_size.get_step_size()
        with open("sim_sersic_single_hemcee.pkl", "wb") as f:
            pickle.dump(result, f)

        fig, axes = pl.subplots(7, 3, sharex=True)
        for i, ax in enumerate(axes.T.flat):
            ax.plot(chain[:, i])
            ax.axhline(p0[i], color='k', linestyle=':')
            ax.set_title(label[i])


    # --- nested ---
    if False:
        lnlike = argfix(lnlike_multi, scene=scene, plans=plans, grad=False)
        theta_width = (upper - lower)
        nlive = 50
        
        def prior_transform(unit_coords):
            # now scale and shift
            theta = lower + theta_width * unit_coords
            return theta

        import dynesty
        
        # "Standard" nested sampling.
        sampler = dynesty.DynamicNestedSampler(lnlike, prior_transform, ndim, nlive=nlive,
                                               bound="multi", method="slice", bootstrap=0)
        t0 = time.time()
        sampler.run_nested(nlive_init=int(nlive/2), nlive_batch=int(nlive),
Example #5
              for im in imnames]

    # override the psf to reflect in both directions
    T = -1.0 * np.eye(2)
    for s in stamps:
        s.psf.covariances = np.matmul(T, np.matmul(s.psf.covariances, T.T))
        s.psf.means = np.matmul(s.psf.means, T)

    # --- get the Scene ---
    scene = Scene()
    sources = [Star()]
    sources[0].id = 0
    scene.sources = sources
    label = ['flux', 'alpha', 'delta']

    nll = argfix(negative_lnlike_multistamp, scene=scene, stamps=stamps)

    # --- Initialize ---
    theta_init = np.array([ra_init, dec_init, stamps[0].pixel_values.sum() * 1.0])


    # --- Optimization ---
    if True:
        def callback(x):
            #nf += 1
            print(x, nll(x))

        p0 = theta_init.copy()
        #p0[0] = 4500. #34.44
        bounds = [(0, 1e4), (0., 100), (0, 100)]
        from scipy.optimize import minimize
Example #6
        stamp.crval = np.zeros([2])
        # The pixel coordinates of the reference pixel
        stamp.crpix = np.zeros([2])

    # override the psf to reflect in both directions
    T = -1.0 * np.eye(2)
    stamp.psf.covariances = np.matmul(T, np.matmul(stamp.psf.covariances, T.T))
    stamp.psf.means = np.matmul(stamp.psf.means, T)

    # --- get the Scene ---
    sources = [Star(filters=["F090W"])]
    label = ['flux', 'x', 'y']
    scene = Scene(sources)
    plans = [WorkPlan(stamp)]

    nll = argfix(negative_lnlike_multi, scene=scene, plans=plans)
    nll_nograd = argfix(negative_lnlike_multi,
                        scene=scene,
                        plans=plans,
                        grad=False)

    # --- Initialize ---
    theta_init = np.array([stamp.pixel_values.sum() * 1.0, ra_init, dec_init])
    if inpixels:
        world = np.array([ra_init, dec_init, 0])
        hdr = fits.getheader(imname)
        ast = apy_wcs.WCS(hdr)
        center = ast.wcs_world2pix(world[None, :], 0)[0, :2] - stamp.lo
        theta_init = np.array(
            [stamp.pixel_values.sum() * 0.5, center[0], center[1]])
    image_init, partials = make_image(scene, stamp, Theta=theta_init)
Example #7
def fit_source(ra=53.115295, dec=-27.803501, dofit=True, nlive=100):

    # --- Build the postage stamp ----
    ra_init, dec_init = ra, dec
    pos_init = (ra_init, dec_init)
    stamps = [
        make_stamp(im,
                   pos_init,
                   center_type='celestial',
                   size=(50, 50),
                   psfname=psfname) for im in imnames
    ]

    # override the psf to reflect in both directions
    T = -1.0 * np.eye(2)
    for s in stamps:
        s.psf.covariances = np.matmul(T, np.matmul(s.psf.covariances, T.T))
        s.psf.means = np.matmul(s.psf.means, T)

    # --- get the Scene ---
    source = Star(filters=["F090W"])
    scene = Scene([source])
    label = ['Counts', 'RA', 'Dec']

    plans = [WorkPlan(stamp) for stamp in stamps]
    lnlike = argfix(lnlike_multi, scene=scene, plans=plans, grad=False)

    # --- Initialize ---
    theta_init = np.array(
        [stamps[0].pixel_values.sum() * 1.0, ra_init, dec_init])
    # a rough measure of dcoordinate/dpix
    plate_scale, _ = np.linalg.eig(np.linalg.inv(stamps[0].dpix_dsky))
    # make the prior ~10 pixels wide, and 50% of counts
    theta_width = np.array(
        [0.5 * theta_init[0], 10 * plate_scale[0], 10 * plate_scale[1]])
    #print(theta_init, theta_width)

    # --- Nested sampling ---
    ndim = 3

    def prior_transform(unit_coords):
        # convert to uniform -1 to 1
        u = (2 * unit_coords - 1.)
        # now scale and shift
        theta = theta_init + theta_width * u
        return theta

    if dofit:
        import dynesty, time

        # "Standard" nested sampling.
        sampler = dynesty.NestedSampler(lnlike,
                                        prior_transform,
                                        ndim,
                                        nlive=nlive,
                                        bootstrap=0)
        t0 = time.time()
        sampler.run_nested()
        dur = time.time() - t0
        results = sampler.results
        results['duration'] = dur
        indmax = results['logl'].argmax()
        theta_max = results['samples'][indmax, :]

    else:
        results = None
        theta_max = np.zeros(3)
        stamps = None

    return results, theta_max, stamps, scene
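
A hedged usage sketch for the function above; it assumes the module-level ``imnames`` and ``psfname`` used inside ``fit_source`` are already configured.

# Hypothetical call, assuming imnames/psfname are set up at module scope.
results, theta_max, stamps, scene = fit_source(ra=53.115295, dec=-27.803501,
                                               dofit=True, nlive=100)
print("sampling took {:.1f}s".format(results["duration"]))
print("max-likelihood (Counts, RA, Dec):", theta_max)
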
Example #8
def fit_source(ra=53.115325, dec=-27.803518, imname='', psfname=None,
               stamp_size=(100, 100), use_grad=True,
               err_expand=1.0, jitter=0.0, gain=np.inf):
    """
    """
    # --- Build the postage stamp ----
    stamp = make_stamp(imname, (ra, dec), stamp_size,
                       psfname=psfname, center_type='celestial')
    stamp.snr = stamp.pixel_values * stamp.ierr
    stamp.ierr = stamp.ierr.flatten() / err_expand
    counts = stamp.pixel_values.flatten() - stamp.pixel_values.min()
    stamp.ierr = 1.0 / np.sqrt(1/stamp.ierr**2 + jitter**2 + counts/gain)

    # override the WCS so coordinates are in pixels
    # The scale matrix D
    stamp.scale = np.eye(2)
    # The sky coordinates of the reference pixel
    stamp.crval = np.zeros([2])
    # The pixel coordinates of the reference pixel
    stamp.crpix = np.zeros([2])

    # Rotate the PSF by 180 degrees
    T = -1.0 * np.eye(2)
    stamp.psf.covariances = np.matmul(T, np.matmul(stamp.psf.covariances, T.T))
    stamp.psf.means = np.matmul(stamp.psf.means, T)

    # --- get the Scene ---
    scene = Scene(galaxy=False)
    sources = [Star()]
    scene.sources = sources

    # ---- Optimization ------
    if use_grad:
        nll = argfix(negative_lnlike_stamp, scene=scene, stamp=stamp)
    else:
        nll = argfix(negative_lnlike_nograd, scene=scene, stamp=stamp)
    if False:
        nll = argfix(chi_vector, scene=scene, stamp=stamp)
        method = 'lm'
        use_grad = False
        
    if True:
        def callback(x):
            #nf += 1
            print(x, nll(x))
        callback = None

        # Initial and bounds
        p0 = np.array([stamp.pixel_values.sum(), stamp.nx/2, stamp.ny/2])
        p0 += np.random.normal(0., [0.1 * p0[0], 0.5, 0.5])
        bounds = [(1, 1e4), (0., stamp_size[0]), (0, stamp_size[1])]
        bounds = None
        
        # Optimize
        from scipy.optimize import minimize
        lbfgsb_opt = {'ftol': 1e-20, 'gtol': 1e-12, 'disp':True, 'iprint': -1, 'maxcor': 20}
        # use L-BFGS-B to match the lbfgsb_opt options above
        result = minimize(nll, p0, jac=use_grad, method="L-BFGS-B", bounds=None,
                          callback=callback, options=lbfgsb_opt)

        # plot results
        resid, partials = make_image(result.x, scene, stamp)
        dim = stamp.pixel_values
        mim = resid
        chi = (dim - mim) * stamp.ierr.reshape(stamp.nx, stamp.ny)
        
        fig, axes = pl.subplots(1, 4, sharex=True, sharey=True, figsize=(14.75, 3.25))
        images = [dim, mim, dim-mim, chi]
        labels = ['Data', 'Model', 'Data-Model', r'$\chi$']
        for k, ax in enumerate(axes):
            c = ax.imshow(images[k].T, origin='lower')
            pl.colorbar(c, ax=ax)
            ax.set_title(labels[k])

        return result, (fig, axes), nll(result.x), stamp, scene
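
A sketch of a call to the function above; the image and PSF file names are placeholders.

# Hypothetical call -- "image.fits" and "psf.fits" are placeholder paths.
result, (fig, axes), nll_best, stamp, scene = fit_source(
    ra=53.115325, dec=-27.803518, imname="image.fits", psfname="psf.fits",
    stamp_size=(100, 100), use_grad=True)
print(result.x, result.success)
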
Example #9
        stamp.pixel_values += noise

    return scene, stamp, ptrue, label


if __name__ == "__main__":

    # Get a scene and a stamp at some parameters
    scene, stamp, ptrue, label = setup_scene(galaxy=True,
                                             fwhm=2.0,
                                             fudge=1.25,
                                             add_noise=True)
    true_image, partials = make_image(ptrue, scene, stamp)

    # Set up likelihoods
    nll = argfix(negative_lnlike_stamp, scene=scene, stamp=stamp)
    nll_nograd = argfix(negative_lnlike_nograd, scene=scene, stamp=stamp)

    # --- Chi2 on a grid ------
    # needs to be debugged
    if False:
        mux = np.linspace(47, 53., 100)
        muy = np.linspace(47, 53., 100)
        flux = np.linspace(3000, 5000., 10)
        chi2 = np.zeros([len(mux), len(muy), len(flux)])

        for i, x in enumerate(mux):
            for j, y in enumerate(muy):
                for k, f in enumerate(flux):
                    theta = np.array([f, x, y])
                    chi2[i, j, k] = nll(theta)[0]
Example #10
def fit_source(ra=53.115295,
               dec=-27.803501,
               mag=None,
               dofit='dynesty',
               nlive=100,
               nburn=600,
               niter=200):

    # --- Get the data ---
    stamps = prep_stamps(ra, dec)

    # --- Get the Scene ---
    source = Star(filters=filters)
    scene = Scene([source])
    label = ['Counts', 'RA', 'Dec']

    plans = [WorkPlan(stamp) for stamp in stamps]

    # --- Initialize and set scales ---
    if mag is None:
        counts = [
            np.clip(stamp.pixel_values.sum(), 1, np.inf) for stamp in stamps
        ]
    else:
        counts = [
            10**(0.4 * (stamp.full_header["ABMAG"] - mag)) for stamp in stamps
        ]
    theta_init = np.array(counts + [ra, dec])
    # a rough measure of dcoordinate/dpix - this doesn't work so great
    plate_scale = np.linalg.eigvals(np.linalg.inv(stamps[0].dpix_dsky))
    plate_scale = np.abs(plate_scale)
    # make the prior ~5 pixels wide, and 100% of expected counts
    theta_width = np.array(
        [theta_init[0], 5 * plate_scale[0], 5 * plate_scale[1]])
    print(theta_init, theta_width)

    # --- Sampling ---
    ndim = 3
    p0 = theta_init.copy()
    upper = theta_init + theta_width / 2.
    lower = theta_init - theta_width / 2.
    scales = theta_width

    if dofit == 'dynesty':

        lnlike = argfix(lnlike_multi, scene=scene, plans=plans, grad=False)

        def prior_transform(unit_coords):
            # convert to uniform -1 to 1
            u = (2 * unit_coords - 1.)
            # now scale and shift
            theta = theta_init + theta_width * u
            return theta

        import dynesty, time

        # "Standard" nested sampling.
        sampler = dynesty.NestedSampler(lnlike,
                                        prior_transform,
                                        ndim,
                                        bootstrap=0,
                                        nlive=nlive)
        t0 = time.time()
        sampler.run_nested()
        dur = time.time() - t0
        results = sampler.results
        results['duration'] = dur
        indmax = results['logl'].argmax()
        theta_max = results['samples'][indmax, :]

    elif dofit == "hmc":

        from hmc import BasicHMC

        model = Posterior(scene, plans, upper=upper, lower=lower)
        hsampler = BasicHMC(model, verbose=False)
        hsampler.ndim = len(p0)
        hsampler.set_mass_matrix(1 / scales**2)
        eps = hsampler.find_reasonable_stepsize(p0)
        pos, prob, grad = hsampler.sample(p0,
                                          iterations=nburn,
                                          mass_matrix=1 / scales**2,
                                          epsilon=eps * 1.5,
                                          length=10,
                                          sigma_length=3,
                                          store_trajectories=False)
        pos, prob, grad = hsampler.sample(pos,
                                          iterations=niter,
                                          mass_matrix=1 / scales**2,
                                          epsilon=eps,
                                          length=20,
                                          sigma_length=5,
                                          store_trajectories=True)

        results = {
            "samples": hsampler.chain.copy(),
            "lnprobability": hsampler.lnp.copy()
        }
        theta_max = hsampler.chain[np.argmax(hsampler.lnp), :]

    elif dofit == "hemcee":

        from hemcee import NoUTurnSampler
        from hemcee.metric import DiagonalMetric

        model = Posterior(scene, plans, upper=np.inf, lower=-np.inf)
        metric = DiagonalMetric(scales**2)
        usampler = NoUTurnSampler(model.lnprob,
                                  model.lnprob_grad,
                                  metric=metric)
        pos, lnp0 = usampler.run_warmup(p0, nburn)
        chain, lnp = usampler.run_mcmc(pos, niter)

        results = {"samples": chain, "lnprobability": lnp}
        theta_max = chain[np.argmax(lnp), :]

    else:
        results = None
        theta_max = np.zeros(3)

    return results, theta_max, stamps, scene
Example #11
def run_minimize(obs=None, model=None, sps=None, noise=None, lnprobfn=lnprobfn,
                 min_method='lm', min_opts={}, nmin=1, pool=None, **extras):
    """Run a minimization.  This wraps the lnprobfn fixing the ``obs``,
    ``model``, ``noise``, and ``sps`` objects, and then runs a minimization of
    -lnP using scipy.optimize methods.
    
    :param obs:
        The ``obs`` dictionary containing the data to fit to, which will be
        passed to ``lnprobfn``.

    :param model:
        An instance of the :py:class:`prospect.models.SedModel` class
        containing the model parameterization and parameter state.  It will be
        passed to ``lnprobfn``.

    :param sps:
        An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
        Alternatively, anything with a compatible :py:meth:`get_spectrum` can
        be used here. It will be passed to ``lnprobfn``.

    :param noise: (optional) 
        If given, a tuple of :py:class:`NoiseModel` objects passed to
        ``lnprobfn``.

    :param lnprobfn: (optional, default: lnprobfn)
        A posterior probability function that can take ``obs``, ``model``,
        ``sps``, and ``noise`` as keywords. By default use the
        :py:meth:`lnprobfn` defined above.

    :param min_method: (optional, default: 'lm')
        Method to use for minimization
        * 'lm': Levenberg-Marquardt
        * 'powell': Powell line search method
    
    :param nmin: (optional, default: 1)
        Number of minimizations to do.  Beyond the first, minimizations will be
        started from draws from the prior.

    :param min_opts: (optional, default: {})
        Dictionary of minimization options passed to the scipy.optimize method.
        These include things like 'xtol', 'ftol', etc.

    :param pool: (optional, default: None)
        A pool to use for parallel optimization from multiple initial positions.

    :returns results:
        A list of `scipy.optimize.OptimizeResult` objects.

    :returns tm:
        Wall time used for the minimization, in seconds.

    :returns best:
        The index of the results list containing the lowest chi-square result.
    """
    initial = model.theta.copy()
    
    lsq = ["lm"]
    scalar = ["powell"]

    # --- Set some options based on minimization method ---
    if min_method in lsq:
        algorithm = least_squares
        residuals = True
        min_opts["x_scale"] = "jac"
    elif min_method in scalar:
        algorithm = minimize
        residuals = False

    args = []
    loss = argfix(lnprobfn, obs=obs, model=model, sps=sps,
                  noise=noise, residuals=residuals)
    minimizer = minimize_wrapper(algorithm, loss, [], min_method, min_opts)
    qinit = minimizer_ball(initial, nmin, model)

    if pool is not None:
        M = pool.map
    else:
        M = map

    t = time.time()
    results = list(M(minimizer, [np.array(q) for q in qinit]))
    tm = time.time() - t

    if min_method in lsq:
        chisq = [np.sum(r.fun**2) for r in results]
        best = np.argmin(chisq)
    elif min_method in scalar:
        best = np.argmin([p.fun for p in results])

    return results, tm, best
Example #12
def wrap_lnp(lnpfn, obs, model, sps, **lnp_kwargs):
    return argfix(lnpfn, obs=obs, model=model, sps=sps,
                  **lnp_kwargs)
Example #13
    sourcepars = [([30.], 10., 10., 0.7, 45, 2.1, 0.07)]
    upper = np.array([60., 20., 20., 1.0, np.pi / 2., 5.0, 0.12])
    lower = np.array([10., 5., 5., 0.0, -np.pi / 2, 1.0, 0.03])

    scene, stamps, ptrue, label = setup_scene(sourceparams=sourcepars,
                                              splinedata=paths.galmixture,
                                              perturb=0.0,
                                              add_noise=True,
                                              snr_max=50.,
                                              filters=filters,
                                              stamp_kwargs=stamp_kwargs)

    #sys.exit()
    # --- Set up posterior prob fns ----
    plans = [WorkPlan(stamp) for stamp in stamps]
    nll = argfix(negative_lnlike_multi, scene=scene, plans=plans)

    #upper = np.zeros(5) + 1000
    #lower = np.zeros(5) - 1000
    model = Posterior(scene, plans, upper=upper, lower=lower)

    # --- Gradient Check ---
    if True:
        delta = np.ones_like(ptrue) * 1e-7
        #numerical
        grad_num = numerical_image_gradients(ptrue, delta, scene, stamps[0])
        image, grad = make_image(scene, stamps[0], Theta=ptrue)
        fig, axes = pl.subplots(len(ptrue), 3, sharex=True, sharey=True)
        for i in range(len(ptrue)):
            g = grad[i, :].reshape(stamps[0].nx, stamps[0].ny)
            c = axes[i, 0].imshow(grad_num[i, :, :].T, origin='lower')
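
The gradient check above compares analytic image gradients from ``make_image`` against finite differences. Below is a generic sketch of the central-difference side of that comparison, under the assumption that the image-generating function takes only a parameter vector; ``numerical_gradients`` and ``image_fn`` are illustrative names, not part of the code above.

import numpy as np

def numerical_gradients(image_fn, theta, delta):
    # Central finite differences of an image-valued function with respect to
    # each parameter; image_fn(theta) is assumed to return an ndarray.
    grads = []
    for i, d in enumerate(delta):
        hi, lo = theta.copy(), theta.copy()
        hi[i] += d
        lo[i] -= d
        grads.append((image_fn(hi) - image_fn(lo)) / (2 * d))
    return np.array(grads)
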