Example #1
    def params(self, pars):
        # Assumes pars are coming in as Temp, logg, Z.
        # If the parameters are out of bounds, raise an error.
        if np.any(pars < self.min_params) or np.any(pars > self.max_params):
            raise C.ModelError("Emulating outside of the grid.")

        self._params = pars
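A minimal sketch of how this bounds check behaves when wired up as a property setter, using hypothetical min_params/max_params values and a stand-in ModelError in place of C.ModelError; the rest of the emulator class is omitted.

import numpy as np


class ModelError(Exception):
    """Stand-in for C.ModelError."""


class ToyEmulator:
    # Hypothetical grid bounds in (Temp, logg, Z) order.
    min_params = np.array([2300.0, 4.0, -1.0])
    max_params = np.array([7000.0, 6.0, 0.5])

    @property
    def params(self):
        return self._params

    @params.setter
    def params(self, pars):
        pars = np.asarray(pars)
        if np.any(pars < self.min_params) or np.any(pars > self.max_params):
            raise ModelError("Emulating outside of the grid.")
        self._params = pars


emu = ToyEmulator()
emu.params = [5800.0, 4.5, 0.0]      # within bounds: accepted
try:
    emu.params = [8000.0, 4.5, 0.0]  # Temp above max_params[0]: rejected
except ModelError as e:
    print("rejected:", e)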
Example #2
    def evaluate(self):
        '''
        Return the lnprob using the current version of the DataCovariance matrix
        and other intermediate products.
        '''
        self.lnprob_last = self.lnprob

        X = (self.ChebyshevSpectrum.k * self.flux_std *
             np.eye(self.npoints)).dot(self.pcomps.T)

        CC = X.dot(self.C_GP.dot(X.T)) + self.data_mat

        R = self.fl - self.ChebyshevSpectrum.k * self.flux_mean - X.dot(
            self.mus)

        try:
            factor, flag = cho_factor(CC)
        except np.linalg.LinAlgError as e:
            self.logger.debug("self.sampler.params are {}".format(
                self.sampler.params))
            raise C.ModelError("Can't Cholesky factor {}".format(e))

        logdet = np.sum(2 * np.log(np.diag(factor)))

        self.lnprob = -0.5 * (np.dot(R, cho_solve(
            (factor, flag), R)) + logdet) + self.prior

        if self.counter % 100 == 0:
            self.resid_deque.append(R)

        self.counter += 1

        return self.lnprob
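The heart of evaluate is the log-density of a zero-mean multivariate Gaussian in the residual R with covariance CC, computed through a Cholesky factorization. A self-contained sketch with toy matrices (the normalization constant and the prior term are dropped, as above):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(0)
n = 50
R = rng.normal(size=n)                  # residual vector (data minus model)
A = rng.normal(size=(n, n))
CC = A @ A.T + n * np.eye(n)            # any symmetric positive-definite covariance

factor, flag = cho_factor(CC)
logdet = np.sum(2 * np.log(np.diag(factor)))   # log|CC| from the Cholesky factor
lnprob = -0.5 * (R @ cho_solve((factor, flag), R) + logdet)

# Cross-check against the direct expression; cho_solve avoids forming an explicit inverse.
sign, logdet_direct = np.linalg.slogdet(CC)
lnprob_direct = -0.5 * (R @ np.linalg.solve(CC, R) + logdet_direct)
assert np.allclose(lnprob, lnprob_direct)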
Example #3
    def update_Phi(self, phi):
        self.logger.debug("Updating nuisance parameters to {}".format(phi))

        # Read off the Chebyshev parameters and update
        self.chebyshevSpectrum.update(phi.cheb)

        # Check to make sure the global covariance parameters make sense
        if phi.sigAmp < 0.1:
            raise C.ModelError("sigAmp shouldn't be lower than 0.1, something is wrong.")

        max_r = 6.0 * phi.l # [km/s]

        # Create a partial function which returns the proper element.
        k_func = make_k_func(phi)

        # Store the previous data matrix in case we want to revert later
        self.data_mat_last = self.data_mat
        self.data_mat = (get_dense_C(self.wl, k_func=k_func, max_r=max_r)
                         + phi.sigAmp * self.sigma_mat + self.region_mat)
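get_dense_C and make_k_func are Starfish helpers; the sketch below only mirrors the call pattern used here, with a generic squared-exponential kernel in velocity space standing in for the real kernel and with matrix elements zeroed beyond max_r so the covariance stays effectively banded. It is an illustration under those assumptions, not the Starfish implementation.

import numpy as np

c_kms = 2.99792458e5  # speed of light [km/s]


def make_k_func_sketch(amp, l):
    """Stand-in for make_k_func: a squared-exponential kernel in velocity space."""
    def k(wl0, wl1):
        dv = c_kms * abs(wl0 - wl1) / wl0  # approximate velocity separation [km/s]
        return amp * np.exp(-0.5 * (dv / l) ** 2)
    return k


def get_dense_C_sketch(wl, k_func, max_r):
    """Dense covariance with entries zeroed beyond the max_r truncation radius."""
    n = len(wl)
    C = np.zeros((n, n))
    for i in range(n):
        for j in range(i, n):
            dv = c_kms * abs(wl[i] - wl[j]) / wl[i]
            if dv <= max_r:
                C[i, j] = C[j, i] = k_func(wl[i], wl[j])
    return C


wl = np.linspace(5000.0, 5010.0, 200)  # toy wavelength grid [AA]
l = 10.0                               # global length scale [km/s]
k_func = make_k_func_sketch(amp=1e-3, l=l)
C = get_dense_C_sketch(wl, k_func, max_r=6.0 * l)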
Example #4
    def params(self, pars):

        # If the pars is outside of the range of emulator values, raise a ModelError
        if np.any(pars < self.min_params) or np.any(pars > self.max_params):
            raise C.ModelError("Querying emulator outside of original PCA parameter range.")

        # Assumes pars is a single parameter combination, as a 1D np.array
        self._params = pars

        # Do this according to R&W eqn 2.18, 2.19

        # Recalculate V12 and V22 for the new parameters (V11 is unchanged).
        self.V12 = V12(self._params, self.pca.gparams, self.h2params, self.pca.m)
        self.V22 = V22(self._params, self.h2params, self.pca.m)

        # Recalculate the predictive mean and covariance
        self.mu = self.V12.T.dot(np.linalg.solve(self.V11, self.pca.w_hat))
        self.mu.shape = (-1,)
        self.sig = self.V22 - self.V12.T.dot(np.linalg.solve(self.V11, self.V12))
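The last three lines are the Gaussian-process predictive equations referenced in the comment (Rasmussen & Williams eqns 2.18 and 2.19): mu = V12^T V11^{-1} w_hat and Sigma = V22 - V12^T V11^{-1} V12. A toy numpy sketch, with a squared-exponential kernel and synthetic points standing in for the emulator's kernel, pca.gparams, and w_hat:

import numpy as np


def sq_exp(x0, x1, amp=1.0, l=0.3):
    """Toy squared-exponential kernel (stand-in for the emulator's kernel)."""
    return amp * np.exp(-0.5 * ((x0[:, None] - x1[None, :]) / l) ** 2)


x_train = np.linspace(0.0, 1.0, 8)      # analogue of pca.gparams
w_hat = np.sin(2 * np.pi * x_train)     # analogue of the PCA weights w_hat
x_star = np.array([0.37])               # analogue of the queried parameters

V11 = sq_exp(x_train, x_train) + 1e-8 * np.eye(len(x_train))  # jitter for stability
V12 = sq_exp(x_train, x_star)
V22 = sq_exp(x_star, x_star)

# R&W eqn 2.18 (predictive mean) and eqn 2.19 (predictive covariance)
mu = V12.T @ np.linalg.solve(V11, w_hat)
sig = V22 - V12.T @ np.linalg.solve(V11, V12)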
Example #5
    def update_nuisance(self, params):
        '''
        Update the nuisance parameters and data covariance matrix.

        :param params: large dictionary containing cheb, cov, and regions
        '''

        self.logger.debug("Updating nuisance parameters to {}".format(params))
        # Read off the Chebyshev parameters and update
        self.ChebyshevSpectrum.update(params["cheb"])

        # Create the full data covariance matrix.
        l = params["cov"]["l"]
        sigAmp = params["cov"]["sigAmp"]

        # Check to make sure the global covariance parameters make sense
        if sigAmp < 0.1:
            raise C.ModelError(
                "sigAmp shouldn't be lower than 0.1, something is wrong.")

        max_r = 6.0 * l  # [km/s]

        # Check all regions, take the max
        if self.nregions > 0:
            regions = params["regions"]
            keys = sorted(regions)
            sigmas = np.array([regions[key]["sigma"] for key in keys])  # [km/s]
            # mus = np.array([regions[key]["mu"] for key in keys])
            max_reg = 4.0 * np.max(sigmas)
            # If this is a larger distance than the global length, replace it
            max_r = max_reg if max_reg > max_r else max_r
            # print("Max_r now set by regions {}".format(max_r))

        # print("max_r is {}".format(max_r))

        # Create a partial function which returns the proper element.
        k_func = make_k_func(params)

        # Store the previous data matrix in case we want to revert later
        self.data_mat_last = self.data_mat
        self.data_mat = get_dense_C(self.wl, k_func=k_func,
                                    max_r=max_r) + sigAmp * self.sigma_matrix
Example #6
    def update_Theta(self, p):
        '''
        Update the model to the current Theta parameters.

        :param p: parameters to update model to
        :type p: model.ThetaParam
        '''

        # Dirty HACK to get a fixed logg:
        # if "fix_logg" is set in the config, pin the middle grid value (logg) to it.
        fix_logg = Starfish.config.get("fix_logg", None)
        if fix_logg is not None:
            p.grid[1] = fix_logg
        #print("grid pars are", p.grid)

        self.logger.debug("Updating Theta parameters to {}".format(p))

        # Store the current accepted values before overwriting with new proposed values.
        self.flux_mean_last = self.flux_mean.copy()
        self.flux_std_last = self.flux_std.copy()
        self.eigenspectra_last = self.eigenspectra.copy()
        self.mus_last = self.mus
        self.C_GP_last = self.C_GP

        # Local, shifted copy of wavelengths
        wl_FFT = self.wl_FFT * np.sqrt((C.c_kms + p.vz) / (C.c_kms - p.vz))

        # If vsini is less than 0.2 km/s, we might run into issues with
        # the grid spacing. Therefore skip the convolution step if we have
        # values smaller than this.
        # FFT and convolve operations
        if p.vsini < 0.0:
            raise C.ModelError("vsini must be positive")
        elif p.vsini < 0.2:
            # Skip the vsini taper due to instrumental effects
            eigenspectra_full = self.EIGENSPECTRA.copy()
        else:
            FF = np.fft.rfft(self.EIGENSPECTRA, axis=1)

            # Determine the stellar broadening kernel
            ub = 2. * np.pi * p.vsini * self.ss
            sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
            # set zeroth frequency to 1 separately (DC term)
            sb[0] = 1.

            # institute vsini taper
            FF_tap = FF * sb

            # do ifft
            eigenspectra_full = np.fft.irfft(FF_tap, self.pca.npix, axis=1)

        # Spectrum resample operations
        if min(self.wl) < min(wl_FFT) or max(self.wl) > max(wl_FFT):
            raise RuntimeError("Data wl grid ({:.2f},{:.2f}) must fit within the range of wl_FFT ({:.2f},{:.2f})".format(min(self.wl), max(self.wl), min(wl_FFT), max(wl_FFT)))

        # Take the output from the FFT operation (eigenspectra_full), and stuff them
        # into respective data products
        for lres, hres in zip(chain([self.flux_mean, self.flux_std], self.eigenspectra), eigenspectra_full):
            interp = InterpolatedUnivariateSpline(wl_FFT, hres, k=5)
            lres[:] = interp(self.wl)
            del interp

        # Helps keep memory usage low; numpy seems slow to free the memory
        # allocated on each iteration.
        gc.collect()

        # Adjust flux_mean and flux_std by Omega
        #Omega = 10**p.logOmega
        #self.flux_mean *= Omega
        #self.flux_std *= Omega

        # Now update the parameters from the emulator
        # If pars are outside the grid, Emulator will raise C.ModelError
        self.emulator.params = p.grid
        self.mus, self.C_GP = self.emulator.matrix
        self.flux_scalar = self.emulator.absolute_flux
        self.Omega = 10**p.logOmega
        self.flux_mean *= (self.Omega*self.flux_scalar)
        self.flux_std *= (self.Omega*self.flux_scalar)
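The ub/sb lines above evaluate the Fourier transform of the classical rotational broadening profile at the FFT frequencies ss, with the zero-frequency (DC) term set to 1 so the kernel conserves flux. A self-contained sketch of that taper on a synthetic constant-velocity grid (in the real code the ss array and pixel spacing come from the PCA grid):

import numpy as np
from scipy.special import j1

# Toy high-resolution spectrum on a constant-velocity (log-lambda) grid.
n_pix = 4096
dv = 0.5                                  # pixel spacing [km/s]
v = np.arange(n_pix) * dv
flux = 1.0 - 0.6 * np.exp(-0.5 * ((v - v.mean()) / 5.0) ** 2)  # one absorption line

vsini = 20.0                              # projected rotation velocity [km/s]

FF = np.fft.rfft(flux)
ss = np.fft.rfftfreq(n_pix, d=dv)         # frequencies [cycles per km/s]

# Fourier transform of the rotational broadening profile, as in the code above.
ub = 2.0 * np.pi * vsini * ss
with np.errstate(divide="ignore", invalid="ignore"):
    sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub**2) + 3.0 * np.sin(ub) / (2 * ub**3)
sb[0] = 1.0                               # DC term: the kernel has unit area

flux_broadened = np.fft.irfft(FF * sb, n_pix)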
Example #7
    def update_Theta(self, p):
        '''
        Update the model to the current Theta parameters.

        :param p: parameters to update model to
        :type p: model.ThetaParam
        '''

        # Dirty hack
        fix_logg = Starfish.config.get("fix_logg", None)
        if fix_logg is not None:
            p.grid[1] = fix_logg
        print("grid pars are", p.grid)

        self.logger.debug("Updating Theta parameters to {}".format(p))

        # Store the current accepted values before overwriting with new proposed values.
        self.flux_last = self.flux

        # Local, shifted copy of wavelengths
        wl_FFT = self.wl_FFT * np.sqrt((C.c_kms + p.vz) / (C.c_kms - p.vz))

        flux_raw = self.interpolator(p.grid)

        # If vsini is less than 0.2 km/s, we might run into issues with
        # the grid spacing. Therefore skip the convolution step if we have
        # values smaller than this.
        # FFT and convolve operations
        if p.vsini < 0.0:
            raise C.ModelError("vsini must be positive")
        elif p.vsini < 0.2:
            # Skip the vsini taper due to instrumental effects
            flux_taper = flux_raw
        else:
            FF = np.fft.rfft(flux_raw)

            # Determine the stellar broadening kernel
            ub = 2. * np.pi * p.vsini * self.ss
            sb = j1(ub) / ub - 3 * np.cos(ub) / (
                2 * ub**2) + 3. * np.sin(ub) / (2 * ub**3)
            # set zeroth frequency to 1 separately (DC term)
            sb[0] = 1.

            # institute vsini taper
            FF_tap = FF * sb

            # do ifft
            flux_taper = np.fft.irfft(FF_tap, len(self.wl_FFT))

        # Spectrum resample operations
        if min(self.wl) < min(wl_FFT) or max(self.wl) > max(wl_FFT):
            raise RuntimeError(
                "Data wl grid ({:.2f},{:.2f}) must fit within the range of wl_FFT ({:.2f},{:.2f})"
                .format(min(self.wl), max(self.wl), min(wl_FFT), max(wl_FFT)))

        # Take the output from the FFT operation and stuff it into the respective data products
        interp = InterpolatedUnivariateSpline(wl_FFT, flux_taper, k=5)
        self.flux = interp(self.wl)
        del interp

        gc.collect()

        # Adjust the flux by Omega
        Omega = 10**p.logOmega
        self.flux *= Omega
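The Doppler shift and resampling steps are shared by all of these update methods: the model wavelengths are scaled by sqrt((c + vz)/(c - vz)) and the broadened flux is then interpolated onto the data wavelengths with a fifth-order spline. A toy sketch with synthetic grids in place of wl_FFT and self.wl:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

c_kms = 2.99792458e5                            # speed of light [km/s]

wl_model = np.linspace(5000.0, 5100.0, 5000)    # model (FFT) wavelength grid [AA]
flux_model = 1.0 - 0.5 * np.exp(-0.5 * ((wl_model - 5050.0) / 0.3) ** 2)
wl_data = np.linspace(5010.0, 5090.0, 1200)     # data wavelength grid [AA]

vz = 30.0                                       # radial velocity [km/s]
wl_shifted = wl_model * np.sqrt((c_kms + vz) / (c_kms - vz))

# The data grid must lie inside the shifted model grid, as checked above.
assert wl_data.min() >= wl_shifted.min() and wl_data.max() <= wl_shifted.max()

interp = InterpolatedUnivariateSpline(wl_shifted, flux_model, k=5)
flux_on_data = interp(wl_data)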
Example #8
    def update_stellar(self, params):
        '''
        Update the model to the current stellar parameters.
        '''

        self.logger.debug("Updating stellar parameters to {}".format(params))

        # Store the current accepted values before overwriting with new proposed values.
        self.flux_mean_last = self.flux_mean
        self.flux_std_last = self.flux_std
        self.pcomps_last = self.pcomps
        self.mus_last, self.vars_last = self.mus, self.vars
        self.C_GP_last = self.C_GP

        #TODO: Possible speedups:
        # 1. Store the PCOMPS pre-FFT'd

        # Shift the velocity
        vz = params["vz"]
        # Local, shifted copy
        wl_FFT = self.wl_FFT * np.sqrt((C.c_kms + vz) / (C.c_kms - vz))

        # FFT and convolve operations
        vsini = params["vsini"]

        if vsini < 0.2:
            raise C.ModelError("vsini must be at least 0.2 km/s to apply the rotational broadening kernel.")

        FF = np.fft.rfft(self.PCOMPS, axis=1)

        # Determine the stellar broadening kernel
        ub = 2. * np.pi * vsini * self.ss
        sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub**2) + 3. * np.sin(ub) / (
            2 * ub**3)
        # set zeroth frequency to 1 separately (DC term)
        sb[0] = 1.

        # institute velocity and instrumental taper
        FF_tap = FF * sb

        # do ifft
        pcomps_full = np.fft.irfft(FF_tap, len(wl_FFT), axis=1)

        # Spectrum resample operations
        if min(self.wl) < min(wl_FFT) or max(self.wl) > max(wl_FFT):
            raise RuntimeError(
                "Data wl grid ({:.2f},{:.2f}) must fit within the range of wl_FFT ({"
                ":.2f},{:.2f})".format(min(self.wl), max(self.wl), min(wl_FFT),
                                       max(wl_FFT)))

        # Take the output from the FFT operation (pcomps_full), and stuff them
        # into respective data products
        for lres, hres in zip(
                chain([self.flux_mean, self.flux_std], self.pcomps),
                pcomps_full):
            interp = InterpolatedUnivariateSpline(wl_FFT, hres, k=5)
            lres[:] = interp(self.wl)
            del interp

        gc.collect()

        # Adjust flux_mean and flux_std by Omega
        Omega = 10**params["logOmega"]
        self.flux_mean *= Omega
        self.flux_std *= Omega

        # Now update the parameters from the emulator
        pars = np.array([params["temp"], params["logg"], params["Z"]])

        # If pars are outside the grid, Emulator will raise C.ModelError
        self.mus, self.vars = self.Emulator(pars)

        self.C_GP = self.vars * np.eye(self.ncomp)
Example #9
def lnprob(p):
    vz, vsini, logOmega = p[:3]
    cheb = p[3:]

    chebyshevSpectrum.update(cheb)

    # Local, shifted copy of wavelengths
    wl_FFT = wl_FFT_orig * np.sqrt((C.c_kms + vz) / (C.c_kms - vz))

    # Holders to store the convolved and resampled eigenspectra
    eigenspectra = np.empty((pca.m, ndata))
    flux_mean = np.empty((ndata, ))
    flux_std = np.empty((ndata, ))

    # If vsini is less than 0.2 km/s, we might run into issues with
    # the grid spacing. Therefore skip the convolution step if we have
    # values smaller than this.
    # FFT and convolve operations
    if vsini < 0.0:
        raise C.ModelError("vsini must be positive")
    elif vsini < 0.2:
        # Skip the vsini taper due to instrumental effects
        eigenspectra_full = EIGENSPECTRA.copy()
    else:
        FF = np.fft.rfft(EIGENSPECTRA, axis=1)

        # Determine the stellar broadening kernel
        ub = 2. * np.pi * vsini * ss
        sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub**2) + 3. * np.sin(ub) / (
            2 * ub**3)
        # set zeroth frequency to 1 separately (DC term)
        sb[0] = 1.

        # institute vsini taper
        FF_tap = FF * sb

        # do ifft
        eigenspectra_full = np.fft.irfft(FF_tap, pca.npix, axis=1)

    # Spectrum resample operations
    if min(wl) < min(wl_FFT) or max(wl) > max(wl_FFT):
        raise RuntimeError(
            "Data wl grid ({:.2f},{:.2f}) must fit within the range of wl_FFT ({:.2f},{:.2f})"
            .format(min(wl), max(wl), min(wl_FFT), max(wl_FFT)))

    # Take the output from the FFT operation (eigenspectra_full), and stuff them
    # into respective data products
    for lres, hres in zip(chain([flux_mean, flux_std], eigenspectra),
                          eigenspectra_full):
        interp = InterpolatedUnivariateSpline(wl_FFT, hres, k=5)
        lres[:] = interp(wl)
        del interp

    gc.collect()

    # Adjust flux_mean and flux_std by Omega
    Omega = 10**logOmega
    flux_mean *= Omega
    flux_std *= Omega

    # Get the mean spectrum
    X = (chebyshevSpectrum.k * flux_std * np.eye(ndata)).dot(eigenspectra.T)

    mean_spec = chebyshevSpectrum.k * flux_mean + X.dot(mus)
    R = fl - mean_spec

    # Evaluate chi2
    lnp = -0.5 * np.sum((R / sigma)**2)
    return [lnp, mean_spec, R]
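With independent per-pixel uncertainties sigma, the likelihood reduces to the chi-square form used here. A tiny sketch with random toy arrays, checking that it matches the dense-covariance expression of Example #2 when the covariance is diagonal (the log-determinant and 2*pi constants, which do not depend on the proposed parameters, are dropped):

import numpy as np

rng = np.random.default_rng(2)
n = 100
fl = rng.normal(size=n)                 # "data" flux
mean_spec = rng.normal(size=n)          # "model" flux
sigma = 0.1 + rng.random(n)             # per-pixel uncertainties

R = fl - mean_spec
lnp = -0.5 * np.sum((R / sigma) ** 2)

# Equivalent dense form with a diagonal covariance matrix.
CC = np.diag(sigma**2)
lnp_dense = -0.5 * R @ np.linalg.solve(CC, R)
assert np.allclose(lnp, lnp_dense)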