Example #1
    def setup_class(self):
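        # Build a noisy quadratic dataset, fit it with the Cash
        # statistic, and keep the fitter's sampler for the tests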
        self.model = Polynomial1D(2)
        self.x = np.arange(0, 10, 0.1)

        self.params = (2, 0.5, 3)
        # a simple polynomial
        self.y = self.params[0]
        self.y += self.params[1] * self.x
        self.y += self.params[2] * self.x**2

        self.y += np.random.uniform(-0.1, 0.1, self.x.size)

        sfit = SherpaFitter(statistic="cash", estmethod='covariance')
        sfit(self.model, self.x, self.y)
        self.sampler = sfit.get_sampler()
Example #3
    def test_bkg_doesnt_explode(self):
        """
        Check this goes through the motions
        """

        m = Polynomial1D(2)

        x = np.arange(0, 10, 0.1)
        y = 2 + 0.5 * x + 3 * x**2
        bkg = x

        sfit = SherpaFitter(statistic="cash", estmethod='covariance')
        sfit(m, x, y, bkg=bkg)
def fit_knot_unified(hdu, j1, j2, u0, lineid='nii'):
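    """Fit background, line core, and knot components simultaneously.

    The spectrum summed over slit pixels j1:j2 of hdu is fit all at
    once with a constant-plus-Lorentzian background, a multi-Gaussian
    line core, and a Gaussian knot at the nominal velocity u0 (plus
    extra components chosen according to u0).  The knot is then re-fit
    against the core-subtracted residual.  Returns a dict of results.
    """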

    NS, NV = hdu.data.shape
    w = WCS(hdu.header)
    vels, _ = w.all_pix2world(np.arange(NV), [0]*NV, 0)
    vels /= 1000.0

    # Ensure we don't go out of bounds
    j1 = max(j1, 0)
    j2 = min(j2, NS)
    print('Slit pixels {}:{} out of {}'.format(j1, j2, NS))

    knotspec = hdu.data[j1:j2, :].sum(axis=0)
    # make sure all pixels are positive, since that helps the fitting/plotting
    knotspec -= knotspec.min()

    # Levenberg-Marquardt for easy jobs
    lmfitter = SherpaFitter(statistic='chi2',
                            optimizer='levmar',
                            estmethod='confidence')

    # Simulated annealing for trickier jobs
    safitter = SherpaFitter(statistic='chi2',
                            optimizer='neldermead',
                            estmethod='covariance')

    # The idea is that this strategy should work for all knots

    # Estimate error from the BG: < -120 or > +100
    bgmask = np.abs(vels + 10.0) >= 110.0
    bgerr = np.std(knotspec[bgmask]) * np.ones_like(vels)

    # Define core as [-10, 50], or 20 +/- 30
    coremask = np.abs(vels - 20.0) < 30.0

    # Fit to the BG with constant plus Lorentz
    try:
        vmean = np.average(vels[coremask], weights=knotspec[coremask])
    except ZeroDivisionError:
        vmean = 15.0

    bgmodel = lmfitter(_init_bgmodel(vmean),
                       vels[bgmask], knotspec[bgmask],
                       err=bgerr[bgmask])
    # The BG model could be frozen before adding it to the full model,
    # but freezing is disabled in this version:
    # bgmodel['Lorentz'].fixed['amplitude'] = True
    # bgmodel['Constant'].fixed['amplitude'] = True

    # Increase the data err in the bright part of the line to mimic Poisson noise
    # Even though we don't know what the normalization is really, we will guess ...
    spec_err = bgerr + POISSON_SCALE * np.sqrt(knotspec)

    # Now for the exciting bit: fit everything at once
    knotmask = np.abs(vels - u0) <= KNOT_WIDTH
    # For low-velocity knots, we need to exclude positive velocities
    # from the mask, since they will have large residual errors from
    # the core subtraction
    knotmask = knotmask & (vels < 0.0)

    # Start off with a copy of the fitted BG model
    fullmodel = bgmodel.copy()
    core_components = list(fullmodel.submodel_names)

    # Add in a model for the core
    DV_INIT = [-15.0, -5.0, 5.0, 10.0, 30.0]
    NCORE = len(DV_INIT)
    BASE_WIDTH = 10.0 if lineid == 'ha' else 5.0
    W_INIT = [BASE_WIDTH]*4 + [1.5*BASE_WIDTH]
    for i in range(NCORE):
        v0 = vmean + DV_INIT[i]
        w0 = W_INIT[i]
        component = 'G{}'.format(i)
        fullmodel += Gaussian1D(
            3.0, v0, w0,
            bounds={'amplitude': [0, None],
                    'mean': [v0 - 10, v0 + 10],
                    'stddev': [w0, 1.5*w0]},
            name=component)
        core_components.append(component)

    # Now, add in components for the knot to extract
    knotmodel_init = Gaussian1D(
        0.01, u0, BASE_WIDTH,
        # Allow +/- 10 km/s leeway around nominal knot velocity
        bounds={'amplitude': [0, None],
                'mean': [u0 - 10, u0 + 10],
                'stddev': [BASE_WIDTH, 25.0]},
        name='Knot')
    fullmodel += knotmodel_init
    knot_components = ['Knot']
    other_components = []

    # Depending on the knot velocity, we may need other components to
    # take up the slack too
    if u0 <= -75.0 or u0 >= -50.0:
        # Add in a generic fast knot
        fullmodel += Gaussian1D(
            0.01, -60.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [-70.0, -50.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Fast other')
        other_components.append('Fast other')

    if u0 <= -50.0:
        # Add in a generic slow knot
        fullmodel += Gaussian1D(
            0.01, -30.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [-40.0, -10.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Slow other')
        other_components.append('Slow other')

    if u0 >= -75.0:
        # Add in a very fast component
        fullmodel += Gaussian1D(
            0.001, -90.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [-110.0, -75.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Ultra-fast other')
        other_components.append('Ultra-fast other')

    if u0 <= 30.0:
        # Add in a red-shifted component just in case
        fullmodel += Gaussian1D(
            0.01, 40.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [30.0, 200.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Red other')
        other_components.append('Red other')

    # Moment of truth: fit models to data
    fullmodel = safitter(fullmodel, vels, knotspec, err=spec_err)
    full_fit_info = safitter.fit_info

    # Isolate the core+other model components 
    coremodel = fullmodel[core_components[0]]
    for component in core_components[1:] + other_components:
        coremodel += fullmodel[component]

    # Subtract the core model from the data
    residspec = knotspec - coremodel(vels)

    # Now re-fit the knot model to the residual

    # Calculate running std of residual spectrum
    NWIN = 11
    running_mean = generic_filter(residspec, np.mean, size=(NWIN,))
    running_std = generic_filter(residspec, np.std, size=(NWIN,))

    # Increase error estimate for data points where this is larger
    # than spec_err, but only for velocities that are not in knotmask
    residerr = bgerr.copy()  # copy so bgerr is not modified in place
    # residerr = spec_err
    mask = (~knotmask) & (running_std > bgerr)
    residerr[mask] = running_std[mask]
    # The reason for this is so that poor modelling of the core is
    # accounted for in the errors.  Otherwise the reduced chi2 of the
    # knot model will be too high

    # Make an extended mask for fitting the knot, omitting the
    # redshifted half of the spectrum since it is irrelevant and we
    # don't want it to affect the chi2 or the confidence intervals
    bmask = vels < 50.0

    knotmodel = lmfitter(knotmodel_init,
                         vels[bmask], residspec[bmask],
                         err=residerr[bmask])

    # Calculate the final residuals, which should be flat
    final_residual = residspec - knotmodel(vels)

    # Look at stddev of the final residuals and use them to rescale
    # the residual errors.  Then re-fit the knot with this better
    # estimate of the errors.  But only if rescaling would reduce the
    # data error estimate.
    residerr_rescale = final_residual[bmask].std() / residerr[bmask].mean()
    if residerr_rescale < 1.0:
        print('Rescaling data errors by', residerr_rescale)
        residerr *= residerr_rescale
        knotmodel = lmfitter(knotmodel,
                             vels[bmask], residspec[bmask],
                             err=residerr[bmask])
    else:
        residerr_rescale = 1.0

    knot_fit_info = lmfitter.fit_info
    lmfitter._fitter.estmethod.config['max_rstat'] = MAX_RSTAT
    if knot_fit_info.rstat < MAX_RSTAT:
        knot_fit_errors = lmfitter.est_errors(sigma=3)
    else:
        knot_fit_errors = None

    return {
        'nominal knot velocity': u0,
        'velocities': vels,
        'full profile': knotspec,
        'error profile': residerr,
        'core fit model': coremodel,
        'core fit profile': coremodel(vels),
        'core fit components': {k: coremodel[k](vels) for k in coremodel.submodel_names},
        'core fit info': full_fit_info,
        'core-subtracted profile': residspec,
        'knot fit model': knotmodel,
        'knot fit profile': knotmodel(vels),
        'knot fit info': knot_fit_info,
        'knot fit errors': knot_fit_errors,
        'error rescale factor': residerr_rescale,
        'knot j range': (j1, j2),
    }
def fit_knot(hdu, j1, j2, u0):
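    """Fit the line core with the knot window masked, then fit the knot.

    The spectrum summed over slit pixels j1:j2 of hdu is fit with a
    core model while a window of half-width KNOT_WIDTH around u0 is
    excluded; the core fit is subtracted and a single Gaussian is fit
    to the residual knot.  Returns a dict of results.
    """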

    NS, NV = hdu.data.shape
    w = WCS(hdu.header)
    vels, _ = w.all_pix2world(np.arange(NV), [0]*NV, 0)
    vels /= 1000.0

    # Ensure we don't go out of bounds
    j1 = max(j1, 0)
    j2 = min(j2, NS)
    print('Slit pixels {}:{} out of {}'.format(j1, j2, NS))

    knotspec = hdu.data[j1:j2, :].sum(axis=0)
    # make sure all pixels are positive, since that helps the fitting/plotting
    knotspec -= knotspec.min()

    # Levenberg-Marquardt for easy jobs
    lmfitter = SherpaFitter(statistic='chi2',
                            optimizer='levmar',
                            estmethod='confidence')
    # Simulated annealing for trickier jobs
    safitter = SherpaFitter(statistic='chi2',
                            optimizer='neldermead',
                            estmethod='covariance')

    # First do the strategy for typical knots (u0 = [-30, -80])

    # Estimate error from the BG: < -120 or > +100
    bgmask = np.abs(vels + 10.0) >= 110.0
    bgerr = np.std(knotspec[bgmask]) * np.ones_like(vels)

    # Fit to the BG with constant plus Lorentz
    try:
        vmean = np.average(vels, weights=knotspec)
    except ZeroDivisionError:
        vmean = 15.0

    bgmodel = lmfitter(_init_bgmodel(vmean),
                       vels[bgmask], knotspec[bgmask],
                       err=bgerr[bgmask])
    # Now freeze the BG model and add it to the initial core model
    bgmodel['Lorentz'].fixed['amplitude'] = True
    bgmodel['Constant'].fixed['amplitude'] = True

    # Increase the data err in the bright part of the line to mimic Poisson noise
    # Even though we don't know what the normalization is really, we will guess ...
    spec_err = bgerr + POISSON_SCALE*np.sqrt(knotspec)

    # Fit to the line core
    knotmask = np.abs(vels - u0) <= KNOT_WIDTH
    coremodel = safitter(_init_coremodel() + bgmodel,
                         vels[~knotmask], knotspec[~knotmask],
                         err=spec_err[~knotmask])
    core_fit_info = safitter.fit_info

    # Residual should contain just knot
    residspec = knotspec - coremodel(vels)

    # Calculate running std of residual spectrum
    NWIN = 11
    running_mean = generic_filter(residspec, np.mean, size=(NWIN,))
    running_std = generic_filter(residspec, np.std, size=(NWIN,))

    # Increase error estimate for data points where this is larger
    # than spec_err, but only for velocities that are not in knotmask
    residerr = bgerr.copy()  # copy so bgerr is not modified in place
    # residerr = spec_err
    mask = (~knotmask) & (running_std > bgerr)
    residerr[mask] = running_std[mask]
    # The reason for this is so that poor modelling of the core is
    # accounted for in the errors.  Otherwise the reduced chi2 of the
    # knot model will be too high

    # Make an extended mask for fitting the knot, omitting the
    # redshifted half of the spectrum since it is irrelevant and we
    # don't want it to affect the chi2 or the confidence intervals
    bmask = vels < 50.0

    # Fit single Gaussian to knot 
    amplitude_init = residspec[knotmask].max()
    if amplitude_init < 0.0:
        # ... pure desperation here
        amplitude_init = residspec[bmask].max()
    knotmodel = lmfitter(_init_knotmodel(amplitude_init, u0),
                         vels[bmask], residspec[bmask],
                         err=residerr[bmask])

    # Calculate the final residuals, which should be flat
    final_residual = residspec - knotmodel(vels)

    # Look at stddev of the final residuals and use them to rescale
    # the residual errors.  Then re-fit the knot with this better
    # estimate of the errors.  But only if rescaling would reduce the
    # data error estimate.
    residerr_rescale = final_residual[bmask].std() / residerr[bmask].mean()
    if residerr_rescale < 1.0:
        print('Rescaling data errors by', residerr_rescale)
        residerr *= residerr_rescale
        knotmodel = lmfitter(knotmodel,
                             vels[bmask], residspec[bmask],
                             err=residerr[bmask])
    else:
        residerr_rescale = 1.0

    knot_fit_info = lmfitter.fit_info
    lmfitter._fitter.estmethod.config['max_rstat'] = MAX_RSTAT
    if knot_fit_info.rstat < MAX_RSTAT:
        knot_fit_errors = lmfitter.est_errors(sigma=3)
    else:
        knot_fit_errors = None

    return {
        'nominal knot velocity': u0,
        'velocities': vels,
        'full profile': knotspec,
        'error profile': residerr,
        'core fit model': coremodel,
        'core fit profile': coremodel(vels),
        'core fit components': {k: coremodel[k](vels) for k in coremodel.submodel_names},
        'core fit info': core_fit_info,
        'core-subtracted profile': residspec,
        'knot fit model': knotmodel,
        'knot fit profile': knotmodel(vels),
        'knot fit info': knot_fit_info,
        'knot fit errors': knot_fit_errors,
        'error rescale factor': residerr_rescale,
    }
Example #8
    def setup_class(self):
        # make data and models to use later!
        err = 0.1

        self.x1 = np.arange(1, 10, .1)
        self.dx1 = np.ones(self.x1.shape) * (.1 / 2.)
        self.x2 = np.arange(1, 10, .05)
        self.dx2 = np.ones(self.x2.shape) * (.05 / 2.)

        self.model1d = Gaussian1D(mean=5, amplitude=10, stddev=0.8)
        self.model1d_2 = Gaussian1D(mean=4, amplitude=5, stddev=0.2)

        self.tmodel1d = self.model1d.copy()
        self.tmodel1d_2 = self.model1d_2.copy()

        self.y1 = self.model1d(self.x1)
        self.y1 += err * np.random.uniform(-1., 1., size=self.y1.size)
        self.dy1 = err * np.random.uniform(0.5, 1., size=self.y1.size)

        self.y2 = self.model1d_2(self.x2)
        self.y2 += err * np.random.uniform(-1., 1., size=self.y2.size)
        self.dy2 = err * np.random.uniform(0.5, 1., size=self.y2.size)

        self.model1d.mean = 4
        self.model1d.amplitude = 6
        self.model1d.stddev = 0.5

        self.model1d_2.mean = 5
        self.model1d_2.amplitude = 10
        self.model1d_2.stddev = 0.3

        self.xx2, self.xx1 = np.mgrid[2:12:1, 2:12:1]
        self.shape = self.xx2.shape
        self.xx1 = self.xx1.flatten()
        self.xx2 = self.xx2.flatten()

        self.model2d = Gaussian2D(amplitude=10,
                                  x_mean=5,
                                  y_mean=6,
                                  x_stddev=0.8,
                                  y_stddev=1.1)
        self.model2d.theta.fixed = True

        self.yy = self.model2d(self.xx1, self.xx2)
        self.dxx1 = err * np.random.uniform(0.5, 1., size=self.xx1.size)
        self.dxx2 = err * np.random.uniform(0.5, 1., size=self.xx2.size)
        self.dyy = err * np.random.uniform(0.5, 1., size=self.yy.size)

        self.tmodel2d = self.model2d.copy()
        self.model2d.amplitude = 5
        self.model2d.x_mean = 6
        self.model2d.y_mean = 5
        self.model2d.x_stddev = 0.2
        self.model2d.y_stddev = 0.7

        # to stop stddev going negative and getting a divide-by-zero error
        self.model1d.stddev.min = 1e-99
        self.model1d_2.stddev.min = 1e-99
        self.model2d.x_stddev.min = 1e-99
        self.model2d.y_stddev.min = 1e-99

        # Let's define some top hats
        self.rsp1 = np.zeros_like(self.x1)
        self.rsp1[(self.x1 > 4) & (self.x1 < 6)] = 1
        self.rsp2 = np.zeros_like(self.x2)
        self.rsp2[(self.x2 > 4) & (self.x2 < 6)] = 1

        self.rsp2d = np.zeros_like(self.xx1)
        self.rsp2d[(self.xx1 > 4) & (self.xx1 < 6) & (self.xx2 > 4) &
                   (self.xx2 < 6)] = 1
        # flatten() returns a new array, so reassign the result
        self.rsp2d = self.rsp2d.flatten()
        self.fitter = SherpaFitter(statistic="Chi2")
Example #9
def fit_lines_sherpa(spec_file,
                     z_init=0.,
                     file_out=None,
                     do_plot=True,
                     monte_carlo=False):
    """Fit an HII region spectrum using Sherpa package.    """

    # from astropy.modeling.fitting import SherpaFitter
    from saba import SherpaFitter
    import matplotlib.pyplot as plt
    from linetools.spectra.xspectrum1d import XSpectrum1D

    # Redshift scale:
    scale_factor = (1. + z_init)

    # Read in the spectrum. **ASSUME VACUUM WAVELENGTHS?**
    mods_spec = XSpectrum1D.from_file(spec_file)

    # Set up a convenient wavelength, flux, error arrays
    wave = mods_spec.wavelength.value
    flux = mods_spec.flux.value
    err = mods_spec.sig.value

    ###### ------ FOR TESTING!! ------
    ### To test this, let's constrain ourselves to only the wavelengths between ~Hbeta, OIII
    # g = np.where((wave >= 4000) & (wave <= 5400.))
    # wave = wave[g]
    # flux = flux[g]
    # err = err[g]

    # Load the data for the lines to be fit. Starts with MANGA line list, modified for MODS.
    line_data = get_linelist()
    # Exclude lines outside of the wavelength coverage.
    keep_lines = np.where(
        (line_data['lambda'] <= np.max(wave) / scale_factor)
        & (line_data['lambda'] >= np.min(wave) / scale_factor))[0]
    keep_line_index = line_data['indx'][keep_lines]

    # For now (debugging): keep every line in the list. jch
    keep_lines = np.arange(len(line_data))
    keep_line_index = line_data['indx'][keep_lines]

    ##### MODEL DEFINITIONS
    # Define initial parameters
    amplitude_init = 0.1 * np.max(mods_spec.flux)
    stddev_init = 1.5

    amplitude_bounds, stddev_bounds, velocity_range = _define_bounds()

    # Calculate the redshift delta
    z_bounds_scale = (velocity_range / c.c.to('km/s').value) * scale_factor
    z_bounds = (z_init - z_bounds_scale, z_init + z_bounds_scale)

    # Define a joint model as the sums of Gaussians for each line
    #  Gaussian for first line:
    j = 0
    wave0 = line_data['lambda'][j]
    line_center = wave0 * scale_factor
    model_name = str(line_data['name'][j])  # np.str has been removed from NumPy
    # Here we use a custom Gaussian class so the redshifts can be tied together
    joint_model = GaussianEmission(amplitude=amplitude_init,
                                   redshift=z_init,
                                   stddev=stddev_init,
                                   wave0=wave0,
                                   name=model_name)
    # The rest wavelength is not a free parameter:
    joint_model.wave0.fixed = True

    #  Loop through the remaining lines:
    for j in np.arange(1, np.size(line_data)):
        wave0 = line_data['lambda'][j]
        line_center = wave0 * scale_factor
        model_name = str(line_data['name'][j])

        joint_model += GaussianEmission(amplitude=amplitude_init,
                                        redshift=z_init,
                                        stddev=stddev_init,
                                        wave0=wave0,
                                        name=model_name)

    # Extract the model names:
    model_names = joint_model.submodel_names

    # Now we have to loop through the same models, applying the bounds:
    for mdlnms in model_names:
        joint_model[mdlnms].bounds['amplitude'] = amplitude_bounds
        joint_model[mdlnms].bounds['redshift'] = z_bounds
        joint_model[mdlnms].bounds['stddev'] = stddev_bounds
        # The rest wavelength is not a free parameter:
        joint_model[mdlnms].wave0.fixed = True

    # TODO Get tied parameters to work.
    # Tie some parameters together, checking that reference lines
    #  are actually covered by the spectrum:
    for k in np.arange(0, np.size(line_data)):
        mdlnm = model_names[k]
        if (line_data['mode'][k] == 't33') & (np.in1d(33, keep_line_index)):
            joint_model[mdlnm].stddev.tied = _tie_sigma_4862
            joint_model[mdlnm].redshift.tied = _tie_redshift_4862
        elif (line_data['mode'][k] == 't35') & (np.in1d(35, keep_line_index)):
            joint_model[mdlnm].stddev.tied = _tie_sigma_5008
            joint_model[mdlnm].redshift.tied = _tie_redshift_5008
        elif (line_data['mode'][k] == 't45') & (np.in1d(45, keep_line_index)):
            joint_model[mdlnm].stddev.tied = _tie_sigma_6585
            joint_model[mdlnm].redshift.tied = _tie_redshift_6585
        elif (line_data['mode'][k] == 't46') & (np.in1d(46, keep_line_index)):
            joint_model[mdlnm].stddev.tied = _tie_sigma_6718
            joint_model[mdlnm].redshift.tied = _tie_redshift_6718

        # 3727/3729 lines:
        if mdlnm == '[OII]3727':
            joint_model[mdlnm].stddev.tied = _tie_sigma_3729
            joint_model[mdlnm].redshift.tied = _tie_redshift_3729

        # Tie amplitudes of doublets
        if (line_data['line'][k] == 'd35') & (np.in1d(35, keep_line_index)):
            joint_model[mdlnm].amplitude.tied = _tie_ampl_5008  # 4959/5008
        if (line_data['line'][k] == 'd45') & (np.in1d(45, keep_line_index)):
            joint_model[mdlnm].amplitude.tied = _tie_ampl_6585  # 6549/6585

    ##### FITTING
    # Sherpa model fitting from SABA package
    sfit = SherpaFitter(statistic='chi2',
                        optimizer='levmar',
                        estmethod='confidence')
    sfit_lm = SherpaFitter(statistic='chi2',
                           optimizer='neldermead',
                           estmethod='confidence')
    sfit_mc = SherpaFitter(statistic='chi2',
                           optimizer='moncar',
                           estmethod='confidence')
    # Do the fit
    sfitted_model = sfit(joint_model, wave, flux, err=err)
    # Refine with different optimizer
    temp_model = sfitted_model.copy()
    sfitted_model = sfit_lm(temp_model, wave, flux, err=err)

    if monte_carlo:
        # If requested, do a second fit with the very slow Monte Carlo approach
        sfitted_model = sfit_mc(sfitted_model.copy(), wave, flux, err=err)

    # Create the fitted flux array
    sfitted_flux = sfitted_model(wave)

    # TODO Get error estimates from Sherpa
    # Work out the errors...
    #sfit.est_config['maxiters']=200
    #sfitted_err = sfit.est_errors(sigma=3)

    # Plot the results
    if do_plot:
        plt.clf()
        plt.plot(wave, flux, drawstyle='steps-mid', linewidth=2)
        plt.plot(wave, sfitted_flux, color='orange', linewidth=2)

    ##### Create integrated fluxes and errors
    # The integration range is over +/-stddev * int_delta_factor
    int_delta_factor = _define_integration_delta()
    output_construct = 0

    for j in np.arange(np.size(line_data)):
        # Calculate integrated fluxes, errors;
        #  -- First test that the lines are in the range covered by data
        if np.in1d(j, keep_lines):
            mean_lambda = line_data[j]['lambda'] * (1. +
                                                    sfitted_model[j].redshift)

            #    deal with blended O II 3727/3729 doublet
            if line_data[j]['name'] == '[OII]3727':
                # Calculate the integrated fluxes and errors
                iflux, ierr = integrate_line_flux(wave,
                                                  flux,
                                                  err,
                                                  mean_lambda,
                                                  sfitted_model[j].stddev *
                                                  int_delta_factor,
                                                  line3727=True)
            elif line_data[j]['name'] == '[OII]3729':
                # pdb.set_trace()
                # For 3729, use the flux derived for 3726
                iflux = output_table['int_flux'][j - 1]
                #iflux = 0.
                # For 3729, use its own error. This is appropriate for the fitted errors of both lines
                _, ierr = integrate_line_flux(
                    wave, flux, err, mean_lambda,
                    sfitted_model[j].stddev * int_delta_factor)
            else:
                # Calculate the integrated fluxes and errors
                iflux, ierr = integrate_line_flux(
                    wave, flux, err, mean_lambda,
                    sfitted_model[j].stddev * int_delta_factor)

            redshift_out = (sfitted_model[j].redshift)[0]
            sfitted_flux_out = (np.sqrt(2. * np.pi) *
                                sfitted_model[j].amplitude *
                                sfitted_model[j].stddev)

            if output_construct == 0:
                # Define and construct the initial table to hold the results
                (output_col_names, output_format,
                 output_dtype) = _define_output_table()
                output_data = [[line_data[j]['name']], [line_data[j]['ion']],
                               [line_data[j]['lambda']],
                               [line_data[j]['indx']], [line_data[j]['mode']],
                               [mean_lambda],
                               [sfitted_model[j].amplitude.value],
                               [sfitted_model[j].stddev.value], [redshift_out],
                               [sfitted_flux_out], [iflux], [ierr],
                               [iflux / ierr]]
                output_table = Table(output_data,
                                     names=output_col_names,
                                     dtype=output_dtype)

                output_construct = 1
            else:
                output_table.add_row([
                    line_data[j]['name'], line_data[j]['ion'],
                    line_data[j]['lambda'], line_data[j]['indx'],
                    line_data[j]['mode'], mean_lambda,
                    sfitted_model[j].amplitude.value,
                    sfitted_model[j].stddev.value, redshift_out,
                    sfitted_flux_out, iflux, ierr, iflux / ierr
                ])

    # Set the output format of the results table:
    colnames = output_table.colnames
    for j in np.arange(np.size(colnames)):
        output_table[colnames[j]].format = output_format[j]

    # Set up the spectral table:
    spec_table = Table([wave, flux, err, sfitted_flux],
                       names=['wave', 'flux', 'err', 'spec_fit'])

    # Write summary FITS files
    if file_out is None:
        # str.strip() removes a set of characters, not a suffix
        file_base = spec_file.replace('.fits', '')
    else:
        file_base = file_out

    table_file = file_base + '.HIIFitTable.fits'
    fit_file = file_base + '.HIIFitSpec.fits'

    output_table.write(table_file, overwrite=True)
    spec_table.write(fit_file, overwrite=True)

    return output_table
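
A minimal driver for this last example might look like the sketch below. The file name and redshift are placeholders rather than values from the original project; fit_lines_sherpa writes its .HIIFitTable.fits and .HIIFitSpec.fits outputs next to the input, as shown above.

# Hypothetical usage sketch for fit_lines_sherpa; the spectrum file and
# z_init below are placeholder inputs, not from the original source.
results = fit_lines_sherpa('hii_region_spec.fits',
                           z_init=0.002,
                           do_plot=False,
                           monte_carlo=False)
print(results)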