Example #1
    def plot_points(self, axes, **kwargs):
        """ Plot the SED using matpotlib. """
        data_kwargs = dict(color='black')
        data_kwargs.update(kwargs)

        edict = self.results['Energy']
        fdict = self.results['dNdE']

        file_energy_units = units.fromstring(edict['Units'])
        file_flux_units = units.fromstring(fdict['Units'])

        ce = lambda x: units.convert(np.asarray(x), file_energy_units,
                                     axes.energy_units_obj)

        # get energy part
        energy = ce(edict['Value'])
        if 'Lower' in edict and 'Upper' in edict:
            lower_energy = ce(edict['Lower'])
            upper_energy = ce(edict['Upper'])
            has_energy_errors = True
        else:
            has_energy_errors = False

        # get spectral part

        cf = lambda y: units.convert(
            energy**2 * np.asarray(y), axes.energy_units_obj**2 *
            file_flux_units, axes.flux_units_obj / units.cm**2 / units.s)

        dnde = cf(fdict['Value'])

        if 'Lower_Error' in fdict and 'Upper_Error' in fdict:
            # asymmetric errors
            dnde_lower_err = cf(fdict['Lower_Error'])
            dnde_upper_err = cf(fdict['Upper_Error'])
            has_assymetric_errors = True
        else:
            has_assymetric_errors = False
            dnde_err = cf(fdict['Average_Error'])

        # get limits, otherwise assume all significant
        if 'Upper_Limit' in fdict and 'Significant' in self.results:
            dnde_ul = cf(fdict['Upper_Limit'])
            significant = np.asarray(self.results['Significant'])
            has_upper_limits = True
        else:
            has_upper_limits = False

        plot_points(
            x=energy,
            xlo=lower_energy if has_energy_errors else None,
            xhi=upper_energy if has_energy_errors else None,
            y=dnde,
            y_lower_err=dnde_lower_err if has_assymetric_errors else dnde_err,
            y_upper_err=dnde_upper_err if has_assymetric_errors else dnde_err,
            y_ul=dnde_ul if has_upper_limits else None,
            significant=significant
            if has_upper_limits else np.ones(len(energy), dtype=bool),
            axes=axes,
            **data_kwargs)
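
A note on the input this method expects: it only reads a handful of keys from self.results, so the shape of the SED dictionary can be reconstructed from the code above. Below is a minimal, hand-built example of that structure; all numbers are placeholders, and the 'Lower'/'Upper' energy bounds, the asymmetric errors, and the 'Upper_Limit'/'Significant' entries are the optional branches handled by the plotter.

# Minimal SED dictionary with the keys read by plot_points() above.
# Values are illustrative only; the unit strings are whatever
# units.fromstring() can parse (here the MeV-based ones used elsewhere).
sed_results = {
    'Energy': {
        'Units': 'MeV',
        'Value': [141.4, 1414.2],            # bin centers
        'Lower': [100.0, 1000.0],            # optional: bin edges
        'Upper': [200.0, 2000.0],
    },
    'dNdE': {
        'Units': 'ph/cm^2/s/MeV',
        'Value': [1.2e-9, 3.4e-11],
        'Lower_Error': [3.0e-10, 1.0e-11],       # optional: asymmetric errors
        'Upper_Error': [4.0e-10, 1.2e-11],
        'Upper_Limit': [float('nan'), 5.0e-11],  # optional: limits for non-significant bins
    },
    'Significant': [True, False],            # required only when Upper_Limit is present
}
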
Example #2
    def pointlike_sed_to_dict(bandflux, flux_units='erg', energy_units='MeV'):
        results = defaultdict(lambda:defaultdict(list))
        
        ce=lambda e: units.convert(e,'MeV',energy_units)
        cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units)

        results['Energy']['Units'] = energy_units
        results['dNdE']['Units'] = 'ph/cm^2/s/%s' % flux_units

        results['Significant'] = []
        for r in bandflux.rec:
            
            results['Energy']['Lower'].append(ce(r.elow))
            results['Energy']['Upper'].append(ce(r.ehigh))
            results['Energy']['Value'].append(ce(np.sqrt(r.elow*r.ehigh)))

            # Undo scaling in the bandflux recarray
            fac = r.elow*r.ehigh*bandflux.scale_factor

            if r.flux > 0:
                results['Significant'].append(True)
                results['dNdE']['Value'].append(cp(r.flux/fac))
                results['dNdE']['Average_Error'].append(cp((r.uflux/fac - r.lflux/fac)/2))
                results['dNdE']['Lower_Error'].append(cp((r.flux-r.lflux)/fac))
                results['dNdE']['Upper_Error'].append(cp((r.uflux-r.flux)/fac))
                results['dNdE']['Upper_Limit'].append(np.nan)
            else:
                results['Significant'].append(False)
                results['dNdE']['Value'].append(np.nan)
                results['dNdE']['Average_Error'].append(np.nan)
                results['dNdE']['Lower_Error'].append(np.nan)
                results['dNdE']['Upper_Error'].append(np.nan)
                results['dNdE']['Upper_Limit'].append(cp(r.uflux/fac))

        return tolist(results)
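
The cp helper converts a differential flux from 1/MeV to 1/erg when the default flux_units='erg' is used. Assuming units.convert performs ordinary dimensional conversion, the factor is easy to check by hand: 1 MeV is about 1.602e-6 erg, so one erg is about 6.2415e5 MeV and a dN/dE value quoted per erg is larger by that factor.

# Back-of-the-envelope check of the 1/MeV -> 1/erg conversion done by cp().
MEV_PER_ERG = 6.2415e5            # 1 erg ~= 6.2415e5 MeV  (1 MeV ~= 1.602e-6 erg)

dnde_per_mev = 2.5e-12            # example value in ph/cm^2/s/MeV
dnde_per_erg = dnde_per_mev * MEV_PER_ERG
print(dnde_per_erg)               # ~1.56e-06 ph/cm^2/s/erg
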
Example #3
def pointlike_model_to_flux(model, emin, emax, flux_units='erg', energy_units='MeV', 
                            errors=True, include_prefactor=False, prefactor_energy=None):

    cef=lambda e: units.convert(e,'MeV',flux_units)
    ce=lambda e: units.convert(e,'MeV',energy_units)
    f=dict()
    if errors:
        f['flux'],f['flux_err']=model.i_flux(emin=emin,emax=emax,error=True)
        ef,ef_err=model.i_flux(emin=emin,emax=emax,e_weight=1,error=True)
        f['eflux'],f['eflux_err']=cef(ef),cef(ef_err)
    else:
        f['flux']=model.i_flux(emin=emin,emax=emax,error=False)
        ef=model.i_flux(emin=emin,emax=emax,e_weight=1,error=False)
        f['eflux']=cef(ef)

    f['flux_units']='ph/cm^2/s'
    f['eflux_units']='%s/cm^2/s' % flux_units
    f['energy_units']=energy_units
    f['emin'],f['emax']=ce(emin),ce(emax)

    if include_prefactor:
        assert prefactor_energy is not None
        cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units)
        f['prefactor'] = cp(model(prefactor_energy))
        f['prefactor_units'] = 'ph/cm^2/s/%s' % flux_units
        f['prefactor_energy'] = ce(prefactor_energy)

    return tolist(f)
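
For reference, a sketch of how the function above might be called. This is not self-contained: `model` has to be an actual pointlike spectral model exposing the i_flux() method used above, and the energy range and reference energy are arbitrary placeholders.

# Hypothetical usage (requires a pointlike spectral model `model`):
f = pointlike_model_to_flux(model, emin=100, emax=100000,
                            flux_units='erg', energy_units='GeV',
                            include_prefactor=True, prefactor_energy=1000)
print(f['flux'], f['flux_units'])                # integral photon flux, 'ph/cm^2/s'
print(f['eflux'], f['eflux_units'])              # integral energy flux, 'erg/cm^2/s'
print(f['emin'], f['emax'], f['energy_units'])   # 0.1 100.0 GeV
print(f['prefactor'], f['prefactor_units'])      # dN/dE at 1000 MeV, 'ph/cm^2/s/erg'
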
Example #4
def gtlike_flux_dict(like,name, emin=None,emax=None,flux_units='erg', energy_units='MeV',
                     errors=True, include_prefactor=False, prefactor_energy=None):
    """ Note, emin, emax, and prefactor_energy must be in MeV """

    if emin is None and emax is None: 
        emin, emax = get_full_energy_range(like)

    cef=lambda e: units.convert(e,'MeV',flux_units)
    ce=lambda e: units.convert(e,'MeV',energy_units)
    f=dict(flux=like.flux(name,emin=emin,emax=emax),
           flux_units='ph/cm^2/s',
           eflux=cef(like.energyFlux(name,emin=emin,emax=emax)),
           eflux_units='%s/cm^2/s' % flux_units,
           emin=ce(emin),
           emax=ce(emax),
           energy_units=energy_units)

    if errors:
        try:
            # in case the errors were not calculated
            f['flux_err']=like.fluxError(name,emin=emin,emax=emax)
            f['eflux_err']=cef(like.energyFluxError(name,emin=emin,emax=emax))
        except Exception as ex:
            print('ERROR calculating flux error:', ex)
            traceback.print_exc(file=sys.stdout)
            f['flux_err']=-1
            f['eflux_err']=-1
Example #5
    def plot_points(self, axes, **kwargs):
        """ Plot the SED using matpotlib. """
        data_kwargs=dict(color='black')
        data_kwargs.update(kwargs)


        edict = self.results['Energy']
        fdict = self.results['dNdE']

        file_energy_units = units.fromstring(edict['Units'])
        file_flux_units = units.fromstring(fdict['Units'])

        ce = lambda x: units.convert(np.asarray(x),file_energy_units, axes.energy_units_obj)

        # get energy part
        energy = ce(edict['Value'])
        if 'Lower' in edict and 'Upper' in edict:
            lower_energy = ce(edict['Lower'])
            upper_energy = ce(edict['Upper'])
            has_energy_errors = True
        else:
            has_energy_errors = False

        # get spectral part

        cf = lambda y: units.convert(energy**2*np.asarray(y),
                                     axes.energy_units_obj**2*file_flux_units,
                                     axes.flux_units_obj/units.cm**2/units.s)

        dnde = cf(fdict['Value'])

        if 'Lower_Error' in fdict and 'Upper_Error' in fdict:
            # asymmetric errors
            dnde_lower_err = cf(fdict['Lower_Error'])
            dnde_upper_err = cf(fdict['Upper_Error'])
            has_assymetric_errors = True
        else:
            has_assymetric_errors = False
            dnde_err = cf(fdict['Average_Error'])

        # get limits, otherwise assume all significant
        if 'Upper_Limit' in fdict and 'Significant' in self.results:
            dnde_ul = cf(fdict['Upper_Limit'])
            significant = np.asarray(self.results['Significant'])
            has_upper_limits=True
        else:
            has_upper_limits=False

        plot_points(
            x=energy,
            xlo=lower_energy if has_energy_errors else None,
            xhi=upper_energy if has_energy_errors else None,
            y=dnde,
            y_lower_err=dnde_lower_err if has_assymetric_errors else dnde_err,
            y_upper_err=dnde_upper_err if has_assymetric_errors else dnde_err,
            y_ul=dnde_ul if has_upper_limits else None,
            significant=significant if has_upper_limits else np.ones(len(energy),dtype=bool),
            axes=axes, **data_kwargs)
Example #6
    def pointlike_sed_to_dict(bandflux, flux_units='erg', energy_units='MeV'):
        results = defaultdict(lambda: defaultdict(list))

        ce = lambda e: units.convert(e, 'MeV', energy_units)
        cp = lambda e: units.convert(e, '1/MeV', '1/%s' % flux_units)

        results['Energy']['Units'] = energy_units
        results['dNdE']['Units'] = 'ph/cm^2/s/%s' % flux_units

        results['Significant'] = []
        for r in bandflux.rec:

            results['Energy']['Lower'].append(ce(r.elow))
            results['Energy']['Upper'].append(ce(r.ehigh))
            results['Energy']['Value'].append(ce(np.sqrt(r.elow * r.ehigh)))

            # Undo scaling in the bandflux recarray
            fac = r.elow * r.ehigh * bandflux.scale_factor

            if r.flux > 0:
                results['Significant'].append(True)
                results['dNdE']['Value'].append(cp(r.flux / fac))
                results['dNdE']['Average_Error'].append(
                    cp((r.uflux / fac - r.lflux / fac) / 2))
                results['dNdE']['Lower_Error'].append(
                    cp((r.flux - r.lflux) / fac))
                results['dNdE']['Upper_Error'].append(
                    cp((r.uflux - r.flux) / fac))
                results['dNdE']['Upper_Limit'].append(np.nan)
            else:
                results['Significant'].append(False)
                results['dNdE']['Value'].append(np.nan)
                results['dNdE']['Average_Error'].append(np.nan)
                results['dNdE']['Lower_Error'].append(np.nan)
                results['dNdE']['Upper_Error'].append(np.nan)
                results['dNdE']['Upper_Limit'].append(cp(r.uflux / fac))

        return tolist(results)
Example #7
def pointlike_powerlaw_prefactor_dict(roi, which, flux_units='erg', errors=True):
    model=roi.get_model(which)

    assert isinstance(model,PowerLaw)

    cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units)
    d = dict()
    d['prefactor'] = cp(model['norm'])
    if errors:
        d['prefactor_err'] = cp(model.error('norm'))
    d['prefactor_units'] = 'ph/cm^2/s/%s' % flux_units
    d['prefactor_energy'] = model.e0
    d['prefactor_energy_units'] = 'MeV'
    return d
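
The dictionary holds the power-law normalization (converted to ph/cm^2/s/erg) together with its pivot energy e0 in MeV, but not the photon index. Assuming the usual convention dN/dE = N0 * (E/e0)**-Gamma, the differential flux at any other energy can be reconstructed from these entries plus the index; the helper below is only an illustration and is not part of the original module.

def dnde_from_prefactor_dict(d, index, energy_mev):
    """ Evaluate dN/dE = N0 * (E/e0)**-Gamma from the prefactor dict above.

        `index` (the photon index Gamma) is not stored in the dict and must
        be supplied separately; the result carries d['prefactor_units'].
    """
    return d['prefactor'] * (energy_mev / d['prefactor_energy']) ** (-index)

# e.g. dnde_from_prefactor_dict({'prefactor': 6.2e-5,
#                                'prefactor_energy': 1000.0,
#                                'prefactor_units': 'ph/cm^2/s/erg'},
#                               index=2.0, energy_mev=500.0)
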
Example #8
def gtlike_powerlaw_prefactor_dict(like, name, flux_units='erg', errors=True, minos_errors=False):
    cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units)

    source = like.logLike.getSource(name)
    spectrum = source.spectrum()
    assert spectrum.genericName() == 'PowerLaw'
    pref = spectrum.getParam('Prefactor')
    scale = spectrum.getParam('Scale')

    d=dict()
    d['prefactor'] = cp(pref.getTrueValue())
    if errors:
        d['prefactor_err'] = cp(pref.error()*pref.getScale())
    if minos_errors:
        try:
            lower,upper=like.minosError(name, 'Prefactor')
            d['prefactor_lower_err'] = cp(-1*lower*pref.getScale())
            d['prefactor_upper_err'] = cp(upper*pref.getScale())
        except Exception as ex:
            print('ERROR computing Minos errors on parameter Prefactor for source %s:' % name, ex)
            d['prefactor_lower_err'] = np.nan
            d['prefactor_upper_err'] = np.nan
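
Depending on the errors/minos_errors flags, the returned dict carries either a symmetric prefactor_err or the asymmetric prefactor_lower_err/prefactor_upper_err pair (MINOS lower errors come back negative, which is why the code above flips the sign). A small, purely illustrative formatter for either case:

def format_prefactor(d):
    """ Pretty-print the prefactor dict built above (illustration only). """
    units_str = d.get('prefactor_units', '')
    if 'prefactor_lower_err' in d and 'prefactor_upper_err' in d:
        return '%.2e +%.2e/-%.2e %s' % (d['prefactor'], d['prefactor_upper_err'],
                                        d['prefactor_lower_err'], units_str)
    if 'prefactor_err' in d:
        return '%.2e +/- %.2e %s' % (d['prefactor'], d['prefactor_err'], units_str)
    return '%.2e %s' % (d['prefactor'], units_str)
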
Example #9
def energy_dict(emin, emax, energy_units='MeV'):
    ce=lambda e: units.convert(e,'MeV',energy_units)
    return dict(emin=ce(emin),
                emax=ce(emax),
                emiddle=ce(np.sqrt(emin*emax)),
                energy_units=energy_units)
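
A quick self-contained check of this helper: the middle energy is the geometric mean of the two bin edges, and all three values are converted to the requested units. Assuming units.convert(x, 'MeV', 'GeV') simply divides by 1000, a call like energy_dict(100, 100000, energy_units='GeV') would give emin=0.1, emax=100.0 and emiddle close to 3.16 GeV.

import numpy as np

# Geometric mean of the 100 MeV - 100 GeV band, expressed in GeV.
print(np.sqrt(100 * 100000) / 1000.0)   # ~3.1623
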
Example #10
    if errors:
        try:
            # in case the errors were not calculated
            f['flux_err']=like.fluxError(name,emin=emin,emax=emax)
            f['eflux_err']=cef(like.energyFluxError(name,emin=emin,emax=emax))
        except Exception as ex:
            print('ERROR calculating flux error:', ex)
            traceback.print_exc(file=sys.stdout)
            f['flux_err']=-1
            f['eflux_err']=-1

    if include_prefactor:
        assert prefactor_energy is not None
        source = like.logLike.getSource(name)
        spectrum = source.spectrum()
        cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units)
        f['prefactor'] = cp(SpectrumPlotter.get_dnde_mev(spectrum,prefactor_energy))
        f['prefactor_units'] = 'ph/cm^2/s/%s' % flux_units
        f['prefactor_energy'] = ce(prefactor_energy)
    return tolist(f)

def gtlike_powerlaw_prefactor_dict(like, name, flux_units='erg', errors=True, minos_errors=False):
    cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units)

    source = like.logLike.getSource(name)
    spectrum = source.spectrum()
    assert spectrum.genericName() == 'PowerLaw'
    pref = spectrum.getParam('Prefactor')
    scale = spectrum.getParam('Scale')

    d=dict()
Example #11
    def get_results(self, pwn):

        classifier = self.get_classification(pwn)

        spatial_model=classifier['spatial_model']
        spectral_model=classifier['spectral_model']
        source_class = classifier['source_class']

        if spatial_model is None or spectral_model is None or source_class is None:
            print('%s has not been classified yet, skipping' % pwn)
            return None

        assert source_class in PWNClassifier.allowed_source_class
        assert source_class == 'Upper_Limit' or spatial_model in PWNClassifier.allowed_spatial_models
        assert source_class == 'Upper_Limit' or spectral_model in PWNClassifier.allowed_spectral_models 

        results = self.loader.get_results(pwn, require_all_exists=True, get_variability=True)

        if results is None:
            print('Results for %s are not done yet, skipping' % pwn)
            return None

        point_gtlike = results['point']['gtlike']
        extended_gtlike = results['extended']['gtlike']

        if isnan(spatial_model): # upper limits
            gtlike = results['at_pulsar']['gtlike']
            pointlike = results['at_pulsar']['pointlike']
        else:
            gtlike = results[spatial_model.lower()]['gtlike']
            pointlike = results[spatial_model.lower()]['pointlike']

        at_pulsar_cutoff=results['at_pulsar']['gtlike']['test_cutoff']

        d = copy.copy(classifier)

        d['raw_phase'] = results['raw_phase']

        if 'shifted_phase' in results:
            d['shifted_phase'] = results['shifted_phase']

        # likelihood stuff

        d['ts_point'] = max(point_gtlike['TS']['reoptimize'],0)

        d['abbreviated_source_class'] = self.abbreviated_source_class_mapper[source_class]

        if source_class in ['Confused', 'Pulsar', 'Pulsar_Confused', 'PWN']:
            d['ts_ext'] = max(extended_gtlike['TS']['reoptimize']-point_gtlike['TS']['reoptimize'],0)
            d['ts_cutoff'] = max(at_pulsar_cutoff['hypothesis_1']['TS']['reoptimize']-at_pulsar_cutoff['hypothesis_0']['TS']['reoptimize'],0)

            alt_models = [point_gtlike['altdiff'][dist,halo,TS] for dist,halo,TS in itertools.product(['SNR','Lorimer'],[4,10],[150,100000])]
            if np.any([i is None for i in alt_models]):
                d['ts_altdiff'] = None
                print('BAD = ')
            else:
                all_TS = [ i['TS']['reoptimize'] if i is not None else None for i in alt_models]
                d['ts_altdiff'] = max(min(all_TS),0)
                print(d['ts_point'], all_TS, d['ts_altdiff'])

        elif source_class == 'Upper_Limit':
            pass
        else:
            raise Exception("...")

        if spatial_model == 'Extended':
            # note, does not make sense to use extended spatial model for variability
            d['ts_var'] = results['point']['variability']['TS_var']['gtlike']
        elif isinstance(spatial_model,float) and np.isnan(spatial_model):
            # upper limit
            d['ts_var'] = results['at_pulsar']['variability']['TS_var']['gtlike']
        else:
            d['ts_var'] = results[spatial_model.lower()]['variability']['TS_var']['gtlike']

        # spectral stuff

        d['flux'] = None
        d['flux_err'] = None

        d['energy_flux'] = None
        d['energy_flux_err'] = None

        d['prefactor'] = None
        d['prefactor_err'] = None

        d['normalization'] = None
        d['normalization_err'] = None

        d['index'] = None
        d['index_err'] = None

        d['model_scale'] = None

        d['cutoff'] = None
        d['cutoff_err'] = None

        convert_prefactor = lambda x: units.convert(x, 'ph/cm^2/s/MeV', 'ph/cm^2/s/erg')

        if source_class != 'Upper_Limit':

            if spectral_model in ['PowerLaw','FileFunction']:
                d['flux'] = gtlike['flux']['flux']
                d['flux_err'] = gtlike['flux']['flux_err']

                assert gtlike['flux']['flux_units'] == 'ph/cm^2/s'
                d['energy_flux'] = gtlike['flux']['eflux']
                d['energy_flux_err'] = gtlike['flux']['eflux_err']
                assert gtlike['flux']['eflux_units'] == 'erg/cm^2/s'

                d['spectrum'] = gtlike['spectrum']

                if spectral_model == 'PowerLaw':

                    # Note, prefactor is converted from ph/cm^2/s/MeV to ph/cm^2/s/erg
                    d['prefactor'] = convert_prefactor(gtlike['spectrum']['Prefactor'])
                    d['prefactor_err'] = convert_prefactor(gtlike['spectrum']['Prefactor_err'])

                    d['index'] = -1*gtlike['spectrum']['Index']
                    d['index_err'] = np.abs(gtlike['spectrum']['Index_err'])

                    d['model_scale'] = gtlike['spectrum']['Scale']

                elif spectral_model == 'FileFunction':

                    d['normalization'] = gtlike['spectrum']['Normalization']
                    d['normalization_err'] = gtlike['spectrum']['Normalization_err']

            elif spectral_model == 'PLSuperExpCutoff':
                h1 = gtlike['test_cutoff']['hypothesis_1']

                d['spectrum'] = h1['spectrum']

                d['flux'] = h1['flux']['flux']
                d['flux_err'] = h1['flux']['flux_err']

                d['energy_flux'] = h1['flux']['eflux']
                d['energy_flux_err'] = h1['flux']['eflux_err']

                assert h1['flux']['flux_units'] == 'ph/cm^2/s'
                assert h1['flux']['eflux_units'] == 'erg/cm^2/s'
            
                d['prefactor'] = convert_prefactor(h1['spectrum']['Prefactor'])
                d['prefactor_err'] = convert_prefactor(h1['spectrum']['Prefactor_err'])

                d['index'] = -1*h1['spectrum']['Index1']
                d['index_err'] = np.abs(h1['spectrum']['Index1_err'])

                d['model_scale'] = h1['spectrum']['Scale']

                d['cutoff'] = h1['spectrum']['Cutoff']
                d['cutoff_err'] = h1['spectrum']['Cutoff_err']
        else:
            # add in upper limits
            pass

        # spatial stuff

        d['ra'] = pointlike['position']['equ'][0]
        d['dec'] = pointlike['position']['equ'][1]

        d['glon'] = pointlike['position']['gal'][0]
        d['glat'] = pointlike['position']['gal'][1]

        d['poserr'] = None

        if spatial_model in [ 'Point', 'Extended' ]: 

            ellipse = pointlike['spatial_model']['ellipse']
            if 'lsigma' in ellipse:
                d['poserr'] = ellipse['lsigma']
            else:
                print('WARNING: localization failed for %s' % pwn)
                d['poserr'] = None

        d['extension'] = None
        d['extension_err'] = None
        if spatial_model == 'Extended':
            d['extension'] = pointlike['spatial_model']['Sigma']
            d['extension_err'] = pointlike['spatial_model']['Sigma_err']

        d['powerlaw_flux_upper_limit'] = None
        d['powerlaw_energy_flux_upper_limit'] = None

        d['cutoff_flux_upper_limit'] = None
        d['cutoff_energy_flux_upper_limit'] = None

        if source_class in ['Confused', 'Pulsar', 'Pulsar_Confused', 'PWN']:
            pass
        elif source_class == 'Upper_Limit':
            d['powerlaw_flux_upper_limit'] = gtlike['powerlaw_upper_limit']['flux']
            d['powerlaw_energy_flux_upper_limit'] = gtlike['powerlaw_upper_limit']['eflux']

            assert gtlike['powerlaw_upper_limit']['flux_units'] == 'ph/cm^2/s'
            assert gtlike['powerlaw_upper_limit']['eflux_units'] == 'erg/cm^2/s'

            d['cutoff_flux_upper_limit'] = gtlike['cutoff_upper_limit']['flux']
            d['cutoff_energy_flux_upper_limit'] = gtlike['cutoff_upper_limit']['eflux']
            
            assert gtlike['cutoff_upper_limit']['flux_units'] == 'ph/cm^2/s'
            assert gtlike['cutoff_upper_limit']['eflux_units'] == 'erg/cm^2/s'
        else:
            raise Exception("...")

        # add in 4BPD SED
        sed = gtlike['seds']['4bpd']
        sed_ts = np.asarray(sed['Test_Statistic'])
        sed_ts = np.where(sed_ts>0,sed_ts,0)
        sed_lower_energy = np.asarray(sed['Energy']['Lower'])
        sed_upper_energy = np.asarray(sed['Energy']['Upper'])
        sed_middle_energy = np.asarray(sed['Energy']['Value'])
        sed_prefactor = np.asarray(sed['dNdE']['Value'])
        sed_prefactor_lower_err = np.asarray(sed['dNdE']['Lower_Error'])
        sed_prefactor_upper_err = np.asarray(sed['dNdE']['Upper_Error'])
        sed_prefactor_upper_limit = np.asarray(sed['dNdE']['Upper_Limit'])

        assert sed['Energy']['Units'] == 'MeV'
        assert sed['dNdE']['Units'] == 'ph/cm^2/s/erg'

        d['sed_4bpd'] = sed

        # Note, when overall source is not significant, do not include upper limits
        d['sed_ts'] = sed_ts
        d['sed_lower_energy'] = sed_lower_energy
        d['sed_upper_energy'] = sed_upper_energy
        d['sed_middle_energy'] = sed_middle_energy
        if source_class in ['Confused', 'Pulsar', 'Pulsar_Confused', 'PWN']:
            significant = (sed_ts >= 4)
            d['sed_prefactor'] = np.where(significant, sed_prefactor, np.nan)
            d['sed_prefactor_lower_err'] = np.where(significant, sed_prefactor_lower_err, np.nan)
            d['sed_prefactor_upper_err'] = np.where(significant, sed_prefactor_upper_err, np.nan)
            d['sed_prefactor_upper_limit'] = np.where(~significant, sed_prefactor_upper_limit, np.nan)
        elif source_class == 'Upper_Limit':
            array_from = lambda x: np.asarray([x]*len(sed_ts))
            d['sed_prefactor'] = array_from(np.nan)
            d['sed_prefactor_lower_err'] = array_from(np.nan)
            d['sed_prefactor_upper_err'] = array_from(np.nan)
            d['sed_prefactor_upper_limit'] = sed_prefactor_upper_limit
        else:
            raise Exception("...")

        return d
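
The SED masking at the end keys on sed_ts >= 4. In the usual Wilks approximation the per-bin detection significance is roughly sqrt(TS), so TS = 4 corresponds to about 2 sigma; below that the code quotes an upper limit instead of a flux point. A standalone sketch of the same masking logic, with made-up numbers:

import numpy as np

sed_ts = np.array([25.0, 9.0, 1.2, 0.0])             # per-bin test statistics
sed_prefactor = np.array([3e-6, 1e-6, 4e-7, 2e-7])   # ph/cm^2/s/erg (placeholders)
sed_upper_limit = np.array([np.nan, np.nan, 6e-7, 3e-7])

approx_sigma = np.sqrt(np.where(sed_ts > 0, sed_ts, 0))   # ~[5.0, 3.0, 1.1, 0.0]
significant = sed_ts >= 4                                  # same threshold as above

flux_points = np.where(significant, sed_prefactor, np.nan)
upper_limits = np.where(~significant, sed_upper_limit, np.nan)
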
Example #12
    def get_results(self, pwn):

        classifier = self.get_classification(pwn)

        spatial_model = classifier['spatial_model']
        spectral_model = classifier['spectral_model']
        source_class = classifier['source_class']

        if spatial_model is None or spectral_model is None or source_class is None:
            print('%s has not been classified yet, skipping' % pwn)
            return None

        assert source_class in PWNClassifier.allowed_source_class
        assert source_class == 'Upper_Limit' or spatial_model in PWNClassifier.allowed_spatial_models
        assert source_class == 'Upper_Limit' or spectral_model in PWNClassifier.allowed_spectral_models

        results = self.loader.get_results(pwn,
                                          require_all_exists=True,
                                          get_variability=True)

        if results is None:
            print('Results for %s are not done yet, skipping' % pwn)
            return None

        point_gtlike = results['point']['gtlike']
        extended_gtlike = results['extended']['gtlike']

        if isnan(spatial_model):  # upper limits
            gtlike = results['at_pulsar']['gtlike']
            pointlike = results['at_pulsar']['pointlike']
        else:
            gtlike = results[spatial_model.lower()]['gtlike']
            pointlike = results[spatial_model.lower()]['pointlike']

        at_pulsar_cutoff = results['at_pulsar']['gtlike']['test_cutoff']

        d = copy.copy(classifier)

        d['raw_phase'] = results['raw_phase']

        if 'shifted_phase' in results:
            d['shifted_phase'] = results['shifted_phase']

        # likelihood stuff

        d['ts_point'] = max(point_gtlike['TS']['reoptimize'], 0)

        d['abbreviated_source_class'] = self.abbreviated_source_class_mapper[
            source_class]

        if source_class in ['Confused', 'Pulsar', 'Pulsar_Confused', 'PWN']:
            d['ts_ext'] = max(
                extended_gtlike['TS']['reoptimize'] -
                point_gtlike['TS']['reoptimize'], 0)
            d['ts_cutoff'] = max(
                at_pulsar_cutoff['hypothesis_1']['TS']['reoptimize'] -
                at_pulsar_cutoff['hypothesis_0']['TS']['reoptimize'], 0)

            alt_models = [
                point_gtlike['altdiff'][dist, halo, TS] for dist, halo, TS in
                itertools.product(['SNR', 'Lorimer'], [4, 10], [150, 100000])
            ]
            if np.any([i is None for i in alt_models]):
                d['ts_altdiff'] = None
                print('BAD = ')
            else:
                all_TS = [
                    i['TS']['reoptimize'] if i is not None else None
                    for i in alt_models
                ]
                d['ts_altdiff'] = max(min(all_TS), 0)
                print(d['ts_point'], all_TS, d['ts_altdiff'])

        elif source_class == 'Upper_Limit':
            pass
        else:
            raise Exception("...")

        if spatial_model == 'Extended':
            # note, does not make sense to use extended spatial model for variability
            d['ts_var'] = results['point']['variability']['TS_var']['gtlike']
        elif isinstance(spatial_model, float) and np.isnan(spatial_model):
            # upper limit
            d['ts_var'] = results['at_pulsar']['variability']['TS_var'][
                'gtlike']
        else:
            d['ts_var'] = results[
                spatial_model.lower()]['variability']['TS_var']['gtlike']

        # spectral stuff

        d['flux'] = None
        d['flux_err'] = None

        d['energy_flux'] = None
        d['energy_flux_err'] = None

        d['prefactor'] = None
        d['prefactor_err'] = None

        d['normalization'] = None
        d['normalization_err'] = None

        d['index'] = None
        d['index_err'] = None

        d['model_scale'] = None

        d['cutoff'] = None
        d['cutoff_err'] = None

        convert_prefactor = lambda x: units.convert(x, 'ph/cm^2/s/MeV',
                                                    'ph/cm^2/s/erg')

        if source_class != 'Upper_Limit':

            if spectral_model in ['PowerLaw', 'FileFunction']:
                d['flux'] = gtlike['flux']['flux']
                d['flux_err'] = gtlike['flux']['flux_err']

                assert gtlike['flux']['flux_units'] == 'ph/cm^2/s'
                d['energy_flux'] = gtlike['flux']['eflux']
                d['energy_flux_err'] = gtlike['flux']['eflux_err']
                assert gtlike['flux']['eflux_units'] == 'erg/cm^2/s'

                d['spectrum'] = gtlike['spectrum']

                if spectral_model == 'PowerLaw':

                    # Note, prefactor is converted from ph/cm^2/s/MeV to ph/cm^2/s/erg
                    d['prefactor'] = convert_prefactor(
                        gtlike['spectrum']['Prefactor'])
                    d['prefactor_err'] = convert_prefactor(
                        gtlike['spectrum']['Prefactor_err'])

                    d['index'] = -1 * gtlike['spectrum']['Index']
                    d['index_err'] = np.abs(gtlike['spectrum']['Index_err'])

                    d['model_scale'] = gtlike['spectrum']['Scale']

                elif spectral_model == 'FileFunction':

                    d['normalization'] = gtlike['spectrum']['Normalization']
                    d['normalization_err'] = gtlike['spectrum'][
                        'Normalization_err']

            elif spectral_model == 'PLSuperExpCutoff':
                h1 = gtlike['test_cutoff']['hypothesis_1']

                d['spectrum'] = h1['spectrum']

                d['flux'] = h1['flux']['flux']
                d['flux_err'] = h1['flux']['flux_err']

                d['energy_flux'] = h1['flux']['eflux']
                d['energy_flux_err'] = h1['flux']['eflux_err']

                assert h1['flux']['flux_units'] == 'ph/cm^2/s'
                assert h1['flux']['eflux_units'] == 'erg/cm^2/s'

                d['prefactor'] = convert_prefactor(h1['spectrum']['Prefactor'])
                d['prefactor_err'] = convert_prefactor(
                    h1['spectrum']['Prefactor_err'])

                d['index'] = -1 * h1['spectrum']['Index1']
                d['index_err'] = np.abs(h1['spectrum']['Index1_err'])

                d['model_scale'] = h1['spectrum']['Scale']

                d['cutoff'] = h1['spectrum']['Cutoff']
                d['cutoff_err'] = h1['spectrum']['Cutoff_err']
        else:
            # add in upper limits
            pass

        # spatial stuff

        d['ra'] = pointlike['position']['equ'][0]
        d['dec'] = pointlike['position']['equ'][1]

        d['glon'] = pointlike['position']['gal'][0]
        d['glat'] = pointlike['position']['gal'][1]

        d['poserr'] = None

        if spatial_model in ['Point', 'Extended']:

            ellipse = pointlike['spatial_model']['ellipse']
            if 'lsigma' in ellipse:
                d['poserr'] = ellipse['lsigma']
            else:
                print('WARNING: localization failed for %s' % pwn)
                d['poserr'] = None

        d['extension'] = None
        d['extension_err'] = None
        if spatial_model == 'Extended':
            d['extension'] = pointlike['spatial_model']['Sigma']
            d['extension_err'] = pointlike['spatial_model']['Sigma_err']

        d['powerlaw_flux_upper_limit'] = None
        d['powerlaw_energy_flux_upper_limit'] = None

        d['cutoff_flux_upper_limit'] = None
        d['cutoff_energy_flux_upper_limit'] = None

        if source_class in ['Confused', 'Pulsar', 'Pulsar_Confused', 'PWN']:
            pass
        elif source_class == 'Upper_Limit':
            d['powerlaw_flux_upper_limit'] = gtlike['powerlaw_upper_limit'][
                'flux']
            d['powerlaw_energy_flux_upper_limit'] = gtlike[
                'powerlaw_upper_limit']['eflux']

            assert gtlike['powerlaw_upper_limit']['flux_units'] == 'ph/cm^2/s'
            assert gtlike['powerlaw_upper_limit'][
                'eflux_units'] == 'erg/cm^2/s'

            d['cutoff_flux_upper_limit'] = gtlike['cutoff_upper_limit']['flux']
            d['cutoff_energy_flux_upper_limit'] = gtlike['cutoff_upper_limit'][
                'eflux']

            assert gtlike['cutoff_upper_limit']['flux_units'] == 'ph/cm^2/s'
            assert gtlike['cutoff_upper_limit']['eflux_units'] == 'erg/cm^2/s'
        else:
            raise Exception("...")

        # add in 4BPD SED
        sed = gtlike['seds']['4bpd']
        sed_ts = np.asarray(sed['Test_Statistic'])
        sed_ts = np.where(sed_ts > 0, sed_ts, 0)
        sed_lower_energy = np.asarray(sed['Energy']['Lower'])
        sed_upper_energy = np.asarray(sed['Energy']['Upper'])
        sed_middle_energy = np.asarray(sed['Energy']['Value'])
        sed_prefactor = np.asarray(sed['dNdE']['Value'])
        sed_prefactor_lower_err = np.asarray(sed['dNdE']['Lower_Error'])
        sed_prefactor_upper_err = np.asarray(sed['dNdE']['Upper_Error'])
        sed_prefactor_upper_limit = np.asarray(sed['dNdE']['Upper_Limit'])

        assert sed['Energy']['Units'] == 'MeV'
        assert sed['dNdE']['Units'] == 'ph/cm^2/s/erg'

        d['sed_4bpd'] = sed

        # Note, when overall source is not significant, do not include upper limits
        d['sed_ts'] = sed_ts
        d['sed_lower_energy'] = sed_lower_energy
        d['sed_upper_energy'] = sed_upper_energy
        d['sed_middle_energy'] = sed_middle_energy
        if source_class in ['Confused', 'Pulsar', 'Pulsar_Confused', 'PWN']:
            significant = (sed_ts >= 4)
            d['sed_prefactor'] = np.where(significant, sed_prefactor, np.nan)
            d['sed_prefactor_lower_err'] = np.where(significant,
                                                    sed_prefactor_lower_err,
                                                    np.nan)
            d['sed_prefactor_upper_err'] = np.where(significant,
                                                    sed_prefactor_upper_err,
                                                    np.nan)
            d['sed_prefactor_upper_limit'] = np.where(
                ~significant, sed_prefactor_upper_limit, np.nan)
        elif source_class == 'Upper_Limit':
            array_from = lambda x: np.asarray([x] * len(sed_ts))
            d['sed_prefactor'] = array_from(np.nan)
            d['sed_prefactor_lower_err'] = array_from(np.nan)
            d['sed_prefactor_upper_err'] = array_from(np.nan)
            d['sed_prefactor_upper_limit'] = sed_prefactor_upper_limit
        else:
            raise Exception("...")

        return d