Example #1
File: bootstrap.py Project: sabren/drupy
def variable_set(name, value):
    """
   Set a persistent variable.
  
   @param name
     The name of the variable to set.
   @param value
     The value to set. This can be any PHP data type; these functions take care
     of serialization as necessary.
  """
    lib_database.merge("variable").key({"name": name}).fields({"value": php.serialize(value)}).execute()
    cache_clear_all("variables", "cache")
    settings.conf[name] = value
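A minimal usage sketch, assuming the module above is importable as lib_bootstrap and that its database connection and settings.conf have already been initialized; the variable name and value are purely illustrative:

lib_bootstrap.variable_set('site_name', u'My test site')
# The value has been serialized into the variable table via lib_database.merge()
# and mirrored into settings.conf for the rest of the request:
assert settings.conf['site_name'] == u'My test site'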
Example #2
File: bootstrap.py Project: vaibbhav/drupy
def variable_set(name, value):
    """
   Set a persistent variable.
  
   @param name
     The name of the variable to set.
   @param value
     The value to set. This can be any PHP data type; these functions take care
     of serialization as necessary.
  """
    lib_database.merge('variable').key({'name' : name}).fields(\
      {'value' : php.serialize(value)}).execute()
    cache_clear_all('variables', 'cache')
    settings.conf[name] = value
Example #3
def find_channels():
    '''
    Function to determine the channel range needed for input during extraction.
    Requires the file energy_conversion_table.txt to determine the initial
    channel selection.
    '''

    purpose = 'Finding the correct channels for later extraction'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from collections import defaultdict
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    d = defaultdict(list)
    for obsid, group in db.groupby(['obsids']):
        group = group.drop_duplicates('paths_data')

        print obsid
        for mode, path, time, bit in zip(group.modes, group.paths_data,
                                         group.times, group.bitsize):

            if mode == 'std1':
                d['paths_data'].append(path)
                d['energy_channels'].append('INDEF')
                continue

            # Determine channels according to epoch
            abs_channels = calculated_energy_range(time, MIN_E, MAX_E, bit)
            final_channels = abs_channels

            # Check in which fashion the channels are binned, and return these
            if mode == 'event' or mode == 'binned':
                bin_channels = get_channel_range(mode, abs_channels, path, bit)
                final_channels = bin_channels

            print '   ', mode, '-->', final_channels

            d['paths_data'].append(path)
            d['energy_channels'].append(final_channels)

    # Update database and save
    df = pd.DataFrame(d)
    db = database.merge(db, df, ['energy_channels'])
    database.save(db)
    logs.stop_logging()
Example #4
def create_power_colours():
    '''
    Function to generate power spectral density based on RXTE lightcurves.
    '''

    # Let the user know what's going to happen
    purpose = 'Creating Power Colours'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from collections import defaultdict
    from math import isnan
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    # Get database
    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    d = defaultdict(list)
    for ps, group in db.groupby('power_spectra'):

        # Determine parameters
        obsid = group.obsids.values[0]
        path_obsid = group.paths_obsid.values[0]
        mode = group.modes.values[0]
        res = group.resolutions.values[0]

        print obsid, mode, res

        # Calculate power colour
        output = power_colour(ps)

        if output:
            pc1, pc1err, pc2, pc2err, constraint = output

            d['power_spectra'].append(ps)
            d['pc1'].append(pc1)
            d['pc1_err'].append(pc1err)
            d['pc2'].append(pc2)
            d['pc2_err'].append(pc2err)
            d['lt3sigma'].append(constraint)

    # Update database and save
    df = pd.DataFrame(d)
    db = database.merge(db, df,
                        ['pc1', 'pc1_err', 'pc2', 'pc2_err', 'lt3sigma'])
    print 'DBNUNIQUE\n', db.apply(pd.Series.nunique)
    database.save(db)
    logs.stop_logging()
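The database.merge(db, df, columns) helper used throughout these pipeline examples is project-specific. The sketch below is a rough pandas approximation of the behaviour the calls above rely on (join the new frame on the key columns the two frames share, then overwrite or add only the requested columns); merge_sketch is an assumption for illustration, not the project's actual implementation:

import pandas as pd

def merge_sketch(db, new, columns):
    # Join on the columns both frames have in common, excluding the ones
    # that are being updated.
    keys = [c for c in new.columns if c in db.columns and c not in columns]
    merged = db.merge(new, on=keys, how='left', suffixes=('', '_new'))
    # Keep only the requested columns from the new frame, preferring the
    # freshly computed values where both versions exist.
    for c in columns:
        if c + '_new' in merged.columns:
            merged[c] = merged[c + '_new']
            merged = merged.drop(c + '_new', axis=1)
    return merged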
Example #5
def cut_xray_flares():
    '''
    Function to find X-ray flares in a light curve by finding when the rate
    exceeds 7 sigma, and then cutting around it. Writes output to a file
    in an obsid-folder with the name corrected_rate_minus_xray_flare_
    <timingresolution>.dat if an X-ray flare was detected
    '''

    # Let the user know what's going to happen
    purpose = 'Determining X-ray flares'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from collections import defaultdict
    from math import isnan
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    d = defaultdict(list)
    for path_lc, group in db.groupby('bkg_corrected_lc'):

        # Set parameters
        obsid = group.obsids.values[0]
        path_bkg = group.rebinned_bkg.values[0]
        res = group.resolutions.values[0]
        mode = group.modes.values[0]
        path_obsid = group.paths_obsid.values[0]
        print obsid, mode, res

        # Calculate whether flare present
        result = cut_flare(path_obsid, path_lc, path_bkg, res, mode)

        if result:
            print 'Flare between:', result[2]
            d['bkg_corrected_lc'].append(path_lc)
            d['lc_no_flare'].append(result[0])
            d['bkg_no_flare'].append(result[1])
            d['flare_times'].append(result[2])

    # Update database and save
    df = pd.DataFrame(d)
    db = database.merge(db, df, ['lc_no_flare', 'bkg_no_flare', 'flare_times'])
    database.save(db)
    logs.stop_logging()
Example #6
    def analyze(self, x):
        if isinstance(x, list):
            return [self.analyze(y) for y in x]
        if isinstance(x, AudioData):
            return self.process(x)
        if isinstance(x, tuple):
            return self.analyze(*x)

        log.info("Grabbing stream...", uid=x.id)
        laf = LocalAudioStream(self.get_stream(x))
        setattr(laf, "_metadata", x)
        Database().ensure(merge(x, laf.analysis))
        return self.process(laf)
Example #7
    def analyze(self, x):
        if isinstance(x, list):
            return [self.analyze(y) for y in x]
        if isinstance(x, AudioData):
            return self.process(x)
        if isinstance(x, tuple):
            return self.analyze(*x)

        log.info("Grabbing stream...", uid=x.id)
        laf = LocalAudioStream(self.get_stream(x))
        #   To ensure that we have output,
        try:
            #   Read a single sample from the output to ensure
            #   FFMPEG can read *any* audio for us.
            laf[0:1]
        except (IOError, ValueError):
            raise ValueError("Could not read any samples from FFMPEG!")
        setattr(laf, "_metadata", x)
        Database().ensure(merge(x, laf.analysis))
        return self.process(laf)
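The same probe-before-processing idea in isolation; probe_first_sample is a hypothetical helper, and nothing below assumes more of the stream object than the slicing used above:

def probe_first_sample(stream):
    # Read a single sample so that a broken FFMPEG pipe fails loudly here
    # rather than deep inside later processing.
    try:
        stream[0:1]
    except (IOError, ValueError):
        raise ValueError("Could not read any samples from FFMPEG!")
    return stream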
Example #9
def spacecraft_filters():
    '''
    Function to run the ftool maketime over all filter files (.xfl.gz files).
    Creates time_filter.gti files and updates database with path to gti files
    '''

    purpose = 'Create time filters'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    from astropy.io import fits
    import os
    import pandas as pd
    import glob
    import numpy as np
    from collections import defaultdict
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    # Run maketime for each obsid
    d = defaultdict(list)
    for obsid, group in db.groupby(['obsids']):

        print obsid

        # Check whether an observation has high count rates
        lc = group.paths_obsid.values[0] + 'stdprod/xp' + obsid.replace(
            '-', '') + '_n1.lc.gz'
        try:
            hdulist = fits.open(lc)
            data = hdulist[1].data
            _, rate, _, _, _ = zip(*data)
            mean = np.nanmean(rate)
        except IOError:
            mean = 0.

        f = group.paths_obsid.values[0] + 'stdprod/x' + obsid.replace(
            '-', '') + '.xfl.gz'
        gti = group.paths_obsid.values[0] + 'time_filter.gti'

        # Remove previous version (maketime doesn't like them)
        try:
            os.remove(gti)
        except OSError:
            pass

        # Selection expression for maketime
        sel = (
            'elv.gt.10.and.' + 'offset.lt.0.02.and.' + 'num_pcu_on.ge.1.and.' +
            '(time_since_saa.gt.10.or.' +  # South Atlantic Anomaly
            'time_since_saa.lt.0.0)')

        if mean <= 500:
            sel += '.and.electron2.lt.0.1'

        command = [
            'maketime',
            f,  # Name of FITS file
            gti,  # Name of output FITS file
            sel,  # Selection expression
            'compact=no',  # Flag yes, if HK format is compact
            'time="TIME"'
        ]  # Column containing HK parameter times

        if os.path.exists(f):
            shell.execute(command)
            # Check if gti file is empty (exclude if so)
            hdulist = fits.open(gti)
            data = hdulist[1].data
            if len(data) == 0:
                gti = float('nan')
        else:
            gti = float('nan')

        d['obsids'].append(obsid)
        d['filters'].append(f)
        d['gti'].append(gti)

    # Update database and save
    df = pd.DataFrame(d)
    db = database.merge(db, df, ['filters', 'gti'])
    database.save(db)
    logs.stop_logging()
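For reference, the maketime call assembled above, roughly as it would look when run directly through subprocess rather than the project's shell.execute wrapper; the file names are invented and the selection expression is the mean <= 500 case:

import subprocess

command = [
    'maketime',
    'P93416/93416-01-01-00/stdprod/x934160101000.xfl.gz',  # hypothetical filter file
    'P93416/93416-01-01-00/time_filter.gti',                # output GTI file
    'elv.gt.10.and.offset.lt.0.02.and.num_pcu_on.ge.1.and.'
    '(time_since_saa.gt.10.or.time_since_saa.lt.0.0).and.electron2.lt.0.1',
    'compact=no',   # HK format is not compact
    'time="TIME"',  # column containing the HK parameter times
]
subprocess.call(command)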
Example #10
def set(cid, data, table = 'cache', expire = None, headers = None):
  """
   Store data in the persistent cache.
  
   The persistent cache is split up into four database
   tables. Contributed plugins can add additional tables.
  
   'cache_page': This table stores generated pages for anonymous
   users. This is the only table affected by the page cache setting on
   the administrator panel.
  
   'cache_menu': Stores the cachable part of the users' menus.
  
   'cache_filter': Stores filtered pieces of content. This table is
   periodically cleared of stale entries by cron.
  
   'cache': Generic cache storage table.
  
   The reasons for having several tables are as follows:
  
   - smaller tables allow for faster selects and inserts
   - we try to put fast changing cache items and rather static
     ones into different tables. The effect is that only the fast
     changing tables will need a lot of writes to disk. The more
     static tables will also be better cachable with MySQL's query cache
  
   @param cid
     The cache ID of the data to store.
   @param data
     The data to store in the cache. Complex data types will be
     automatically serialized before insertion.
     Strings will be stored as plain text and not serialized.
   @param table
     The table to store the data in. Valid core values are 'cache_filter',
     'cache_menu', 'cache_page', or 'cache'.
   @param expire
     One of the following values:
     - CACHE_PERMANENT: Indicates that the item should never be removed unless
       explicitly told to using cache_clear_all() with a cache ID.
     - CACHE_TEMPORARY: Indicates that the item should be removed at the next
       general cache wipe.
     - A Unix timestamp: Indicates that the item should be kept at least until
       the given time, after which it behaves like CACHE_TEMPORARY.
   @param headers
     A string containing HTTP php.header information for cached pages.
  """
  if expire is None:
    expire = lib_bootstrap.CACHE_PERMANENT
  fields = {
    'serialized' : 0,
    'created' : REQUEST_TIME,
    'expire' : expire,
    'headers' : headers
  }
  if (not php.is_string(data)):
    fields['data'] = php.serialize(data)
    fields['serialized'] = 1
  else:
    fields['data'] = data
    fields['serialized'] = 0
  lib_database.merge(table).key({'cid' : cid}).fields(fields).execute()
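A minimal usage sketch, assuming this module is importable as lib_cache and that REQUEST_TIME and lib_bootstrap.CACHE_TEMPORARY are defined; the cache IDs and payloads are purely illustrative:

# Complex data is serialized automatically; strings are stored as plain text.
lib_cache.set('menu:primary-links', {'items': [1, 2, 3]}, 'cache_menu',
              lib_bootstrap.CACHE_TEMPORARY)
lib_cache.set('page:front', '<html><body>cached page</body></html>', 'cache_page')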
Example #11
def determine_info():
    '''
    Function to split out the files created by Phil's script in find_data_files
    over each obsid folder, allowing code to be executed per obsid. Also
    creates a file with information on each observation
    '''
    purpose = 'Finding information on data files'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from collections import defaultdict
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)

    with open(paths.obsid_list, 'r') as f:
        obsids = [l.strip() for l in f.readlines()]

    db = pd.read_csv(paths.database)
    db['P'] = ['P' + o.split('-')[0] for o in db['obsids']]
    db['paths_obsid'] = paths.data + db['P'] + '/' + db['obsids'] + '/'

    # Find all files created by Phil's xtescan2 script
    all_files = []

    for f in db['P'].unique():
        p = os.path.join(paths.data, f, paths.selection + '*.list*')
        all_files.extend(glob.glob(p))

    # Remove all 500us event files
    # See above Table 1 on heasarc.gsfc.nasa.gov/docs/xte/recipes/bitmasks.html
    all_files = [a for a in all_files if '500us' not in a]

    d = defaultdict(list)

    # Split out values per obsid per mode
    for a in all_files:

        mode = a.split('.')[-2]

        if 'E_' in mode:
            with open(a) as e:
                for line in e:
                    obsid = line.split('/')[0]
                    path = os.getcwd() + '/P' + obsid.split(
                        '-')[0] + '/' + line.split(' ')[0]
                    time = line.split(' ')[2]
                    resolution = line.split(' ')[1].split('_')[1]

                    d['obsids'].append(obsid)
                    d['paths_data'].append(path)
                    d['times'].append(time)
                    d['resolutions'].append(resolution)
                    d['modes'].append('event')
                    d['bitsize'].append(float('NaN'))

        if 'Standard2f' in mode:
            with open(a) as s:
                for i, line in enumerate(s):
                    obsid = line.split('/')[0]
                    path = os.getcwd() + '/P' + obsid.split(
                        '-')[0] + '/' + line.split(' ')[0].split('.')[0]
                    time = line.split(' ')[2]

                    d['obsids'].append(obsid)
                    d['paths_data'].append(path)
                    d['times'].append(time)
                    d['resolutions'].append('16s')
                    d['modes'].append('std2')
                    d['bitsize'].append(float('NaN'))

        if 'Standard1b' in mode:
            with open(a) as s:
                for i, line in enumerate(s):
                    obsid = line.split('/')[0]
                    path = os.getcwd() + '/P' + obsid.split(
                        '-')[0] + '/' + line.split(' ')[0].split('.')[0]
                    time = line.split(' ')[2]

                    d['obsids'].append(obsid)
                    d['paths_data'].append(path)
                    d['times'].append(time)
                    d['resolutions'].append('125ms')
                    d['modes'].append('std1')
                    d['bitsize'].append(float('NaN'))

        if 'GoodXenon1' in mode:
            with open(a) as g:
                for line in g:
                    obsid = line.split('/')[0]
                    path = os.getcwd() + '/P' + obsid.split(
                        '-')[0] + '/' + line.split(' ')[0].split('.')[0]
                    time = line.split(' ')[2]
                    resolution = line.split(' ')[1].split('_')[1]

                    d['obsids'].append(obsid)
                    d['paths_data'].append(path)
                    d['times'].append(time)
                    d['resolutions'].append(resolution)
                    d['modes'].append('gx1')
                    d['bitsize'].append(float('NaN'))

        if 'GoodXenon2' in mode:
            with open(a) as g:
                for line in g:
                    obsid = line.split('/')[0]
                    path = os.getcwd() + '/P' + obsid.split(
                        '-')[0] + '/' + line.split(' ')[0].split('.')[0]
                    time = line.split(' ')[2]
                    resolution = line.split(' ')[1].split('_')[1]

                    d['obsids'].append(obsid)
                    d['paths_data'].append(path)
                    d['times'].append(time)
                    d['resolutions'].append(resolution)
                    d['modes'].append('gx2')
                    d['bitsize'].append(float('NaN'))

        if mode.startswith('B_'):
            with open(a) as e:
                for line in e:
                    obsid = line.split('/')[0]
                    path = os.getcwd() + '/P' + obsid.split(
                        '-')[0] + '/' + line.split(' ')[0]
                    time = line.split(' ')[2]
                    resolution = line.split(' ')[1].split('_')[1]
                    bitsize = mode.split('_')[-1]

                    d['obsids'].append(obsid)
                    d['paths_data'].append(path)
                    d['times'].append(time)
                    d['resolutions'].append(resolution)
                    d['modes'].append('binned')
                    d['bitsize'].append(bitsize)

    # Add information to database
    new_data = pd.DataFrame(d)
    new_c = ['paths_data', 'times', 'resolutions', 'modes', 'bitsize']
    db = database.merge(db, new_data, new_c)

    d = defaultdict(list)

    # List all data files per obsid, per mode, per res
    for obsid in db.obsids.unique():
        print obsid
        condo = (db.obsids == obsid)
        for mode in db[condo].modes.unique():
            condm = condo & (db.modes == mode)
            for res in db[condm].resolutions.unique():
                condr = condm & (db.resolutions == res)

                sf = mode

                # Ensure goodxenon files are listed together
                if mode[:2] == 'gx':
                    condr = condo & (
                        (db.modes == 'gx1') |
                        (db.modes == 'gx2')) & (db.resolutions == res)
                    sf = 'gx'

                # Create subdatabase of values
                sdb = db[condr]
                sdb = sdb.drop_duplicates('paths_data')

                # Write paths to file per obsid per mode per resolution
                filename = 'paths_' + sf + '_' + res
                path_to_output = sdb.paths_obsid.values[0] + filename

                # Deduplicate the data paths
                data = list(set(sdb.paths_data))

                with open(path_to_output, 'w') as text:
                    text.write('\n'.join(data) + '\n')

                d['obsids'].append(obsid)
                d['modes'].append(mode)
                d['resolutions'].append(res)
                d['paths_po_pm_pr'].append(path_to_output)

    #Add to database
    new_data = pd.DataFrame(d)
    db = database.merge(db, new_data, ['paths_po_pm_pr'])
    unfound_obsids = db[db.modes.isnull()].obsids.values
    if len(unfound_obsids) > 0:
        print 'ERROR: NO DATA FOR THESE OBSIDS', unfound_obsids
        db = db[db.modes.notnull()]

    database.save(db)
    logs.stop_logging()
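A hypothetical sketch of how one line of such a *.list file is sliced by the loops above; the field contents are invented, only the split logic mirrors the code:

line = '93416-01-01-00/pca/FS4a_1 E_125us_64M_0_1s 51234.0'  # invented example line
obsid = line.split('/')[0]                     # '93416-01-01-00'
rel_path = line.split(' ')[0]                  # path relative to the P-folder
config = line.split(' ')[1]                    # e.g. 'E_125us_64M_0_1s'
resolution = config.split('_')[1]              # '125us'
time = line.split(' ')[2]                      # observation time stamp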
Example #12
def create_response():
    '''
    Function to create responses for spectra
    '''

    purpose = 'Creating responses'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from astropy.io import fits
    from collections import defaultdict
    from math import isnan
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    # Only want std2 data
    d = defaultdict(list)
    for sp, group in db[(db.modes == 'std2')].groupby('spectra'):

        # Determine variables
        obsid = group.obsids.values[0]
        path_obsid = group.paths_obsid.values[0]
        bkg_sp = group.spectra_bkg.values[0]
        fltr = group.filters.values[0]

        # Check whether extracting per layer
        layers = False
        if sp.endswith('_per_layer.pha'):
            layers = True

        # Setup names
        # Must be short, otherwise it can't be written in the header of the
        # spectrum file
        out = path_obsid + 'sp.rsp'

        print obsid

        # Set up the command for pcarsp
        pcarsp = [
            'pcarsp',
            '-f' + sp,  #Input
            '-a' + fltr,  #Filter file
            '-n' + out,  #Output file
            '-s'
        ]  #Use smart std2 mode

        # Create responses
        shell.execute(pcarsp)
        #shell.execute(bkgpcarsp)

        # pcarsp doesn't allow for long file name to be written in the header
        # of the spectrum, so have to manually do it
        # Must have astropy version >1.0. Trust me.
        hdulist = fits.open(sp, mode='update')
        hdu = hdulist[1]
        hdu.header['RESPFILE'] = out
        hdulist.flush()  #.writeto(sp, clobber=True)

        d['spectra'].append(sp)
        d['rsp'].append(out)
        #d['rsp_bkg'].append(out_bkg)

    # Update database and save
    df = pd.DataFrame(d)
    db = database.merge(db, df, ['rsp'])
    database.save(db)
    logs.stop_logging()
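A short sketch of checking the header edit above; the file name is illustrative and fits.getval is standard astropy.io.fits usage, not part of this pipeline step:

from astropy.io import fits

# Confirm that the spectrum now points at the newly written response file.
print(fits.getval('sp.pha', 'RESPFILE', ext=1))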
Example #13
def create_power_spectra():
    '''
    Function to generate power spectral density based on RXTE lightcurves.
    '''

    # Let the user know what's going to happen
    purpose = 'Creating Power Spectra'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from collections import defaultdict
    from math import isnan
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    # Get database
    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    d = defaultdict(list)
    for path_lc, group in db.groupby('bkg_corrected_lc'):

        # Check whether x-ray flare was present
        path_bkg = group.rebinned_bkg.values[0]
        flare = False
        if 'lc_no_flare' in group:
            if pd.notnull(group.lc_no_flare.values[0]):
                flare = True
                former_lc = path_lc
                path_lc = group.lc_no_flare.values[0]
                path_bkg = group.bkg_no_flare.values[0]

        # Determine parameters
        obsid = group.obsids.values[0]
        path_obsid = group.paths_obsid.values[0]
        mode = group.modes.values[0]
        res = group.resolutions.values[0]

        if mode == 'gx2':
            mode = 'gx'

        print obsid, mode, res

        # Find std1 path
        try:
            std1 = db[((db.obsids == obsid) &
                       (db.modes == 'std1'))].paths_data.iloc[0]
            path_std1 = glob.glob(std1 + '*')[0]
        except IndexError:
            print(
                'ERROR: No std1 file for this obsid. Aborting power spectrum.')
            continue

        # Determine the maximum number of pcus on during the observation
        npcu = group.npcu.values[0]

        # Calculate power spectrum
        output = power_spectrum(path_lc, path_bkg, path_std1, npcu)

        if output:
            ps, ps_er, ps_sq, num_seg, freq, freq_er = output
            path_ps = path_obsid + mode + '_' + res + '.ps'

            # Create file within obsid folder
            with open(path_ps, 'w') as f:
                # For each value in a power spectrum
                for i, value in enumerate(ps):
                    line = (repr(value) + ' ' + repr(ps_er[i]) + ' ' +
                            repr(freq[i]) + ' ' + repr(freq_er[i]) + ' ' +
                            repr(ps_sq[i]) + ' ' + repr(num_seg) + '\n')
                    f.write(line)

            if not flare:
                d['bkg_corrected_lc'].append(path_lc)
                d['lc_no_flare'].append(float('NaN'))
                d['power_spectra'].append(path_ps)
            else:
                d['bkg_corrected_lc'].append(former_lc)
                d['lc_no_flare'].append(path_lc)
                d['power_spectra'].append(path_ps)

    # Update database and save
    df = pd.DataFrame(d)
    db = database.merge(db, df, ['power_spectra'])
    database.save(db)
    logs.stop_logging()
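A short sketch of reading one of the resulting .ps files back; the column order follows the write loop above, the file name is illustrative, and numpy here is an assumption rather than part of this pipeline step:

from numpy import genfromtxt

# Columns: power, power_err, frequency, frequency_err, power_squared, n_segments
ps, ps_er, freq, freq_er, ps_sq, num_seg = genfromtxt('gx_125us.ps', unpack=True)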
Example #14
def pcu_filters():
    '''
    Function to determine if PCU changes have taken place, and if so cut 32 s
    around them. Also saves the times of the PCU changes and the observation
    times to the database
    '''

    purpose = 'Determine if number of PCUs has changed'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from astropy.io import fits
    from collections import defaultdict
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    d = defaultdict(list)
    for obsid, group in db.groupby(['obsids']):
        filt = group.filters.values[0]

        # Import data
        try:
            hdulist = fits.open(filt)
        except IOError:
            print 'ERROR: File not found for obsid ', obsid
            continue
        tstart = hdulist[0].header['TSTART']
        timezero = hdulist[0].header['TIMEZERO']
        num_pcu_on = hdulist[1].data.field('NUM_PCU_ON')
        time = hdulist[1].data.field('Time')

        # Remember time has an offset due to spacecraft time
        #time -= time[0]
        time += timezero
        # Counter to determine when the number of pcus changes
        pcu = num_pcu_on[0]
        # The acceptable time range
        t_range = repr(time[0]) + '-'

        for i, n in enumerate(num_pcu_on):
            # Check if the number of pcus has changed
            if n != pcu:

                pcu = n

                # Cut 32s around it
                low_t = time[i] - 16
                high_t = time[i] + 16
                previous_t = float(t_range.split('-')[-2].split(',')[-1])

                # Check whether there's any overlap
                if low_t <= previous_t:
                    # Replace the previous upper time if there is
                    t_range = t_range.replace(
                        t_range.split('-')[-2].split(',')[-1], repr(high_t))
                    continue
                else:
                    t_range += repr(low_t) + ','

                # Check whether you've reached the end
                if high_t > time[-1]:
                    t_range = t_range[:-1]
                    break
                else:
                    t_range += repr(high_t) + '-'

        if t_range[-1] == '-':
            t_range += repr(time[-1])

        print obsid, '-->', t_range

        filename = group.paths_obsid.values[0] + 'times_pcu.dat'
        with open(filename, 'w') as f:
            text = t_range.replace(',', '\n').replace('-', ' ')
            f.write(text + '\n')

        # Note that I'm only saving the maximum pcu number
        nupcu = max(num_pcu_on[1:])
        d['npcu'].append(nupcu)
        d['obsids'].append(obsid)
        d['times_obsid'].append(str(tstart + timezero) + '-' + str(time[-1]))
        d['times_pcu'].append(filename)

    # Add starting times of each obsid to database
    df = pd.DataFrame(d)
    db = database.merge(db, df, ['times_obsid', 'times_pcu', 'npcu'])
    database.save(db)
    logs.stop_logging()
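A worked illustration of the t_range bookkeeping above, with invented numbers: a single PCU change at t = 250 in an observation spanning 100 to 400 s cuts 16 s on either side, giving the string below, which the replace calls turn into the two-column format written to times_pcu.dat:

t_range = '100.0-234.0,266.0-400.0'                  # invented example string
text = t_range.replace(',', '\n').replace('-', ' ')
# text is now two lines, '100.0 234.0' and '266.0 400.0',
# i.e. the accepted time intervals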
Example #15
def correct_for_background():
    '''
    Function to interpolate background files to correct the various mode files
    for the corresponding background count rate
    '''

    purpose = 'Accounting for backgrounds'
    print len(purpose)*'=' + '\n' + purpose + '\n' + len(purpose)*'='

    import os
    import pandas as pd
    import glob
    from collections import defaultdict
    from math import isnan
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    d = defaultdict(list)
    for path_lc, group in db.groupby('lightcurves'):

        # Layer background subtraction is done in xspec, so skip these
        if path_lc.endswith('per_layer.lc'):
            continue

        obsid = group.obsids.values[0]
        path_obsid = group.paths_obsid.values[0]
        path_bkg = group.lightcurves_bkg.values[0]
        res = group.resolutions.values[0]
        mode = group.modes.values[0]

        # Std2 files won't have a high enough time resolution to create
        # power colours in the high band
        if mode == 'std2' or mode == 'std1':
            continue

        if (mode == 'gx1' or mode == 'gx2'):
            mode = 'gx'
        print obsid, mode, res

        # Rebin, and create a corrected version
        rebinned = rebin(path_obsid, path_lc, path_bkg, mode, res)
        rebinned_bkg = rebinned[0]
        bkg_corrected_lc = rebinned[1]

        d['lightcurves'].append(path_lc)
        d['rebinned_bkg'].append(rebinned_bkg)
        d['bkg_corrected_lc'].append(bkg_corrected_lc)

    # Update database and save
    df = pd.DataFrame(d)
    db = database.merge(db, df, ['rebinned_bkg', 'bkg_corrected_lc'])
    database.save(db)
    logs.stop_logging()
Example #16
def goodxenon_to_fits():
    '''
    Function to convert GoodXenon files to fits files using make_se.
    Subsequently groups the paths to the produced files into a file
    paths_gxfits_<resolution> and updates the db.
    '''

    purpose = 'Converting GoodXenon files to fits files'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='

    import os
    import pandas as pd
    import glob
    from collections import defaultdict
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    # Running it over gx1 or gx2 will give same result, but only needs to be
    # run once
    if 'gx1' not in db.modes.unique():
        print 'No GoodXenon files found'
        return

    # Select the GoodXenon (gx1) rows
    sdb = db[db.modes == 'gx1'].copy()

    sdb['gxfits'] = sdb.paths_obsid + 'gxfits_' + sdb.resolutions

    # Create a list of the gxfits files
    d = defaultdict(list)

    for i, row in sdb.iterrows():
        # Create goodxenon fits files
        command = [
            'make_se',
            '-i',  #Input file with list to gx1 and gx2 files
            row.paths_po_pm_pr,
            '-p',  #Output the prefix for the goodxenon files
            row.gxfits
        ]

        shell.execute(command)

        gxfiles = row.paths_obsid + 'gxfits_' + row.resolutions + '*'
        paths_gx = glob.glob(gxfiles)

        d['obsids'].append(row.obsids)
        d['modes'].append(row.modes)
        d['resolutions'].append(row.resolutions)
        path_gx = row.paths_obsid + 'paths_gxfits_' + row.resolutions
        d['paths_gx'].append(path_gx)

        with open(path_gx, 'w') as text:
            text.write('\n'.join(paths_gx) + '\n')

    # Ensure gx2 has the same data as gx1
    for k in d:
        if k != 'modes':
            d[k].extend(d[k])
        else:
            d[k].extend(['gx2' for g in d[k]])
    df = pd.DataFrame(d)

    # Ensure that the column paths_gx is updated
    db = database.merge(db, df, ['paths_gx'])

    database.save(db)
    logs.stop_logging()
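A tiny illustration of the gx1/gx2 duplication step above, with invented values, showing that every row is repeated once and only the copies are relabelled gx2:

d = {'obsids': ['A'], 'modes': ['gx1'], 'resolutions': ['125us'], 'paths_gx': ['p']}
for k in d:
    if k != 'modes':
        d[k].extend(d[k])                      # duplicate the existing rows
    else:
        d[k].extend(['gx2' for g in d[k]])     # label the copies as gx2
# d == {'obsids': ['A', 'A'], 'modes': ['gx1', 'gx2'],
#       'resolutions': ['125us', '125us'], 'paths_gx': ['p', 'p']}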
Example #17
File: cache.py Project: vaibbhav/drupy
def set(cid, data, table='cache', expire=None, headers=None):
    """
   Store data in the persistent cache.
  
   The persistent cache is split up into four database
   tables. Contributed plugins can add additional tables.
  
   'cache_page': This table stores generated pages for anonymous
   users. This is the only table affected by the page cache setting on
   the administrator panel.
  
   'cache_menu': Stores the cachable part of the users' menus.
  
   'cache_filter': Stores filtered pieces of content. This table is
   periodically cleared of stale entries by cron.
  
   'cache': Generic cache storage table.
  
   The reasons for having several tables are as follows:
  
   - smaller tables allow for faster selects and inserts
   - we try to put fast changing cache items and rather static
     ones into different tables. The effect is that only the fast
     changing tables will need a lot of writes to disk. The more
     static tables will also be better cachable with MySQL's query cache
  
   @param cid
     The cache ID of the data to store.
   @param data
     The data to store in the cache. Complex data types will be
     automatically serialized before insertion.
     Strings will be stored as plain text and not serialized.
   @param table
     The table to store the data in. Valid core values are 'cache_filter',
     'cache_menu', 'cache_page', or 'cache'.
   @param expire
     One of the following values:
     - CACHE_PERMANENT: Indicates that the item should never be removed unless
       explicitly told to using cache_clear_all() with a cache ID.
     - CACHE_TEMPORARY: Indicates that the item should be removed at the next
       general cache wipe.
     - A Unix timestamp: Indicates that the item should be kept at least until
       the given time, after which it behaves like CACHE_TEMPORARY.
   @param headers
     A string containing HTTP php.header information for cached pages.
  """
    if expire is None:
        expire = lib_bootstrap.CACHE_PERMANENT
    fields = {
        'serialized': 0,
        'created': REQUEST_TIME,
        'expire': expire,
        'headers': headers
    }
    if (not php.is_string(data)):
        fields['data'] = php.serialize(data)
        fields['serialized'] = 1
    else:
        fields['data'] = data
        fields['serialized'] = 0
    lib_database.merge(table).key({'cid': cid}).fields(fields).execute()
Example #18
def calculate_hi(low_e=3.0, high_e=16.0, soft=(6.4, 9.7), hard=(9.7, 16.)):
    '''
    Function to calculate hardness & intensity values.

    Arguments:
        Energy ranges in keV
        - low_e (float): Lower energy boundary for selection
        - high_e (float): Higher energy boundary for selection
        - soft (tuple of floats): Energy range between which to integrate
                                  the soft range
        - hard (tuple of floats): Energy range between which to integrate
                                  the hard range
    '''

    purpose = 'Calculating hardness & intensity values'
    print len(purpose) * '=' + '\n' + purpose + '\n' + len(purpose) * '='
    print 'Soft:', soft, 'Hard:', hard, '\n' + len(purpose) * '-'

    import os
    import pandas as pd
    import glob
    import xspec
    from collections import defaultdict
    from math import isnan
    from numpy import genfromtxt
    import paths
    import logs
    import execute_shell_commands as shell
    import database

    # Set log file
    filename = __file__.split('/')[-1].split('.')[0]
    logs.output(filename)

    # Import data
    os.chdir(paths.data)
    db = pd.read_csv(paths.database)

    # Compile Fortran code for later use
    cmpl = [
        'gfortran', paths.subscripts + 'integflux.f', '-o',
        paths.subscripts + 'integflux.xf'
    ]
    shell.execute(cmpl)

    # Only want spectra from std2
    d = defaultdict(list)
    for sp, group in db[(db.modes == 'std2')].groupby('spectra'):

        # Determine variables
        obsid = group.obsids.values[0]
        path_obsid = group.paths_obsid.values[0]
        bkg_sp = group.spectra_bkg.values[0]
        rsp = group.rsp.values[0]
        fltr = group.filters.values[0]

        print obsid

        # Check whether response file is there
        if not os.path.isfile(rsp):
            print 'ERROR: No response file'
            continue

        # XSPEC Commands to unfold spectrum around flat powerlaw
        # Reason as per Heil et al. (see doi:10.1093/mnras/stv240):
        # "In order to measure the energy spectral hardness independantly of
        # long term changes in the PCA instrument response, fluxes are
        # generated in a model-independant way by dividing the PCA standard
        # 2 mode spectrum by the effective area of the intstrument response
        # in each spectral channel. This is carried out by unfolding the
        # spectrum with respect to a zero-slope power law (i.e. a constant)
        # in the XSPEC spectral-fitting software, and measuring the unfolded
        # flux over the specified energy range (interpolating where the
        # specified energy does not fall neatly at the each of a spectral
        # channel)."
        #xspec.Plot.device = '/xs'

        s1 = xspec.Spectrum(sp)
        s1.background = bkg_sp
        s1.response = os.path.join(paths.data, rsp)
        # Not really sure why you need to do ignore, and then notice
        s1.ignore('**-' + str(low_e + 1.) + ' ' + str(high_e - 1) + '-**')
        s1.notice(str(low_e) + '-' + str(high_e))
        xspec.Model('powerlaw')
        xspec.AllModels(1).setPars(0.0, 1.0)  # Index, Norm
        xspec.AllModels(1)(1).frozen = True
        xspec.AllModels(1)(2).frozen = True
        xspec.Plot('eufspec')

        # Output unfolded spectrum to lists
        e = xspec.Plot.x()
        e_err = xspec.Plot.xErr()
        ef = xspec.Plot.y()
        ef_err = xspec.Plot.yErr()
        model = xspec.Plot.model()

        # Pipe output to file
        eufspec = path_obsid + 'eufspec.dat'
        with open(eufspec, 'w') as f:
            #Give header of file - must be three lines
            h = [
                '#Unfolded spectrum', '#',
                '#Energy EnergyError Energy*Flux Energy*FluxError ModelValues'
            ]
            f.write('\n'.join(h) + '\n')
            for i in range(len(e)):
                data = [e[i], e_err[i], ef[i], ef_err[i], model[i]]
                line = [str(j) for j in data]
                f.write(' '.join(line) + '\n')

        # Create a file to input into integflux
        integflux = path_obsid + 'integflux.in'
        with open(integflux, 'w') as f:
            #intgr_low, intgr_high, soft_low, soft_high, hard_low, hard_high
            line = [
                'eufspec.dat',
                str(low_e),
                str(high_e),
                str(soft[0]),
                str(soft[-1]),
                str(hard[0]),
                str(hard[-1])
            ]
            line = [str(e) for e in line]
            f.write(' '.join(line) + '\n')

        # Remove previous versions of the output
        if os.path.isfile(path_obsid + 'hardint.out'):
            os.remove(path_obsid + 'hardint.out')

        # Run fortran script to create calculate hardness-intensity values
        # Will output a file with the columns (with flux in Photons*ergs/cm^2/s)
        # flux flux_err ratio ratio_error
        os.chdir(path_obsid)
        shell.execute(paths.subscripts + 'integflux.xf')
        os.chdir(paths.data)

        # Get output of the Fortran script
        txt = genfromtxt(path_obsid + 'hardint.out')
        flux = float(txt[0])
        flux_err = float(txt[1])
        ratio = float(txt[2])
        ratio_err = float(txt[3])

        d['spectra'].append(sp)
        d['flux_i3t16_s6p4t9p7_h9p7t16'].append(flux)
        d['flux_err_i3t16_s6p4t9p7_h9p7t16'].append(flux_err)
        d['hardness_i3t16_s6p4t9p7_h9p7t16'].append(ratio)
        d['hardness_err_i3t16_s6p4t9p7_h9p7t16'].append(ratio_err)

        # Clear xspec spectrum
        xspec.AllData.clear()

    # Update database and save
    df = pd.DataFrame(d)
    cols = [
        'flux_i3t16_s6p4t9p7_h9p7t16', 'flux_err_i3t16_s6p4t9p7_h9p7t16',
        'hardness_i3t16_s6p4t9p7_h9p7t16',
        'hardness_err_i3t16_s6p4t9p7_h9p7t16'
    ]
    db = database.merge(db, df, cols)
    print 'Number of unique elements in database'
    print '======================='
    print db.apply(pd.Series.nunique)
    print '======================='
    print 'Pipeline completed'
    database.save(db)
    logs.stop_logging()
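As a worked example of the defaults: the single line written to integflux.in above reads

eufspec.dat 3.0 16.0 6.4 9.7 9.7 16.0

so integflux.xf integrates the unfolded spectrum over 3-16 keV for the intensity, 6.4-9.7 keV for the soft band and 9.7-16 keV for the hard band, and hardint.out then holds the flux, flux error, hardness ratio and ratio error read back by the code above.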