Example #1
    def __init__(self,
                 complete_dives,
                 creation_datestr=None,
                 version='2.0',
                 delimiter=',',
                 lineterminator='\n'):

        # Compute the default creation date per call; a default-argument
        # expression would be evaluated only once, at class-definition time
        if creation_datestr is None:
            creation_datestr = datetime.datetime.now(
                pytz.UTC).isoformat().split(".")[0] + "Z"

        if not all(
                isinstance(complete_dive, dives.Complete_Dive)
                for complete_dive in complete_dives):
            raise ValueError(
                'Input `complete_dives` must be a list of `dives.Complete_Dive` instances'
            )

        self.complete_dives = complete_dives
        self.creation_datestr = creation_datestr
        self.version = version
        self.delimiter = delimiter
        self.lineterminator = lineterminator

        # Attach header lines
        self.dataset_header = ['#dataset: GeoCSV ' + self.version]
        self.created_header = ['#created: ' + self.creation_datestr]
        self.version_header = [
            '#automaid: {} ({})'.format(setup.get_version(), setup.get_url())
        ]
        self.delimiter_header = ['#delimiter: ' + repr(self.delimiter)]
        self.lineterminator_header = [
            '#lineterminator: ' + repr(self.lineterminator)
        ]

        self.field_unit_header = [
            '#field_unit', 'ISO_8601', 'unitless', 'unitless', 'unitless',
            'unitless', 'degrees_north', 'degrees_east', 'meters', 'meters',
            'unitless', 'factor', 'hertz', 'unitless', 'hertz', 'seconds',
            'seconds'
        ]

        self.field_type_header = [
            '#field_type', 'datetime', 'string', 'string', 'string', 'string',
            'float', 'float', 'float', 'float', 'string', 'float', 'float',
            'string', 'float', 'float', 'float'
        ]

        self.MethodIdentifier_header = [
            'MethodIdentifier', 'StartTime', 'Network', 'Station', 'Location',
            'Channel', 'Latitude', 'Longitude', 'Elevation', 'Depth',
            'SensorDescription', 'Scale', 'ScaleFrequency', 'ScaleUnits',
            'SampleRate', 'TimeDelay', 'TimeCorrection'
        ]

        self.MethodIdentifier_Measurement = 'Measurement:GPS:{:s}'.format(
            utils.get_gps_instrument_name().replace(' ', '_'))
        self.MethodIdentifier_Algorithm = 'Algorithm:automaid:{:s}'.format(
            setup.get_version())
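
A brief aside on the `#delimiter:` and `#lineterminator:` headers above: repr() is used so that control characters stay legible in the GeoCSV comment lines. A minimal standalone sketch, not part of the class:

print('#delimiter: ' + repr(','))        # prints: #delimiter: ','
print('#lineterminator: ' + repr('\n'))  # prints: #lineterminator: '\n'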
Example #2
def get_built_in_vars(use_pkg_path, path_offset):
    """
    This system has a few built-in variables that can be referenced in the .use files. These variables will then be
    replaced with their values with a simple text replacement. This function defines these variables and returns them in
    the format of a dictionary.

    At the moment, the variables the system understands are:

    VERSION <- the version number of the current use package.
    USE_PKG_PATH <- a path to where the use package is.
    VERSION_PATH <- a path up to where the version is.
    PRE_VERSION_PATH <- a path up to the version, but not including the version.

    :param use_pkg_path: The path to the use package we want to get the version from.
    :param path_offset: The number of paths to step up through to find the version number. Defaults to the global
           variable AUTO_VERSION_OFFSET. Can be either a positive or negative value. Only the absolute value is used.

    :return: A dict where the key is the variable name, and the value is the value.
    """

    output = dict()

    version_path = setup.get_version_path(use_pkg_path, path_offset)

    output["PRE_VERSION_PATH"] = os.path.split(version_path)[0]
    output["USE_PKG_PATH"] = os.path.split(use_pkg_path)[0]
    output["VERSION_PATH"] = version_path
    output["VERSION"] = setup.get_version(use_pkg_path, path_offset)

    return output
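
A minimal usage sketch of get_built_in_vars(). The path, the offset, and the `$NAME` token syntax are all hypothetical here; the real substitution syntax belongs to the .use-file parser:

built_ins = get_built_in_vars("/pipeline/tools/maya/2.0/maya.use", path_offset=-1)
use_text = "append PATH $USE_PKG_PATH/bin  # built with $VERSION"
for name, value in built_ins.items():
    use_text = use_text.replace("$" + name, value)  # simple text replacement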
Example #3
def write_traces_txt(dive_logs, creation_datestr, processed_path, mfloat_path):
    event_dive_tup = (
        (event, dive) for dive in dive_logs for event in dive.events
        if event.station_loc and not event.station_loc_is_preliminary)

    traces_file = os.path.join(processed_path, mfloat_path, "traces.txt")
    fmt_spec = '{:<47s}    {:>15s}    {:>15s}    {:>15s}    {:>15s}    {:>15s}    {:>15s}    {:>15s}\n'

    version_line = "#automaid {} ({})\n".format(setup.get_version(),
                                                setup.get_url())
    created_line = "#created {}\n".format(creation_datestr)
    header_line = "#                               filename                   bin_mer      prev_dive_log  prev_dive_env_mer      this_dive_log  this_dive_env_mer      next_dive_log  next_dive_env_mer\n"

    with open(traces_file, "w+") as f:
        f.write(version_line)
        f.write(created_line)
        f.write(header_line)

        for e, d in sorted(event_dive_tup,
                           key=lambda x: x[0].corrected_starttime):
            f.write(
                fmt_spec.format(e.processed_file_name, e.mer_binary_name,
                                d.prev_dive_log_name,
                                d.prev_dive_mer_environment_name, d.log_name,
                                d.mer_environment_name, d.next_dive_log_name,
                                d.next_dive_mer_environment_name))
Example #4
def write_loc_txt(complete_dives, creation_datestr, processed_path,
                  mfloat_path):
    '''Writes interpolated station locations at the time of event recording for all events for each
    individual float

    '''

    event_list = [
        event for dive in complete_dives for event in dive.events
        if event.station_loc and not event.station_loc_is_preliminary
    ]

    loc_file = os.path.join(processed_path, mfloat_path, "loc.txt")
    fmt_spec = "{:<47s}    {:>10.6f}    {:>11.6f}    {:>6.0f}\n"

    version_line = "#automaid {} ({})\n".format(setup.get_version(),
                                                setup.get_url())
    created_line = "#created {}\n".format(creation_datestr)
    header_line = "#                               filename          interp_STLA    interp_STLO      STDP\n"

    with open(loc_file, "w+") as f:
        f.write(version_line)
        f.write(created_line)
        f.write(header_line)

        for e in sorted(event_list, key=lambda x: x.corrected_starttime):
            f.write(
                fmt_spec.format(e.processed_file_name,
                                np.float32(e.obspy_trace_stats.sac["stla"]),
                                np.float32(e.obspy_trace_stats.sac["stlo"]),
                                np.float32(e.obspy_trace_stats.sac["stdp"])))
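
For reference, one loc.txt row rendered through fmt_spec with made-up values (the filename is hypothetical):

fmt_spec = "{:<47s}    {:>10.6f}    {:>11.6f}    {:>6.0f}\n"
row = fmt_spec.format("20180809-12h34m56s.0001_5B8C7A4F.MER.DET.WLT5",
                      -12.345678, -145.678901, 1514.0)
# row == "20180809-12h34m56s.0001_5B8C7A4F.MER.DET.WLT5      -12.345678    -145.678901      1514\n"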
Example #5
def write_dives_txt(dive_logs, creation_datestr, processed_path, mfloat_path):
    '''Writes dives.txt, which treats every .LOG as a single (possibly incomplete) dive

    Prints all data for every .LOG/.MER on the server; does not, e.g., only
    print info associated with those .LOG/.MER within the datetime range of `main.py`

    '''

    dives_file = os.path.join(processed_path, mfloat_path, "dives.txt")
    fmt_spec = "{:>8s}    {:>20s}    {:>20s}    {:>10d}    {:>9.3f}    {:>15s}    {:>15s}\n"

    version_line = "#automaid {} ({})\n".format(setup.get_version(),
                                                setup.get_url())
    created_line = "#created {}\n".format(creation_datestr)
    header_line = "#dive_id               log_start                 log_end      len_secs     len_days           log_name       mer_env_name\n"

    with open(dives_file, "w+") as f:
        f.write(version_line)
        f.write(created_line)
        f.write(header_line)

        # 1 .LOG == 1 dive
        for d in sorted(dive_logs, key=lambda x: x.start_date):
            f.write(
                fmt_spec.format(str(d.dive_id),
                                str(d.start_date)[:19] + 'Z',
                                str(d.end_date)[:19] + 'Z', int(d.len_secs),
                                d.len_days, d.log_name,
                                d.mer_environment_name))
Example #6
def main():
    parser = optparse.OptionParser(description=description, usage=usage)

    parser.add_option('--version', dest='version', action='store_true',
                      help='display version then exit')
    parser.add_option('--install-dir', dest='idir', type=str, metavar='DIR',
                      help='installation directory DIR')
    parser.add_option('--a', dest='filea', type=str, metavar='FILE',
                      help='first file FILE')
    parser.add_option('--b', dest='fileb', type=str, metavar='FILE',
                      help='second file FILE')
    parser.add_option('--c', dest='filec', type=str, metavar='FILE',
                      help='merged file FILE')
    parser.add_option('--debug', dest='debug', action='store_true',
                      help='display contents of merged file to stdout')

    (options, _args) = parser.parse_args()
    if options.version:
        print(setup.get_version())
        exit(0)

    errmsg = []
    if options.idir is None:
        errmsg.append('no installation directory specified')
    if options.filea is None:
        errmsg.append('no first filename specified')
    if options.fileb is None:
        errmsg.append('no second filename specified')
    if options.filec is None:
        errmsg.append('no merged filename specified')
    if len(errmsg) > 0:
        print('\n'.join(errmsg))
        exit(1)

    merged_cfg = setup.merge_config_files(options.filea, options.fileb,
                                          options.idir)

    if options.debug:
        printdict(merged_cfg)
    else:
        tmpfile = tempfile.NamedTemporaryFile("w", 1)
        merged_cfg.write(tmpfile)
        tmpfile.flush()  # ensure the merged config is on disk before copying
        if os.path.exists(options.filec):
            _bup_cfg = setup.save_path(options.filec)
        shutil.copyfile(tmpfile.name, options.filec)
Example #7
    def test_setup(self, setup_func):

        # setup file has correct version?
        from setup import get_version
        setup_version = get_version()
        self.assertEqual(setup_version, chromepdf_version)

        # setup has correct long description?
        from setup import get_long_description
        setup_long_description = get_long_description()
        readme_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'README.md')
        with open(readme_path, 'r') as f:
            readme_contents = f.read()
        self.assertGreater(len(readme_contents), 1)
        self.assertEqual(setup_long_description, readme_contents)
Example #8
def write_complete_dives_txt(complete_dives, creation_datestr, processed_path,
                             mfloat_path, mfloat):
    '''Writes complete_dives.txt and prints the same info to stdout

    A complete dive is either: (1) wholly defined in a single .LOG file, or (2)
    a concatenation of many (fragmented/error/reboot/testmd) .LOG files that lie
    in-between single-.LOG complete dives

    Prints all data for every .LOG/.MER on the server; does not, e.g., only
    print info associated with those .LOG/.MER within the datetime range of `main.py`

    '''

    complete_dives_file = os.path.join(processed_path, mfloat_path,
                                       "complete_dives.txt")
    version_line = "#automaid {} ({})\n".format(setup.get_version(),
                                                setup.get_url())
    created_line = "#created {}\n".format(creation_datestr)

    with open(complete_dives_file, "w+") as f:
        f.write(version_line)
        f.write(created_line)

        for d in sorted(complete_dives, key=lambda x: x.start_date):
            print("Complete Dive")
            f.write("Complete Dive\n")

            # These methods both return, and print to stdout, the same formatted string
            f.write(d.print_len())
            f.write(d.print_log_mer_id())
            f.write(d.print_events())

            print ""
            f.write("\n")

        # Determine the total number of SAC and/or miniSEED files that COULD be
        # written (but do not necessarily exist, e.g., if `events_sac=False` in
        # main.py).
        sac_str = "    {:s} total: {:d} (non-preliminary) SAC & miniSEED files\n".format(mfloat, \
                  len([e for d in complete_dives for e in d.events if e.station_loc and not e.station_loc_is_preliminary]))

        print(sac_str)
        f.write(sac_str)
Example #9
def write_metadata(complete_dives, creation_datestr, processed_path,
                   mfloat_path):
    '''Write mseed2sac metadata and automaid metadata files.

    Update this function if the fields in method
    `events.attach_obspy_trace_stats` are changed.

    In total six files are written:

    mseed2sac_metadata_DET_REQ.csv (DET and REQ files, used by mseed2sac)
    mseed2sac_metadata_DET.csv (DET files only)
    mseed2sac_metadata_REQ.csv (REQ files only)

    automaid_metadata_DET_REQ.csv (DET and REQ files, ALL and ONLY SAC info defined in automaid)
    automaid_metadata_DET.csv (DET files only)
    automaid_metadata_REQ.csv (REQ files only)

    mseed2sac_metadata*.csv:

        Usage: mseed2sac -m mseed2sac_metadata.csv *mseed

        From: https://github.com/iris-edu/mseed2sac/blob/master/doc/mseed2sac.md

        (01) Network (KNETWK)
        (02) Station (KSTNM)
        (03) Location (KHOLE)
        (04) Channel (KCMPNM)
        (05) Latitude (STLA)
        (06) Longitude (STLO)
        (07) Elevation (STEL), in meters [not currently used by SAC]
        (08) Depth (STDP), in meters [not currently used by SAC]
        (09) Component Azimuth (CMPAZ), degrees clockwise from north
        (10) Component Incident Angle (CMPINC), degrees from vertical
        (11) Instrument Name (KINST), up to 8 characters
        (12) Scale Factor (SCALE)
        (13) Scale Frequency, unused
        (14) Scale Units, unused
        (15) Sampling rate, unused
        (16) Start time, used for matching
        (17) End time, used for matching

    automaid_metadata*.csv:

        Prints ALL and ONLY the non-default SAC headers filled by automaid:

        (01) file name (from automaid; not a SAC header field)
        (02) KNETWK
        (03) KSTNM
        (04) KHOLE
        (05) KCMPNM
        (06) STLA
        (07) STLO
        (08) STEL
        (09) STDP
        (10) CMPAZ
        (11) CMPINC
        (12) KINST
        (13) SCALE
        (14) USER0 (SNR)
        (15) USER1 (criterion)
        (16) USER2 (trig)
        (17) USER3 (clockdrift correction)
        (18) KUSER0 (automaid version)
        (19) KUSER1 (REQ or DET and scales)
        (20) KUSER2 (CDF24 edge correction and normalization)
        (21) samplerate (not a SAC header field)
        (22) start (not a SAC header field)
        (23) end (not a SAC header field)

    '''

    ## NB, concerning filename abbreviations:
    ## m2s_* == mseed2sac
    ## atm_* == automaid*
    ##
    ## Previous versions wrote formatted text files for easy human readability.
    ## Those have since been nixed because it turned out that mseed2sac would
    ## still convert miniSEED and generate SAC files from those metadata*.txt
    ## files, without warning that it had only partially filled the header
    ## (e.g., skipping STLA/STLO because the .txt was not formatted in the
    ## expected .csv style).  The code to generate those is left here, commented out.

    # Version and creation-date lines are the same for both
    version_line = "#automaid {} ({})\n".format(setup.get_version(),
                                                setup.get_url())
    created_line = "#created {}\n".format(creation_datestr)

    # Generate the csv header lines for all six files by replacing runs of
    # spaces in the fixed-width text headers with commas
    m2s_header_line_txt = "#net    sta   loc   chan           lat            lon      elev     depth   azimuth    SACdip  instrument     scale  scalefreq scaleunits samplerate                  start                    end\n"
    m2s_header_line_csv = ','.join(m2s_header_line_txt.split()) + '\n'

    # (add pound after comma substitution)
    atm_header_line_txt = "                               filename KNETWK    KSTNM KHOLE KCMPNM          STLA           STLO STEL      STDP CMPAZ CMPINC      KINST     SCALE            USER0            USER1     USER2            USER3      KUSER0      KUSER1      KUSER2 samplerate                  start                    end\n"
    atm_header_line_csv = '#' + ','.join(atm_header_line_txt.split()) + '\n'
    #atm_header_line_txt = '#' + atm_header_line_txt

    # Field specifiers for mseed2sac_metadata.csv and mseed2sac_metadata.txt
    m2s_fmt = [
        '{:>2s}',  # Network (KNETWK)
        '{:>5s}',  # Station (KSTNM)
        '{:>2s}',  # Location (KHOLE)
        '{:>3s}',  # Channel (KCMPNM)
        '{:>10.6f}',  # Latitude (STLA)
        '{:>11.6f}',  # Longitude (STLO)
        '{:>6.0f}',  # Elevation (STEL), in meters [not currently used by SAC]
        '{:>6.0f}',  # Depth (STDP), in meters [not currently used by SAC]
        '{:>6.0f}',  # Component Azimuth (CMPAZ), degrees clockwise from north
        '{:>6.0f}',  # Component Incident Angle (CMPINC), degrees from vertical
        '{:>8s}',  # Instrument Name (KINST), up to 8 characters
        '{:>6.0f}',  # Scale Factor (SCALE)
        '{:>7.1f}',  # Scale Frequency, unused
        '{:>7s}',  # Scale Units, unused
        '{:>7.0f}',  # Sampling rate, unused
        '{:>19s}',  # Start time, used for matching
        '{:>19s}\n'
    ]  # End time, used for matching

    # Add four spaces between each field to format the text file
    #m2s_fmt_txt  = '    '.join(m2s_fmt)

    # Add comma between each field and remove field width (non-decimal) to format the csv
    m2s_fmt_csv = ','.join(m2s_fmt)
    m2s_fmt_csv = re.sub(r':>\d*', ':', m2s_fmt_csv)

    # Field specifiers for automaid_metadata.csv and automaid_metadata.txt format
    atm_fmt = [
        '{:>40s}',  # file name (from automaid; not a SAC header field)
        '{:>3s}',  # KNETWK
        '{:>5s}',  # KSTNM
        '{:>2s}',  # KHOLE
        '{:>3s}',  # KCMPNM
        '{:>10.6f}',  # STLA
        '{:>11.6f}',  # STLO
        '{:>1.0f}',  # STEL
        '{:>6.0f}',  # STDP
        '{:>2.0f}',  # CMPAZ
        '{:>3.0f}',  # CMPINC
        '{:s}',  # KINST
        '{:>.0f}',  # SCALE
        '{:>13.6f}',  # USER0 (detection SNR)
        '{:>13.6f}',  # USER1 (detection criterion)
        '{:>6.0f}',  # USER2 (detection trigger sample index)
        '{:>13.6f}',  # USER3 (clockdrift correction)
        '{:>8s}',  # KUSER0 (automaid version)
        '{:>8s}',  # KUSER1 (REQ or DET and scales)
        '{:>8s}',  # KUSER2 (CDF24 edge correction and normalization)
        '{:>7.0f}',  # samplerate (not a SAC header field)
        '{:>19s}',  # start (not a SAC header field)
        '{:>19s}\n'
    ]  # end (not a SAC header field)

    # Add four spaces between each field to format the text file
    #atm_fmt_txt  = '    '.join(atm_fmt)

    # Add comma between each field and remove field width (non-decimal) to format the csv
    atm_fmt_csv = ','.join(atm_fmt)
    atm_fmt_csv = re.sub(r':>\d*', ':', atm_fmt_csv)

    # The base path (the folder) is the same for all six files
    base_path = os.path.join(processed_path, mfloat_path)
    m2s_path = os.path.join(base_path, 'mseed2sac_metadata')
    atm_path = os.path.join(base_path, 'automaid_metadata')

    # These are mseed2sac_metadata values that do not differ (yet?) between MERMAIDs
    scalefreq = np.float32(1.)
    scaleunits = 'Pa'

    # Open all files
    with open(m2s_path+"_DET_REQ.csv", "w+") as m2s_dr_csv, \
         open(m2s_path+"_DET.csv", "w+") as m2s_d_csv, \
         open(m2s_path+"_REQ.csv", "w+") as m2s_r_csv, \
         open(atm_path+'_DET_REQ.csv', "w+") as atm_dr_csv, \
         open(atm_path+'_DET.csv', "w+") as atm_d_csv, \
         open(atm_path+'_REQ.csv', "w+") as atm_r_csv:

        # open(m2s_path+".txt", "w+") as m2s_f_txt, \
        # open(atm_path+'.txt', "w+") as atm_f_txt, \

        ## Write version, created, and header lines to all six files

        m2s_dr_csv.write(version_line)
        m2s_dr_csv.write(created_line)
        m2s_dr_csv.write(m2s_header_line_csv)

        m2s_d_csv.write(version_line)
        m2s_d_csv.write(created_line)
        m2s_d_csv.write(m2s_header_line_csv)

        m2s_r_csv.write(version_line)
        m2s_r_csv.write(created_line)
        m2s_r_csv.write(m2s_header_line_csv)

        # m2s_f_txt.write(version_line)
        # m2s_f_txt.write(created_line)
        # m2s_f_txt.write(m2s_header_line_txt)

        atm_dr_csv.write(version_line)
        atm_dr_csv.write(created_line)
        atm_dr_csv.write(atm_header_line_csv)

        atm_d_csv.write(version_line)
        atm_d_csv.write(created_line)
        atm_d_csv.write(atm_header_line_csv)

        atm_r_csv.write(version_line)
        atm_r_csv.write(created_line)
        atm_r_csv.write(atm_header_line_csv)

        # atm_f_txt.write(version_line)
        # atm_f_txt.write(created_line)
        # atm_f_txt.write(atm_header_line_txt)

        # Loop over all events for which a station location was computed
        event_list = [
            event for dive in complete_dives for event in dive.events
            if event.station_loc and not event.station_loc_is_preliminary
        ]
        for e in sorted(event_list, key=lambda x: x.corrected_starttime):
            ## Collect metadata and convert to np.float32()

            # For mseed2sac_metadata*.csv:
            net = e.obspy_trace_stats["network"]
            sta = e.obspy_trace_stats["station"]
            loc = e.obspy_trace_stats["location"]
            chan = e.obspy_trace_stats["channel"]
            lat = np.float32(e.obspy_trace_stats.sac["stla"])
            lon = np.float32(e.obspy_trace_stats.sac["stlo"])
            elev = np.float32(e.obspy_trace_stats.sac["stel"])
            depth = np.float32(e.obspy_trace_stats.sac["stdp"])
            azimuth = np.float32(e.obspy_trace_stats.sac["cmpaz"])
            SACdip = np.float32(e.obspy_trace_stats.sac["cmpinc"])
            instrument = e.obspy_trace_stats.sac["kinst"]
            scale = np.float32(e.obspy_trace_stats.sac["scale"])
            # scalefreq (local defined above)
            # scaleunits (local defined above)
            samplerate = np.float32(e.obspy_trace_stats["sampling_rate"])
            start = str(e.obspy_trace_stats["starttime"])[:19]
            end = str(e.obspy_trace_stats["endtime"])[:19]

            # Fields unique to automaid_metadata that are not in mseed2sac_metadata*.csv
            # (commented fields are defined in both files)
            filename = e.processed_file_name
            # KNETWK = net  (LHS are SAC names; RHS are their mseed2sac equivalents)
            # KSTNM = sta
            # KHOLE = loc
            # KCMPNM = chan
            # STLA = lat
            # STLO = lon
            # ELEV = elev
            # STDP = depth
            # CMPAZ = azimuth
            # CMPINC = SACdip
            # KINST = instrument
            # SCALE = scale
            USER0 = np.float32(e.obspy_trace_stats.sac["user0"])
            USER1 = np.float32(e.obspy_trace_stats.sac["user1"])
            USER2 = np.float32(e.obspy_trace_stats.sac["user2"])
            USER3 = np.float32(e.obspy_trace_stats.sac["user3"])
            KUSER0 = e.obspy_trace_stats.sac["kuser0"]
            KUSER1 = e.obspy_trace_stats.sac["kuser1"]
            KUSER2 = e.obspy_trace_stats.sac["kuser2"]
            # samplerate
            # start
            # end

            ## Group into correct order

            # mseed2sac_metadata.csv fields
            m2s_meta = [
                net, sta, loc, chan, lat, lon, elev, depth, azimuth, SACdip,
                instrument, scale, scalefreq, scaleunits, samplerate, start,
                end
            ]

            # automaid_metadata.csv fields, with SAC names commented
            atm_meta = [
                filename, net, sta, loc, chan, lat, lon, elev, depth, azimuth,
                SACdip, instrument, scale, USER0, USER1, USER2, USER3, KUSER0,
                KUSER1, KUSER2, samplerate, start, end
            ]

            ## Write DET and REQ info to both
            # m2s_f_txt.write(m2s_fmt_txt.format(*m2s_meta))
            m2s_dr_csv.write(m2s_fmt_csv.format(*m2s_meta))

            #atm_f_txt.write(atm_fmt_txt.format(*atm_meta))
            atm_dr_csv.write(atm_fmt_csv.format(*atm_meta))

            if not e.is_requested:
                m2s_d_csv.write(m2s_fmt_csv.format(*m2s_meta))
                atm_d_csv.write(atm_fmt_csv.format(*atm_meta))

            else:
                m2s_r_csv.write(m2s_fmt_csv.format(*m2s_meta))
                atm_r_csv.write(atm_fmt_csv.format(*atm_meta))
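
The width-stripping regex above is the whole trick for deriving the csv format strings from the fixed-width ones; a standalone demonstration:

import re
txt_fmt = ','.join(['{:>2s}', '{:>10.6f}', '{:>6.0f}'])
csv_fmt = re.sub(r':>\d*', ':', txt_fmt)
print(csv_fmt)  # -> {:s},{:.6f},{:.0f}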
Example #10
def write_gps(dive_logs, creation_datestr, processed_path, mfloat_path):
    '''Write complete (raw, full, all, nonunique) GPS data from .LOG and .MER.
    Differs from GeoCSV, which writes unique (merged .MER time and .LOG
    position) GPS fixes.

    '''

    gps_genexp = (gps for dive in dive_logs for gps in dive.gps_nonunique_list)

    # Version and creation-date lines are the same for both csv and txt files
    version_line = "#automaid {} ({})\n".format(setup.get_version(), setup.get_url())
    created_line = "#created {}\n".format(creation_datestr)

    # Specify field headers of both csv and txt files
    header_line_txt = "           gps_time       gps_lat        gps_lon  gps_hdop  gps_vdop    gps_time-mer_time mer_clockfreq             source       raw_gps_lat        raw_gps_lon\n"
    header_line_csv = '#' + ','.join(header_line_txt.split()) + '\n'
    header_line_txt = '#' + header_line_txt # add pound sign after comma substitution

    # Specify generic format of both csv and txt files
    fmt = ['{:>19s}',
           '{:>10.6f}',
           '{:>11.6f}',
           '{:>6.3f}',
           '{:>6.3f}',
           '{:>17.6f}',
           '{:>10.0f}',
           '{:>15s}',
           '{:>14s}',
           '{:>15s}\n']

    # Add four spaces between each field for the txt file
    fmt_txt  = '    '.join(fmt)

    # Add comma between each field and remove field width (non-decimal) to format the csv
    fmt_csv  = ','.join(fmt)
    fmt_csv  = re.sub(r':>\d*', ':', fmt_csv)

    # Specify file paths
    base_path = os.path.join(processed_path, mfloat_path)
    csv_file =  os.path.join(base_path, 'gps.csv')
    txt_file =  os.path.join(base_path, 'gps.txt')

    with open(csv_file, "w+") as f_csv, open(txt_file, "w+") as f_txt:
        # Write the version and header lines to both the csv and txt file
        f_csv.write(version_line)
        f_csv.write(created_line)
        f_csv.write(header_line_csv)

        f_txt.write(version_line)
        f_txt.write(created_line)
        f_txt.write(header_line_txt)

        for g in sorted(gps_genexp, key=lambda x: x.date):
            if g.hdop is None:
                g.hdop = float("NaN")
            if g.vdop is None:
                g.vdop = float("NaN")
            if g.clockfreq is None:
                g.clockfreq = float("NaN")

            # Parse and format the raw strings.
            raw_lat = g.rawstr_dict['latitude']
            raw_lon = g.rawstr_dict['longitude']

            # Collect list of GPS data
            gps_data = [str(g.date)[:19] + 'Z',
                        g.latitude,
                        g.longitude,
                        g.hdop,
                        g.vdop,
                        g.clockdrift,
                        g.clockfreq,
                        g.source,
                        raw_lat,
                        raw_lon]

            # Write data to .csv and .txt formats
            f_csv.write(fmt_csv.format(*gps_data))
            f_txt.write(fmt_txt.format(*gps_data))
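
The None-to-NaN substitution in the loop above matters because str.format renders NaN through a float specifier but raises on None; a standalone check:

print('{:>6.3f}'.format(float('nan')))  # prints '   nan'
# '{:>6.3f}'.format(None) would raise TypeError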
Example #11
def write_gps_interpolation_txt(complete_dives, creation_datestr, processed_path, mfloat_path):
    '''Writes MERMAID GPS interpolation file, detailing GPS and interpolation parameters for the three
    main regimes of each dive: descent and drift in the surface layer, drift in the mixed layer, and
    ascent and drift in the surface layer.


    '''

    # NB, the comments here assume a normal dive where all GPS fixes are obtained and MERMAID dives
    # deeper than the mixed-layer depth; see especially dives.compute_station_locations and
    # gps.linear_interpolation to understand the edge cases where perhaps some GPS fixes are missing
    # and/or MERMAID didn't dive into the mixed layer.  In all cases, GPS interpolation is still
    # broken into three regimes: descent drift, "deep" drift, and ascent drift.  Descent drift uses
    # the surface-drift velocity before the dive to interpolate forward in time for the location
    # where MERMAID dove into the mixed layer (left the surface layer); ascent drift uses the
    # surface-drift velocity after the dive to interpolate backward in time for the location where
    # MERMAID ascended into the surface layer (left the mixed layer); "deep" drift uses the velocity
    # of drift between those two points to estimate where MERMAID was when it recorded events while
    # drifting in the mixed layer.

    # "input" to gps.linear_interpolation are those GPS instances that we give the algorithm
    def parse_input_params(leg):
        input_params = [leg['input_drift_time']               if leg['input_drift_time'] else float("Nan"),
                        leg['input_drift_time'] / 60.0        if leg['input_drift_time'] else float("Nan"),
                        leg['input_drift_dist_m']             if leg['input_drift_dist_m'] else float("Nan"),
                        leg['input_drift_dist_m'] / 1000      if leg['input_drift_dist_m'] else float("Nan"),
                        leg['input_drift_vel_ms']             if leg['input_drift_vel_ms'] else float("Nan"),
                        leg['input_drift_vel_ms'] * 3.6       if leg['input_drift_vel_ms'] else float("Nan"), # km/hr
                        leg['input_drift_vel_ms'] * 3.6 * 24  if leg['input_drift_vel_ms'] else float("Nan")] # km/day

        input_params = list(map(abs, input_params))  # list() so .append() works under Python 3
        input_fmt_spec = '{:>6.0f}        {:>7.1f}        {:>6.0f}        {:>4.1f}        {:>5.2f}        {:>7.2f}        {:>7.2f}'

        return (input_params, input_fmt_spec)

    # "interp" from gps.linear_interpolation are those GPS instances the algorithm computes given
    # the input
    def parse_interp_params(leg):
        interp_params = [leg['interp_drift_time']              if leg['interp_drift_time'] else float("Nan"),
                         leg['interp_drift_time'] / 60.0       if leg['interp_drift_time'] else float("Nan"),
                         leg['interp_drift_dist_m']            if leg['interp_drift_dist_m'] else float("Nan"),
                         leg['interp_drift_dist_m'] / 1000     if leg['interp_drift_dist_m'] else float("Nan"),
                         leg['interp_drift_vel_ms']            if leg['interp_drift_vel_ms'] else float("Nan"),
                         leg['interp_drift_vel_ms'] * 3.6      if leg['interp_drift_vel_ms'] else float("Nan"), # km/hr
                         leg['interp_drift_vel_ms'] * 3.6 * 24 if leg['interp_drift_vel_ms'] else float("Nan")] # km/day

        interp_params = list(map(abs, interp_params))  # list() so .append() works under Python 3
        interp_fmt_spec = '{:>6.0f}        {:>7.1f}        {:>6.0f}        {:>4.1f}        {:>5.2f}        {:>7.2f}        {:>7.2f}'

        return (interp_params, interp_fmt_spec)

    # Generate (unique) list of dives with events whose interpolated locations we are able to compute
    dive_set = set(dive for dive in complete_dives for event in dive.events if event.station_loc)

    # Print GPS interpolation information for every dive that includes an event, covering all three dive regimes
    gps_interp_file = os.path.join(processed_path, mfloat_path, "gps_interpolation.txt")
    version_line = "#automaid {} ({})\n".format(setup.get_version(), setup.get_url())
    created_line = "#created {}\n".format(creation_datestr)

    with open(gps_interp_file, "w+") as f:
        f.write(version_line)
        f.write(created_line)

        for dive in sorted(dive_set, key=lambda x: x.start_date):

            # Compute the percentage of the total interpolate distance for the three regimes:
            # (1) surface-layer drift during the descent
            #
            # (2) mixed_layer drift
            #     .station.loc['interp_dist_m'] differs for each event  (drift to event in mixed layer)
            #     .station.loc['input_dist_m'] same for all events (total mixed-layer drift)
            #
            # (3) surface-layer drift during the ascent

            leg_descent = dive.descent_last_loc_before_event
            leg_ascent = dive.ascent_first_loc_after_event
            if leg_descent is None or leg_ascent is None:
                continue

            interp_dist_descent = leg_descent.interp_dict['interp_drift_dist_m']
            input_dist_mixed = dive.events[0].station_loc.interp_dict['input_drift_dist_m']
            interp_dist_ascent = leg_ascent.interp_dict['interp_drift_dist_m']

            if all([interp_dist_descent, input_dist_mixed, interp_dist_ascent]):
                bad_interp = False
                total_interp_dist = sum([interp_dist_descent, input_dist_mixed, interp_dist_ascent])
                interp_perc_descent = (interp_dist_descent / total_interp_dist) * 100
                input_perc_mixed = (input_dist_mixed / total_interp_dist) * 100
                interp_perc_ascent = (interp_dist_ascent / total_interp_dist) * 100

            else:
                bad_interp = True
                interp_perc_descent = float("nan")
                input_perc_mixed = float("nan")
                interp_perc_ascent = float("nan")

            # Write headers to each dive block
            f.write("DIVE ID: ")
            for dive_id in dive.dive_id:
                f.write("{:>4d}".format(dive_id))

                if dive_id != dive.dive_id[-1]:
                    f.write(", ")

                else:
                    f.write("\n")
            f.write("DATES: {:>19s} --> {:19s}\n\n".format(str(dive.start_date)[:19] + 'Z', str(dive.end_date)[:19] + 'Z'))
            f.write("DRIFT_REGIME               TIME_S       TIME_MIN        DIST_M     DIST_KM      VEL_M/S      VEL_KM/HR     VEL_KM/DAY      DIST_%                                 SAC_MSEED_TRACE\n")

            # Parse the GPS ('input') components of surface drift before dive: these are actual GPS points
            gps_surface_descent, gps_fmt_spec = parse_input_params(leg_descent.interp_dict)

            gps_fmt_spec = "gps_surface                " + gps_fmt_spec + "\n"
            f.write(gps_fmt_spec.format(*gps_surface_descent))

            # Parse the interpolated components of surface drift before dive: between last GPS point
            # and crossing into mixed layer
            interp_surface_descent, interp_fmt_spec = parse_interp_params(leg_descent.interp_dict)
            interp_surface_descent.append(interp_perc_descent)

            interp_fmt_spec = "interp_surface             " + interp_fmt_spec + "        {:>4.1f}\n"
            f.write(interp_fmt_spec.format(*interp_surface_descent))

            # For every event recorded during the dive: parse the interpolated components of the
            # mixed-layer drift from leaving the surface layer (passing into the "deep" or
            # mixed-layer drift regime) and recording an event
            for event in dive.events:
                # if event.station_loc_is_preliminary:
                #     continue

                interp_drift_to_event_mixed_layer, interp_fmt_spec = parse_interp_params(event.station_loc.interp_dict)
                interp_drift_to_event_mixed_layer.append(event.processed_file_name)

                interp_fmt_spec = " interp_mixed(to_event)    " + interp_fmt_spec + "                    {:>40s}\n"
                f.write(interp_fmt_spec.format(*interp_drift_to_event_mixed_layer))

            # The total interpolated drift in the mixed layer -- the drift that occurs between the
            # last point of the descent and the first point of the ascent -- is the same for every
            # event; just use the first event instance
            total_drift_mixed_layer, interp_fmt_spec = parse_input_params(dive.events[0].station_loc.interp_dict)
            total_drift_mixed_layer.append(input_perc_mixed)

            interp_fmt_spec = "interp_mixed(total)        " + interp_fmt_spec + "        {:>4.1f}\n"
            f.write(interp_fmt_spec.format(*total_drift_mixed_layer))

            # Parse the interpolated components of surface drift after dive: crossing out of mixed
            # layer and recording first GPS point
            interp_surface_ascent, interp_fmt_spec = parse_interp_params(leg_ascent.interp_dict)
            interp_surface_ascent.append(interp_perc_ascent)

            interp_fmt_spec = "interp_surface             " + interp_fmt_spec + "        {:>4.1f}\n"
            f.write(interp_fmt_spec.format(*interp_surface_ascent))

            # Parse the GPS ('input') components of surface drift after dive: these are actual GPS points
            gps_surface_ascent, gps_fmt_spec = parse_input_params(leg_ascent.interp_dict)

            gps_fmt_spec = "gps_surface                " + gps_fmt_spec + "\n"
            f.write(gps_fmt_spec.format(*gps_surface_ascent))

            # If the interpolation failed, print some helpful statements at end of block
            if bad_interp:
                f.write('\n')
                if leg_descent.interp_dict['input_drift_dist_m'] is None:
                    f.write("*Interpolation issue before dive (surface-layer drift): {:s}\n" \
                            .format(leg_descent.interp_dict['description']))

                if dive.events[0].station_loc.interp_dict['input_drift_dist_m'] is None:
                    f.write("*Interpolation issue during dive (mixed-layer drift): {:s}\n" \
                            .format(dive.events[0].station_loc.interp_dict['description']))

                if leg_ascent.interp_dict['input_drift_dist_m'] is None:
                    f.write("*Interpolation issue after dive (surface-layer drift): {:s}\n" \
                            .format(leg_ascent.interp_dict['description']))

            f.write('\n__________END__________\n\n')
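
The DIST_% bookkeeping above reduces to simple proportions of the summed interpolated distances; with hypothetical leg distances:

legs_m = [500.0, 3000.0, 400.0]  # descent, mixed-layer, ascent drift (made up)
total_m = sum(legs_m)
print([round(100.0 * d / total_m, 1) for d in legs_m])  # -> [12.8, 76.9, 10.3]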
Example #12
    def test_get_version(self):
        self.assertEqual('2!2016.1.27rc2', get_version("dummy"))
Example #13
    def test_get_version(self):
        self.assertEqual('1.2.3', get_version("dummy"))
Example #14
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Lago'
copyright = u'2015, David Caro'
author = u'David Caro'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = lago_setup.get_version('..')
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
Example #15
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'execo'
copyright = '2009-2016, INRIA Rhone-Alpes, Service Experimentation et Developpement'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from setup import get_version
version = get_version()
# The full version, including alpha/beta/rc tags.
release = get_version()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []
Example #16
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PySB'
copyright = u'2012, C. F. Lopez, J. L. Muhlich, J. A. Bachman'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import setup as pysb_setup
# The full version, including alpha/beta/rc tags.
release = pysb_setup.get_version()
# The short X.Y version.
version = re.sub(r'-.*', '', release)
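# For a hypothetical tag, release = '1.0.0-rc1' yields version = '1.0.0'
# (the regex drops everything from the first hyphen onward).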

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
Example #17
File: conf.py Project: nirs/lago
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Lago'
copyright = u'2015-2017, David Caro and Lago developers'
author = u'David Caro and Lago Developers'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = lago_setup.get_version('..')
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
Example #19
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

import os
import sys

from setup import get_version

sys.path.insert(0, os.path.abspath('../'))

_version = get_version()

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
Example #20
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'varsens'
copyright = u'2013, C. F. Lopez, S. P. Garbett'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import setup as varsens_setup
# The full version, including alpha/beta/rc tags.
release = varsens_setup.get_version()
# The short X.Y version.
version = re.sub(r'-.*', '', release)

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
Example #21
# Last modified by JDS: 26-Aug-2021
# Last tested: Python 2.7.15, Darwin-18.7.0-x86_64-i386-64bit

import re
import struct
import warnings
import numpy as np
import plotly.graph_objs as graph

from obspy import UTCDateTime
from obspy.io.mseed import util as obspy_util

import setup

# Get current version number.
version = setup.get_version()

#
# Log files utilities
#

# Split a log's content into individual lines
def split_log_lines(content):
    splitted = []
    if "\r\n" in content:
        splitted = content.split("\r\n")
    elif "\r" in content:
        splitted = content.split("\r")
    elif "\n" in content:
        splitted = content.split("\n")
    # The source is truncated here; an assumed completion: drop the empty
    # string left behind by a trailing line terminator and return the rest
    if splitted and splitted[-1] == "":
        splitted = splitted[:-1]
    return splitted
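
A hypothetical call, assuming the completion sketched above:

print(split_log_lines("P1: +0.1V\r\nP2: -0.2V\r\n"))
# -> ['P1: +0.1V', 'P2: -0.2V']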
Example #22
    def test_get_version(self):
        self.assertIsNone(get_version("dummy"))
Example #23
#
import os
import sys

sys.path.insert(0, os.path.abspath("../"))

from setup import get_version

# -- Project information -----------------------------------------------------

project = "PyFunceble"
copyright = "2017, 2018, 2019, 2020, 2021 Nissar Chababy (@funilrys)"
author = "Nissar Chababy (@funilrys)"

# The short X.Y version
version = get_version()
# The full version, including alpha/beta/rc tags
release = get_version()

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.todo",