Example #1
def data_timing_quality(path="",
                        average_calc_length=1,
                        scale=lambda x: x,
                        **kwargs):
    """
    Reads timing quality values from miniseed file using
    :func:`~obspy.io.mseed.util.get_record_information` function.

    :type path: str
    :param path: path to miniseed file
    :type average_calc_length: int, optional
    :param average_calc_length: defaults to 1; setting a larger value yields
        a slightly smoothed timing quality curve, since each returned data
        point is the average over the last ``average_calc_length`` values.
    :type scale: func, optional
    :param scale: scaling function applied to the data values; defaults to
        the identity function.
    :rtype: list or None
    :return: list of lists containing timestamp (as
        :class:`~obspy.core.utcdatetime.UTCDateTime` instance) and data value.
    """
    imm = fileutils.invalid_mseed(path)
    if imm:
        messenger(imm, "M")
        return None

    timing_quality = []
    tqtimes = []
    last_qualities = []
    offset = 0

    # Code adapted from ObsPy.
    # Loop over each record.
    # A valid record needs to have a record length of at
    # least 256 bytes.
    info = get_record_information(path)
    while offset <= (info['filesize'] - 256):
        this_info = get_record_information(path, offset)
        if 'timing_quality' in this_info:

            last_qualities.append(float(this_info['timing_quality']))
            # Keep at most ``average_calc_length`` values in the buffer and
            # average over them.
            if len(last_qualities) > average_calc_length:
                last_qualities = last_qualities[1:]
            ave = sum(last_qualities) / len(last_qualities)

            timing_quality.append(ave)
            tqtimes.append(this_info['starttime'])

        offset += this_info['record_length']

    return [[x, scale(y)] for x, y in zip(tqtimes, timing_quality)]
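The helper above depends on project-specific utilities (fileutils, messenger). As a rough, self-contained sketch of the same record-walking pattern using nothing but ObsPy (the path and helper name are made up for illustration):

from obspy.io.mseed.util import get_record_information

def iter_timing_quality(path):
    """Yield (record start time, timing quality) for every record."""
    info = get_record_information(path)
    offset = 0
    # A valid record needs at least 256 bytes.
    while offset <= info['filesize'] - 256:
        rec = get_record_information(path, offset)
        if 'timing_quality' in rec:
            yield rec['starttime'], float(rec['timing_quality'])
        offset += rec['record_length']

# for timestamp, quality in iter_timing_quality("some_file.mseed"):
#     print(timestamp, quality)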
Example #2
    def test_getRecordInformation(self):
        """
        Tests util.get_record_information() with known values.
        """
        filename = os.path.join(self.path, 'data',
                                'BW.BGLD.__.EHE.D.2008.001.first_10_records')
        # Simply reading the file.
        info = util.get_record_information(filename)
        self.assertEqual(info['filesize'], 5120)
        self.assertEqual(info['record_length'], 512)
        self.assertEqual(info['number_of_records'], 10)
        self.assertEqual(info['excess_bytes'], 0)
        # Now with an open file. This should work regardless of the current
        # value of the file pointer and it should also not change the file
        # pointer.
        with open(filename, 'rb') as open_file:
            open_file.seek(1234)
            info = util.get_record_information(open_file)
            self.assertEqual(info['filesize'], 5120 - 1234)
            self.assertEqual(info['record_length'], 512)
            self.assertEqual(info['number_of_records'], 7)
            self.assertEqual(info['excess_bytes'], 302)
            self.assertEqual(open_file.tell(), 1234)
        # Now test with a BytesIO object. The file pointer should again not
        # be modified.
        with open(filename, 'rb') as open_file:
            open_file_string = io.BytesIO(open_file.read())
        open_file_string.seek(111)
        info = util.get_record_information(open_file_string)
        self.assertEqual(info['filesize'], 5120 - 111)
        self.assertEqual(info['record_length'], 512)
        self.assertEqual(info['number_of_records'], 9)
        self.assertEqual(info['excess_bytes'], 401)
        self.assertEqual(open_file_string.tell(), 111)
        # One more file containing two records.
        filename = os.path.join(self.path, 'data', 'test.mseed')
        info = util.get_record_information(filename)
        self.assertEqual(info['filesize'], 8192)
        self.assertEqual(info['record_length'], 4096)
        self.assertEqual(info['number_of_records'], 2)
        self.assertEqual(info['excess_bytes'], 0)
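What the test above pins down: get_record_information() accepts a file name, an open file object, or a BytesIO buffer, and it never moves the file pointer. A minimal usage sketch along those lines (the path is a placeholder):

import io
from obspy.io.mseed.util import get_record_information

# From a file name.
info = get_record_information("some_file.mseed")
print(info["record_length"], info["number_of_records"])

# From an open file object; the file pointer is left where it was.
with open("some_file.mseed", "rb") as fh:
    pos = fh.tell()
    info = get_record_information(fh)
    assert fh.tell() == pos

# From an in-memory buffer.
with open("some_file.mseed", "rb") as fh:
    info = get_record_information(io.BytesIO(fh.read()))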
Example #4
    def test_read_file_with_microsecond_wrap(self):
        """
        This is not strictly valid but I encountered such a file in practice
        so I guess it happens. Libmseed can also correctly deal with it.

        The test file is a single record with the .0001 seconds field set to
        10000. SEED strictly allows only 0-9999 in this field.
        """
        file = os.path.join(self.path, "data", "microsecond_wrap.mseed")

        with warnings.catch_warnings(record=True) as w_1:
            warnings.simplefilter("always")
            info = util.get_record_information(file)

        self.assertEqual(
            w_1[0].message.args[0],
            'Record contains a fractional seconds (.0001 secs) '
            'of 10000 - the maximum strictly allowed value is '
            '9999. It will be interpreted as one or more '
            'additional seconds.')

        with warnings.catch_warnings(record=True) as w_2:
            warnings.simplefilter("always")
            tr = read(file)[0]

        # First warning is identical.
        self.assertEqual(w_1[0].message.args[0], w_2[0].message.args[0])
        # Second warning is raised by libmseed.
        self.assertEqual(
            w_2[1].message.args[0],
            'readMSEEDBuffer(): Record with offset=0 has a '
            'fractional second (.0001 seconds) of 10000. This '
            'is not strictly valid but will be interpreted as '
            'one or more additional seconds.')

        # Make sure libmseed and the internal ObsPy record parser produce
        # the same result.
        self.assertEqual(info["starttime"], tr.stats.starttime)
        self.assertEqual(info["endtime"], tr.stats.endtime)

        # Reference time as read from the raw file with a hex editor.
        ref_time = UTCDateTime(year=2008,
                               julday=8,
                               hour=4,
                               minute=58,
                               second=5 + 1)
        self.assertEqual(ref_time, info["starttime"])
        self.assertEqual(ref_time, tr.stats.starttime)
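Outside the test scaffolding, the same warning behaviour can be observed with a short sketch (the file name is a placeholder for a record whose .0001 seconds field exceeds 9999):

import warnings
from obspy.io.mseed.util import get_record_information

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    info = get_record_information("microsecond_wrap.mseed")

for w in caught:
    print(w.message)
print(info["starttime"])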
Example #6
def download_and_split_mseed_bulk(client, client_name, chunks, logger):
    """
    Downloads the channels of a list of stations in bulk, saves it to a
    temporary folder and splits it at the record level to obtain the final
    MiniSEED files.

    The big advantage of this approach is that it does not mess with the
    MiniSEED files at all. Each record, including all blockettes, will end
    up in the final files as they are served from the data centers.

    :param client: An active client instance.
    :param client_name: The name of the client instance used for logging
        purposes.
    :param chunks: A list of tuples, each denoting a single MiniSEED chunk.
        Each chunk is a tuple of network, station, location, channel,
        starttime, endtime, and desired filename.
    :param logger: An active logger instance.
    """
    # Create a dictionary of channel ids, each containing a list of
    # intervals, each of which will end up in a separate file.
    filenames = collections.defaultdict(list)
    for chunk in chunks:
        candidate = {
            "starttime": chunk[4],
            "endtime": chunk[5],
            "filename": chunk[6],
            "current_latest_endtime": None,
            "sequence_number": None
        }
        # Should not be necessary if chunks have been deduplicated before but
        # better safe than sorry.
        if candidate in filenames[tuple(chunk[:4])]:
            continue
        filenames[tuple(chunk[:4])].append(candidate)

    sequence_number = [0]

    def get_filename(starttime, endtime, c):
        """
        Helper function finding the corresponding filename in all filenames.

        :param starttime: The start time of the record.
        :param endtime: The end time of the record.
        :param c: A list of candidates.
        """
        # Make two passes. First find all candidates. This assumes that a
        # record cannot be larger than a single desired time interval. This
        # is probably always the case unless somebody wants to download
        # files split into 1-second intervals...
        candidates = [
            _i for _i in c
            if (_i["starttime"] <= starttime <= _i["endtime"]) or (
                _i["starttime"] <= endtime <= _i["endtime"])
        ]
        if not candidates:
            return None

        # If more than one candidate exists, apply some heuristics to find
        # the correct time interval. The main complication arises when the
        # same record is downloaded twice because it overlaps two different
        # requested time intervals.
        if len(candidates) == 2:
            candidates.sort(key=lambda x: x["starttime"])
            first, second = candidates

            # Make sure the assumptions about the type of overlap are correct.
            if starttime > first["endtime"] or endtime < second["starttime"]:
                raise NotImplementedError

            # It must either be the last record of the first, or the first
            # record of the second candidate.
            if first["sequence_number"] is None and \
                    second["sequence_number"] is None:
                candidates = [second]

            # Unlikely to happen. Only if nothing but the very last record
            # of the first interval was available and the second interval
            # was first in the file.
            elif first["sequence_number"] is None:
                candidates = [first]

            # This is fairly likely and requires an additional check with
            # the latest time in the first interval.
            elif second["sequence_number"] is None:
                if starttime <= first["current_latest_endtime"]:
                    candidates = [second]
                else:
                    candidates = [first]

            # Neither are None. Just use the one with the higher sequence
            # number. This probably does not happen. If it happens something
            # else is a bit strange.
            else:
                if first["sequence_number"] > second["sequence_number"]:
                    candidates = [first]
                else:
                    candidates = [second]
        elif len(candidates) >= 2:
            raise NotImplementedError(
                "Please contact the developers. candidates: %s" %
                str(candidates))

        # Finally found the correct chunk
        ret_val = candidates[0]

        # Increment sequence number and make sure the current chunk is aware
        # of it.
        sequence_number[0] += 1
        ret_val["sequence_number"] = sequence_number[0]

        # Also write the time of the last chunk to it if necessary.
        ce = ret_val["current_latest_endtime"]
        if not ce or endtime > ce:
            ret_val["current_latest_endtime"] = endtime

        return ret_val["filename"]

    # The filename is the only element not needed for the actual data request.
    bulk = [list(_i[:-1]) for _i in chunks]
    original_bulk_length = len(bulk)

    # Merge adjacent bulk requests for continuous downloads. This is a bit
    # redundant after splitting them up before, but it eases the logic in
    # the other parts and puts less strain on the data centers' FDSN
    # implementations. It furthermore avoids repeatedly downloading records
    # that are part of two neighbouring time intervals.
    bulk_channels = collections.defaultdict(list)
    for b in bulk:
        bulk_channels[(b[0], b[1], b[2], b[3])].append(b)

    # Merge them.
    for key, value in bulk_channels.items():
        # Sort based on starttime.
        value = sorted(value, key=lambda x: x[4])
        # Merge adjacent.
        cur_bulk = value[0:1]
        for b in value[1:]:
            # Random threshold of 2 seconds. Reasonable for most real world
            # cases.
            if b[4] <= cur_bulk[-1][5] + 2:
                cur_bulk[-1][5] = b[5]
                continue
            cur_bulk.append(b)
        bulk_channels[key] = cur_bulk
    bulk = list(itertools.chain.from_iterable(bulk_channels.values()))

    # Save first to a temporary file, then cut the file into separate files.
    with NamedTemporaryFile() as tf:
        temp_filename = tf.name
        open_files = {}

        client.get_waveforms_bulk(bulk, filename=temp_filename)
        # If that succeeds, split the old file into multiple new ones.
        file_size = os.path.getsize(temp_filename)

        with open(temp_filename, "rb") as fh:
            try:
                while True:
                    if fh.tell() >= (file_size - 256):
                        break
                    info = get_record_information(fh)
                    channel_id = (info["network"], info["station"],
                                  info["location"], info["channel"])

                    # Sometimes the services return something nobody wants...
                    if channel_id not in filenames:
                        fh.read(info["record_length"])
                        continue
                    # Get the best matching filename.
                    filename = get_filename(starttime=info["starttime"],
                                            endtime=info["endtime"],
                                            c=filenames[channel_id])
                    # Again sometimes there are time ranges nobody asked for...
                    if filename is None:
                        fh.read(info["record_length"])
                        continue
                    if filename not in open_files:
                        open_files[filename] = open(filename, "wb")
                    open_files[filename].write(fh.read(info["record_length"]))
            finally:
                for f in open_files.values():
                    try:
                        f.close()
                    except Exception:
                        pass
    logger.info("Client '%s' - Successfully downloaded %i channels (of %i)" %
                (client_name, len(open_files), original_bulk_length))
    return sorted(open_files.keys())
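A hypothetical call sketch, purely to illustrate the chunk layout described in the docstring (network/station codes, time window, output file names and the FDSN node are all made up):

import logging
from obspy import UTCDateTime
from obspy.clients.fdsn import Client

logger = logging.getLogger("mass_downloader")
t0 = UTCDateTime(2020, 1, 1)
chunks = [
    # (network, station, location, channel, starttime, endtime, filename)
    ("BW", "BGLD", "", "EHE", t0, t0 + 3600, "BW.BGLD..EHE_0.mseed"),
    ("BW", "BGLD", "", "EHE", t0 + 3600, t0 + 7200, "BW.BGLD..EHE_1.mseed"),
]
saved_files = download_and_split_mseed_bulk(
    client=Client("IRIS"), client_name="IRIS", chunks=chunks, logger=logger)
print(saved_files)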
Example #7
def download_and_split_mseed_bulk(client, client_name, chunks, logger):
    """
    Downloads the channels of a list of stations in bulk, saves it to a
    temporary folder and splits it at the record level to obtain the final
    MiniSEED files.

    The big advantage of this approach is that it does not mess with the
    MiniSEED files at all. Each record, including all blockettes, will end
    up in the final files as they are served from the data centers.

    :param client: An active client instance.
    :param client_name: The name of the client instance used for logging
        purposes.
    :param chunks: A list of tuples, each denoting a single MiniSEED chunk.
        Each chunk is a tuple of network, station, location, channel,
        starttime, endtime, and desired filename.
    :param logger: An active logger instance.
    """
    # Create a dictionary of channel ids, each containing a list of
    # intervals, each of which will end up in a separate file.
    filenames = collections.defaultdict(list)
    for chunk in chunks:
        filenames[tuple(chunk[:4])].append({
            "starttime": chunk[4],
            "endtime": chunk[5],
            "filename": chunk[6],
            "current_latest_endtime": None,
            "sequence_number": None})

    sequence_number = [0]

    def get_filename(starttime, endtime, c):
        """
        Helper function finding the corresponding filename in all filenames.

        :param starttime: The start time of the record.
        :param endtime: The end time of the record.
        :param c: A list of candidates.
        """
        # Make two passes. First find all candidates. This assumes that a
        # record cannot be larger than a single desired time interval. This
        # is probably always the case unless somebody wants to download
        # files split into 1-second intervals...
        candidates = [
            _i for _i in c if
            (_i["starttime"] <= starttime <= _i["endtime"]) or
            (_i["starttime"] <= endtime <= _i["endtime"])]
        if not candidates:
            return None

        # If more than one candidate exists, apply some heuristics to find
        # the correct time interval. The main complication arises when the
        # same record is downloaded twice because it overlaps two different
        # requested time intervals.
        if len(candidates) == 2:
            candidates.sort(key=lambda x: x["starttime"])
            first, second = candidates

            # Make sure the assumptions about the type of overlap are correct.
            if starttime > first["endtime"] or endtime < second["starttime"]:
                raise NotImplementedError

            # It must either be the last record of the first, or the first
            # record of the second candidate.
            if first["sequence_number"] is None and \
                    second["sequence_number"] is None:
                candidates = [second]

            # Unlikely to happen. Only if nothing but the very last record
            # of the first interval was available and the second interval
            # was first in the file.
            elif first["sequence_number"] is None:
                candidates = [first]

            # This is fairly likely and requires an additional check with
            # the latest time in the first interval.
            elif second["sequence_number"] is None:
                if starttime <= first["current_latest_endtime"]:
                    candidates = [second]
                else:
                    candidates = [first]

            # Neither are None. Just use the one with the higher sequence
            # number. This probably does not happen. If it happens something
            # else is a bit strange.
            else:
                if first["sequence_number"] > second["sequence_number"]:
                    candidates = [first]
                else:
                    candidates = [second]
        elif len(candidates) >= 2:
            raise NotImplementedError

        # Finally found the correct chunk
        ret_val = candidates[0]

        # Increment sequence number and make sure the current chunk is aware
        # of it.
        sequence_number[0] += 1
        ret_val["sequence_number"] = sequence_number[0]

        # Also write the time of the last chunk to it if necessary.
        ce = ret_val["current_latest_endtime"]
        if not ce or endtime > ce:
            ret_val["current_latest_endtime"] = endtime

        return ret_val["filename"]

    # The filename is the only element not needed for the actual data request.
    bulk = [list(_i[:-1]) for _i in chunks]
    original_bulk_length = len(bulk)

    # Merge adjacent bulk requests for continuous downloads. This is a bit
    # redundant after splitting them up before, but it eases the logic in
    # the other parts and puts less strain on the data centers' FDSN
    # implementations. It furthermore avoids repeatedly downloading records
    # that are part of two neighbouring time intervals.
    bulk_channels = collections.defaultdict(list)
    for b in bulk:
        bulk_channels[(b[0], b[1], b[2], b[3])].append(b)

    # Merge them.
    for key, value in bulk_channels.items():
        # Sort based on starttime.
        value = sorted(value, key=lambda x: x[4])
        # Merge adjacent.
        cur_bulk = value[0:1]
        for b in value[1:]:
            # Random threshold of 2 seconds. Reasonable for most real world
            # cases.
            if b[4] <= cur_bulk[-1][5] + 2:
                cur_bulk[-1][5] = b[5]
                continue
            cur_bulk.append(b)
        bulk_channels[key] = cur_bulk
    bulk = list(itertools.chain.from_iterable(bulk_channels.values()))

    # Save first to a temporary file, then cut the file into separate files.
    with NamedTemporaryFile() as tf:
        temp_filename = tf.name
        open_files = {}

        client.get_waveforms_bulk(bulk, filename=temp_filename)
        # If that succeeds, split the old file into multiple new ones.
        file_size = os.path.getsize(temp_filename)

        with open(temp_filename, "rb") as fh:
            try:
                while True:
                    if fh.tell() >= (file_size - 256):
                        break
                    info = get_record_information(fh)
                    channel_id = (info["network"], info["station"],
                                  info["location"], info["channel"])

                    # Sometimes the services return something nobody wants...
                    if channel_id not in filenames:
                        fh.read(info["record_length"])
                        continue
                    # Get the best matching filename.
                    filename = get_filename(
                        starttime=info["starttime"], endtime=info["endtime"],
                        c=filenames[channel_id])
                    # Again sometimes there are time ranges nobody asked for...
                    if filename is None:
                        fh.read(info["record_length"])
                        continue
                    if filename not in open_files:
                        open_files[filename] = open(filename, "wb")
                    open_files[filename].write(fh.read(info["record_length"]))
            finally:
                for f in open_files.values():
                    try:
                        f.close()
                    except Exception:
                        pass
    logger.info("Client '%s' - Successfully downloaded %i channels (of %i)" % (
        client_name, len(open_files), original_bulk_length))
    return sorted(open_files.keys())
Example #8
def set_mseed_time_correction(mseed_filename, time_corr_secs):
    """Set 'Time correction applied' flag and 'Time correction' value in every
    'Fixed section of Data Header' that precedes each data record of a
    time-corrected miniSEED file.

    Args:
        mseed_filename (str): Time-corrected miniSEED filename
        time_corr_secs (float): Time correction [seconds]

    Result:
        modifies miniSEED file (see warnings and notes)

    Warnings:
    * Unsets all other 'Activity', 'I/O and clock', and 'Data Quality' flags.
    * Only adds time correction to header; does not also adjust start/end times.

    Verifications:
    [1] Verify the 'Timing correction applied' FLAG has been set for N records:

        `>> obspy.io.mseed.util.get_flags(mseed_filename)`

    [2] Verify the 'Timing correction' VALUE has been noted for N records:

        `$ python -m obspy.io.mseed.scripts.recordanalyzer -a mseed_filename`

    Notes:
    * Time correction value in [1] appears to be a bug/percentage?
    * Time correction value in [2] is in units of 0.0001 seconds.
    * In [2] it is unknown what 'Activity flags: 2' means.

    """
    ## All page numbers refer to the SEED Format Version 2.4 manual
    ## http://www.fdsn.org/pdf/SEEDManual_V2.4.pdf

    # Time correction values are in units of 0.0001 (1e-4) seconds (pg. 109)
    time_corr_one_ten_thous = np.int32(time_corr_secs / 0.0001)

    # Set "Time correction applied" [Bit 1] (Note 12;  pg. 108)
    # Warning: this unsets any other flags that are set
    flags = {'...': {'activity_flags': {'time_correction': True}}}
    obspy_util.set_flags_in_fixed_headers(mseed_filename, flags)

    # Determine how many records (and thus fixed headers) must be updated
    # The second argument, `offset` is bytes into the mseed file (start at 0)
    record_info = obspy_util.get_record_information(mseed_filename, offset=0)
    number_of_records = record_info.get('number_of_records')

    # Loop over every record and apply the proper bits at the proper offsets
    record_offset = 0
    with open(mseed_filename, 'rb+') as mseed_file:
        for record_number in range(number_of_records):
            # Retrieve info concerning record at current offset
            record_info = obspy_util.get_record_information(mseed_filename,
                                                            offset=record_offset)

            # Format a binary string representing the time correction value
            # Type: 'LONG' (SEED manual) == 'l' (`struct` builtin)
            byte_order = record_info.get('byteorder')
            binstr_fmt = byte_order + 'l'
            time_correction_binstr = struct.pack(binstr_fmt, time_corr_one_ten_thous)

            # Set 'Time correction' value (Note 17; pg. 109)
            # Position: bytes 40-43 of the fixed header that precedes each record
            # The `record_offset` is in bytes relative to the start of the file
            time_correction_offset = record_offset + 40
            mseed_file.seek(time_correction_offset, 0)
            mseed_file.write(time_correction_binstr)

            # Find the offset of the next record relative to the start of the file
            record_offset += record_info.get('record_length')
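A hypothetical usage sketch (the file name and correction value are placeholders): apply the correction, then inspect the fixed headers with the ObsPy helper mentioned in the docstring's verification notes.

from obspy.io.mseed import util as obspy_util

set_mseed_time_correction("corrected.mseed", time_corr_secs=0.25)

# Summarize the header flags of all records; see verification [1] above.
print(obspy_util.get_flags("corrected.mseed"))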
Example #9
# The original snippet starts mid-function; the imports and the signature
# below are hypothetical reconstructions based on the names used in the body.
import struct
from obspy.io.mseed import util

def _read_binary_value(f, fmt, offset, whence=0):
    size = struct.calcsize(fmt)
    f.seek(offset, whence)
    binstr = f.read(size)
    val = struct.unpack(fmt, binstr)[0]

    return val


filename = 'test_data/20201226T005647.08_5FE6DF46.MER.DET.WLT5.mseed'

# Set "time correction applied" activity flag to True for all traces
flags = {'...': {'activity_flags': {'time_correction': True}}}
util.set_flags_in_fixed_headers(filename, flags)

# Get the byte ordering (big/little-endian)
rec_info = util.get_record_information(filename)
byte_order = rec_info.get('byteorder')
""" SEED Conventions

'How Binary Data Fields are Described in This Manual,' pp. 33-34

Field   #Bits    Description
BYTE        8    Unsigned quantity
UWORD      16    Unsigned quantity
LONG       32    Unsigned quantity

... and ...

'Fixed Section of Data Header (48 bytes),' pp. 108-110

Note    Length*   Start    End
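To complement the truncated snippet, a small hypothetical read-back built on the filename and byte_order already defined above: fetch the 'Time correction' LONG stored at bytes 40-43 of the first fixed header (the offsets follow the SEED layout quoted in Example #8).

import struct

with open(filename, 'rb') as f:
    f.seek(40, 0)
    time_corr = struct.unpack(byte_order + 'l', f.read(4))[0]
print("Time correction of first record (0.0001 s units):", time_corr)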
Example #10
def download_and_split_mseed_bulk(client, client_name, starttime, endtime,
                                  stations, logger):
    """
    Downloads the channels of a list of stations in bulk, saves it in the
    temp folder and splits it at the record level to obtain the final
    miniseed files.

    :param client:
    :param client_name:
    :param starttime:
    :param endtime:
    :param stations:
    :param temp_folder:
    :return:
    """
    bulk = []
    filenames = {}
    for station in stations:
        for channel in station.channels:
            net, sta, loc, chan = station.network, station.station, \
                channel.location, channel.channel
            filenames["%s.%s.%s.%s" % (net, sta, loc, chan)] = \
                channel.mseed_filename
            bulk.append((net, sta, loc, chan, starttime, endtime))

    temp_filename = NamedTemporaryFile().name

    try:
        client.get_waveforms_bulk(bulk, filename=temp_filename)

        open_files = {}
        # If that succeeds, split the old file into multiple new ones.
        file_size = os.path.getsize(temp_filename)
        with open(temp_filename, "rb") as fh:
            try:
                while True:
                    if fh.tell() >= (file_size - 256):
                        break
                    info = get_record_information(fh)

                    position = fh.tell()
                    fh.seek(position + 8, 0)
                    data = fh.read(12)
                    info["station"] = data[:5].strip().decode()
                    info["location"] = data[5:7].strip().decode()
                    info["channel"] = data[7:10].strip().decode()
                    info["network"] = data[10:12].strip().decode()
                    fh.seek(position, 0)

                    channel_id = "%s.%s.%s.%s" % (
                        info["network"], info["station"], info["location"],
                        info["channel"])
                    # Sometimes the services return something no one wants.
                    if channel_id not in filenames:
                        fh.read(info["record_length"])
                        continue
                    filename = filenames[channel_id]
                    if filename not in open_files:
                        open_files[filename] = open(filename, "wb")
                    open_files[filename].write(fh.read(info["record_length"]))
            finally:
                for f in open_files.values():
                    try:
                        f.close()
                    except Exception:
                        pass
    finally:
        try:
            os.remove(temp_filename)
        except Exception:
            pass
    logger.info("Client '%s' - Successfully downloaded %i channels (of %i)" % (
        client_name, len(open_files), len(bulk)))
    return open_files.keys()