Example #1
 def staging_required(self):
     maxbytes = getMaxCmdSize()
     if self.argsize > maxbytes:
         msg = "Command %s larger than %d bytes. Staging required."
         log.debug(msg, self.name, maxbytes)
         return False
     else:
         return True
Example #2
    def _run(self):
        try:
            while True:
                gevent.sleep(0)
                topic, message = self.sub.recv_string().split(' ', 1)
                log.debug('{} received message from {}'.format(self, topic))
                self.process(message, topic=topic)

        except Exception as e:
            log.error('Exception raised in {} while receiving messages: {}'
                       .format(self, e))
            raise
Example #3
 def check(self, defn):
     """Performs the uniqueness check against the value list
     maintained in this rule object.
     """
     val = getattr(defn, self.attr)
     if val is not None and val in self.val_list:
         self.messages.append(self.msg % str(val))
         # TODO self.messages.append("TBD location message")
         self.valid = False
     elif val is not None:
         self.val_list.append(val)
         log.debug(self.val_list)
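
The check above keeps a running list of previously seen values and flags any repeat. A minimal standalone sketch of the same pattern follows; the class and attribute names are illustrative, not AIT's actual UniquenessRule API.

class SimpleUniquenessCheck:
    def __init__(self, attr, msg):
        self.attr = attr        # attribute to inspect on each definition
        self.msg = msg          # error template, e.g. "Duplicate name: %s"
        self.val_list = []      # values seen so far
        self.messages = []      # accumulated error messages
        self.valid = True

    def check(self, defn):
        val = getattr(defn, self.attr, None)
        if val is not None and val in self.val_list:
            self.messages.append(self.msg % str(val))
            self.valid = False
        elif val is not None:
            self.val_list.append(val)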
Example #4
def check_yaml_timestamps(yaml_file_name, cache_name):
    """
    Checks YAML configuration file timestamp and any 'included' YAML configuration file's
    timestamp against the pickle cache file timestamp.
    The term 'dirty' means that a yaml config file has a more recent timestamp than the
    pickle cache file.  If a pickle cache file is found to be 'dirty' (return true) the
    pickle cache file is not up-to-date, and a new pickle cache file must be generated.
    If the cache file in not 'dirty' (return false) the existing pickle binary will
    be loaded.

    param: yaml_file_name: str
        Name of the yaml configuration file to be tested
    param: cache_name: str
        Filename with path to the cached pickle file for this config file.

    return: boolean
        True:
            Indicates 'dirty' pickle cache: i.e. the file is not current, generate new binary
        False
            Load current cache file

    """
    # If no pickle cache exists return True to make a new one.
    if not os.path.exists(cache_name):
        log.debug('No pickle cache exists, make a new one')
        return True
    # Has the yaml config file been modified since the creation of the pickle cache?
    if os.path.getmtime(yaml_file_name) > os.path.getmtime(cache_name):
        log.info(
            f'{yaml_file_name} modified - make a new binary pickle cache file.'
        )
        return True
    # Get the directory of the yaml config file to be parsed
    dir_name = os.path.dirname(yaml_file_name)
    # Open the yaml config file to look for '!includes' to be tested on the next iteration
    with open(yaml_file_name, "r") as file:
        try:
            for line in file:
                if not line.strip().startswith("#") and "!include" in line:
                    check = check_yaml_timestamps(
                        os.path.join(dir_name,
                                     line.strip().split(" ")[2]), cache_name)
                    if check:
                        return True
        except RecursionError as e:
            print(
                f'ERROR: {e}: Infinite loop: check that yaml config files are not looping '
                f'back and forth to one another through the "!include" statements.'
            )
    return False
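
A hedged sketch of the caller side, assuming a build_fn that parses the YAML into the object to be cached; the helper name load_or_rebuild and the direct use of pickle.dump/pickle.load are assumptions for illustration, not AIT's actual cache API.

import pickle

def load_or_rebuild(yaml_file_name, cache_name, build_fn):
    """Load the pickle cache unless the YAML (or an !include'd file) is newer."""
    if check_yaml_timestamps(yaml_file_name, cache_name):
        obj = build_fn(yaml_file_name)              # cache is 'dirty': rebuild it
        with open(cache_name, 'wb') as out:
            pickle.dump(obj, out)
        return obj
    with open(cache_name, 'rb') as cached:          # cache is current: just load it
        return pickle.load(cached)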
Example #5
    def schema_val(self, messages=None):
        "Perform validation with processed YAML and Schema"
        self._ymlproc = YAMLProcessor(self._ymlfile)
        self._schemaproc = SchemaProcessor(self._schemafile)
        valid = True

        log.debug(
            "BEGIN: Schema-based validation for YAML '%s' with schema '%s'",
            self._ymlfile,
            self._schemafile,
        )

        # Make sure the yml and schema have been loaded
        if self._ymlproc.loaded and self._schemaproc.loaded:
            # Load all of the yaml documents. Could be more than one in the same YAML file.
            for docnum, data in enumerate(
                    yaml.load_all(self._ymlproc.data, Loader=yaml.Loader)):

                # Since YAML allows integer keys but JSON does not, we need to first
                # dump the data as a JSON string to encode all of the potential integers
                # as strings, and then read it back out into the YAML format. Kind of
                # a clunky workaround but it works as expected.
                data = yaml.load(json.dumps(data), Loader=yaml.Loader)

                # Now we want to get a validator ready
                v = jsonschema.Draft4Validator(self._schemaproc.data)

                # Loop through the errors (if any) and set valid = False if any are found
                # Display the error message
                for error in v.iter_errors(data):
                    msg = ("Schema-based validation failed for YAML file '" +
                           self._ymlfile + "'")
                    self.ehandler.process(docnum, self._ymlproc.doclines,
                                          error, messages)
                    valid = False

                if not valid:
                    log.error(msg)

        elif not self._ymlproc.loaded:
            raise util.YAMLError("YAML must be loaded in order to validate.")
        elif not self._schemaproc.loaded:
            raise jsonschema.SchemaError(
                "Schema must be loaded in order to validate.")

        log.debug("END: Schema-based validation complete for '%s'",
                  self._ymlfile)
        return valid
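
The integer-key workaround in the loop above can be seen in isolation: dumping the parsed data to a JSON string coerces integer keys (which YAML allows but JSON does not) into strings before the data is re-loaded. A standalone snippet with made-up keys:

import json
import yaml

data = yaml.load('0x01: NO_OP\n0x02: RESET\n', Loader=yaml.Loader)
print(data)        # {1: 'NO_OP', 2: 'RESET'}  -- PyYAML resolves hex keys to ints
normalized = yaml.load(json.dumps(data), Loader=yaml.Loader)
print(normalized)  # {'1': 'NO_OP', '2': 'RESET'}  -- keys are now strings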
Example #6
    def publish(self, msg, topic=None):
        """
        Publishes input message with client name as the topic if the
        topic parameter is not provided.

        Publishes input message with topic as the topic if the
        topic parameter is provided. Topic can be an arbitrary string.
        """
        if not topic:
            topic = self.name
        encoded = utils.encode_message(topic, msg)
        if encoded is None:
            log.error(f"{self} unable to encode msg {msg} for send.")
            return

        self.pub.send_multipart(encoded)
        log.debug("Published message from {}".format(self))
Example #7
    def _run(self):
        try:
            while True:
                gevent.sleep(0)
                msg = self.sub.recv_multipart()
                topic, message = utils.decode_message(msg)
                if topic is None or message is None:
                    log.error(
                        f"{self} received invalid topic or message. Skipping")
                    continue

                log.debug("{} received message from {}".format(self, topic))
                self.process(message, topic=topic)

        except Exception as e:
            log.error(
                "Exception raised in {} while receiving messages: {}".format(
                    self, e))
            raise
Example #8
    def _decode_table_row(self, in_stream, raw=False):
        """Decode a table row from an input stream

        Attempt to read and decode a row of data from an input stream. If the
        stream runs out of data on a "seemingly invalid" column (i.e., any column
        but the first), an exception is raised. Similarly, if any column decodes
        to None, an exception is raised.

        Arguments:
            in_stream: A file-like object from which to read data.

            raw: Boolean indicating whether raw or enumerated values should be returned.
                (default: False which returns enumerated values if possible)

        Raises:
            ValueError: When an EOFError is encountered while decoding any column
                but the first or if any column decode returns None.
        """

        row = []
        for i, col in enumerate(self.coldefns):
            try:
                row.append(col.decode(in_stream, raw=raw))
            except EOFError:
                if i == 0:
                    log.debug("Table processing stopped when EOF reached")
                    return None
                else:
                    msg = (
                        "Malformed table data provided for decoding. End of file "
                        f"reached when processing table column {i} {col.name}")
                    log.info(msg)
                    raise ValueError(msg)

            if row[-1] is None:
                msg = f"Failed to decode table column {col.name}"
                log.error(msg)
                raise ValueError(msg)

        return row
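
The EOF convention described in the docstring (EOF on the first column means a clean end of table; EOF on any later column means a malformed row) can be shown with a small standalone sketch. The fixed-width struct columns below are a stand-in for the real column definitions.

import io
import struct

def decode_row(stream, ncols=3):
    row = []
    for i in range(ncols):
        data = stream.read(2)                    # each column is one big-endian uint16
        if len(data) < 2:
            if i == 0:
                return None                      # clean EOF between rows: end of table
            raise ValueError(f'EOF while decoding column {i}')  # malformed row
        row.append(struct.unpack('>H', data)[0])
    return row

stream = io.BytesIO(struct.pack('>HHH', 1, 2, 3))
print(decode_row(stream))   # [1, 2, 3]
print(decode_row(stream))   # None -- EOF hit on the first column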
Example #9
    def content_val(self, ymldata=None, messages=None):
        """Validates the Command Dictionary to ensure the contents for each of the fields
        meets specific criteria regarding the expected types, byte ranges, etc."""

        # Instantiate the YAML processor with automatic YAML loading turned off
        self._ymlproc = YAMLProcessor(self._ymlfile, False)

        log.debug("BEGIN: Content-based validation of Command dictionary")
        if ymldata is not None:
            cmddict = ymldata
        else:
            cmddict = cmd.CmdDict(self._ymlfile)

        try:
            # initialize the document number; it is used to track the line
            # numbers and section where validation fails
            docnum = 0

            # boolean to hold argument validity
            argsvalid = True

            # list of rules to validate against
            rules = []

            # set the command rules
            #
            # set uniqueness rule for command names
            rules.append(
                UniquenessRule("name", "Duplicate command name: %s", messages))

            # set uniqueness rule for opcodes
            rules.append(
                UniquenessRule("opcode", "Duplicate opcode: %s", messages))

            for key in cmddict.keys():
                cmddefn = cmddict[key]
                for rule in rules:
                    rule.check(cmddefn)

                # list of argument rules to validate against
                argrules = []

                # set rules for command arguments
                #
                # set uniqueness rule for opcodes
                argrules.append(
                    UniquenessRule(
                        "name",
                        "Duplicate argument name: " + cmddefn.name + ".%s",
                        messages,
                    ))

                # set type rule for arg.type
                argrules.append(
                    TypeRule(
                        "type",
                        "Invalid argument type for argument: " + cmddefn.name +
                        ".%s",
                        messages,
                    ))

                # set argument size rule for arg.type.nbytes
                argrules.append(
                    TypeSizeRule(
                        "nbytes",
                        "Invalid argument size for argument: " + cmddefn.name +
                        ".%s",
                        messages,
                    ))

                # set argument enumerations rule to check no enumerations contain un-quoted YAML special variables
                argrules.append(
                    EnumRule(
                        "enum",
                        "Invalid enum value for argument: " + cmddefn.name +
                        ".%s",
                        messages,
                    ))

                # set byte order rule to ensure proper ordering of arguments
                argrules.append(
                    ByteOrderRule(
                        "bytes",
                        "Invalid byte order for argument: " + cmddefn.name +
                        ".%s",
                        messages,
                    ))
                #
                ###

                argdefns = cmddefn.argdefns
                for arg in argdefns:
                    # check argument rules
                    for rule in argrules:
                        rule.check(arg)

                # check if argument rule failed, if so set the validity to False
                if not all(r.valid is True for r in argrules):
                    argsvalid = False

            log.debug("END: Content-based validation complete for '%s'",
                      self._ymlfile)

            # check validity of all command rules and argument validity
            return all(rule.valid is True for rule in rules) and argsvalid

        except util.YAMLValidationError as e:
            # Display the error message
            if messages is not None:
                if len(e.message) < 128:
                    msg = ("Validation Failed for YAML file '" +
                           self._ymlfile + "': '" + str(e.message) + "'")
                else:
                    msg = "Validation Failed for YAML file '" + self._ymlfile + "'"

                log.error(msg)
                self.ehandler.process(docnum, self.ehandler.doclines, e,
                                      messages)
                return False
Example #10
    def content_val(self, ymldata=None, messages=None):
        """Validates the Telemetry Dictionary to ensure the contents for each of the fields
        meets specific criteria regarding the expected types, byte ranges, etc."""

        log.debug("BEGIN: Content-based validation of Telemetry dictionary")
        if ymldata is not None:
            tlmdict = ymldata
        else:
            tlmdict = tlm.TlmDict(self._ymlfile)

        try:
            # boolean to hold field validity
            fldsvalid = True

            # list of rules to validate against
            rules = []

            # set the packet rules
            #
            # set uniqueness rule for packet names
            rules.append(
                UniquenessRule("name", "Duplicate packet name: %s", messages))

            # Loop through the keys and check each PacketDefinition
            for key in tlmdict.keys():
                pktdefn = tlmdict[key]
                # check the telemetry packet rules
                for rule in rules:
                    rule.check(pktdefn)

                # list of field rules to validate against
                fldrules = []

                # set rules for telemetry fields
                #
                # set uniqueness rule for field name
                fldrules.append(
                    UniquenessRule(
                        "name",
                        "Duplicate field name: " + pktdefn.name + ".%s",
                        messages,
                    ))

                # set type rule for field.type
                fldrules.append(
                    TypeRule(
                        "type",
                        "Invalid field type for field: " + pktdefn.name +
                        ".%s",
                        messages,
                    ))

                # set field size rule for field.type.nbytes
                fldrules.append(
                    TypeSizeRule(
                        "nbytes",
                        "Invalid field size for field: " + pktdefn.name +
                        ".%s",
                        messages,
                    ))

                # set field enumerations rule to check no enumerations contain un-quoted YAML special variables
                fldrules.append(
                    EnumRule(
                        "enum",
                        "Invalid enum value for field: " + pktdefn.name +
                        ".%s",
                        messages,
                    ))
                #
                ###

                flddefns = pktdefn.fields
                for fld in flddefns:
                    # check field rules
                    for rule in fldrules:
                        rule.check(fld)

                # check if field rule failed, if so set the validity to False
                if not all(r.valid is True for r in fldrules):
                    fldsvalid = False

            log.debug("END: Content-based validation complete for '%s'",
                      self._ymlfile)

            # check validity of all packet rules and field validity
            return all(rule.valid is True for rule in rules) and fldsvalid

        except util.YAMLValidationError as e:
            # Display the error message
            if messages is not None:
                if len(e.message) < 128:
                    msg = ("Validation Failed for YAML file '" +
                           self._ymlfile + "': '" + str(e.message) + "'")
                else:
                    msg = "Validation Failed for YAML file '" + self._ymlfile + "'"

                log.error(msg)
                self.ehandler.process(self.ehandler.doclines, e, messages)
                return False
Example #11
    def pretty(self, start, end, e, messages=None):
        """Pretties up the output error message so it is readable
        and designates where the error came from"""

        log.debug("Displaying document from lines '%i' to '%i'", start, end)

        errorlist = []
        if len(e.context) > 0:
            errorlist = e.context
        else:
            errorlist.append(e)

        for error in errorlist:
            validator = error.validator

            if validator == "required":
                # Handle required fields
                msg = error.message
                messages.append("Between lines %d - %d. %s" %
                                (start, end, msg))
            elif validator == "additionalProperties":
                # Handle additional properties not allowed
                if len(error.message) > 256:
                    msg = error.message[:253] + "..."
                else:
                    msg = error.message
                    messages.append("Between lines %d - %d. %s" %
                                    (start, end, msg))
            elif len(error.relative_path) > 0:
                # Handle other cases where we can loop through the lines

                # get the JSON path to traverse through the file
                jsonpath = error.relative_path
                array_index = 0

                current_start = start
                foundline = 0
                found = False

                context = collections.deque(maxlen=20)
                tag = "        <<<<<<<<< Expects: %s <<<<<<<<<\n" ""
                for cnt, _path in enumerate(error.relative_path):

                    # Need to set the key we are looking for, and then check the array count
                    # if it is an array, we have some interesting checks to do
                    if int(cnt) % 2 == 0:
                        # we know we have some array count
                        # array_index keeps track of the array count we are looking for or number
                        # of matches we need to skip over before we get to the one we care about

                        # check if previous array_index > 0. if so, then we know we need to use
                        # that one to track down the specific instance of this nested key.
                        # later on, we utilize this array_index loop through
                        # if array_index == 0:
                        array_index = jsonpath[cnt]

                        match_count = 0
                        continue
                    elif int(cnt) % 2 == 1:
                        # we know we have some key name
                        # current_key keeps track of the key we are looking for in the JSON Path
                        current_key = jsonpath[cnt]

                    for linenum in range(current_start, end):
                        line = linecache.getline(self.ymlfile, linenum)

                        # Check if line contains the error
                        if ":" in line:
                            l_split = line.split(":")
                            key = l_split[0]
                            value = ":".join(l_split[1:])

                            # TODO:
                            # Handle maxItems TBD
                            # Handle minItems TBD
                            # Handle in-order (bytes) TBD
                            # Handle uniqueness TBD

                            # Handle cases where key in yml file is hexadecimal
                            try:
                                key = int(key.strip(), 16)
                            except ValueError:
                                key = key.strip()

                            if str(key) == current_key:
                                # check if we are at our match_count and end of the path
                                if match_count == array_index:
                                    # check if we are at end of the jsonpath
                                    if cnt == len(jsonpath) - 1:
                                        # we are at the end of the path so let's stop here
                                        if error.validator == "type":
                                            if value.strip() == str(
                                                    error.instance):
                                                errormsg = (
                                                    "Value '%s' should be of type '%s'"
                                                    % (
                                                        error.instance,
                                                        str(error.
                                                            validator_value),
                                                    ))
                                                line = line.replace(
                                                    "\n", (tag % errormsg))
                                                foundline = linenum
                                                found = True
                                            elif (value.strip() == ""
                                                  and error.instance is None):
                                                errormsg = "Missing value for %s." % key
                                                line = line.replace(
                                                    "\n", (tag % errormsg))
                                                foundline = linenum
                                                found = True

                                    elif not found:
                                        # print "EXTRA FOO"
                                        # print match_count
                                        # print array_index
                                        # print current_key
                                        # print line
                                        # otherwise change the start to the current line
                                        current_start = linenum
                                        break

                                match_count += 1

                        # for the context queue, we want to get the error to appear in
                        # the middle of the error output. to do so, we will only append
                        # to the queue in 2 cases:
                        #
                        # 1. before we find the error (found == False). we can
                        #    just keep pushing on the queue until we find it in the YAML.
                        # 2. once we find the error (found == True), we just want to push
                        #    onto the queue until the line is in the middle
                        if not found or (found and context.maxlen >
                                         (linenum - foundline) * 2):
                            context.append(line)
                        elif found and context.maxlen <= (linenum -
                                                          foundline) * 2:
                            break

                    # Loop through the queue and generate a readable msg output
                    out = ""
                    for line in context:
                        out += line

                    if foundline:
                        msg = "Error found on line %d in %s:\n\n%s" % (
                            foundline,
                            self.ymlfile,
                            out,
                        )
                        messages.append(msg)

                        # reset the line it was found on and the context
                        foundline = 0
                        context.clear()

                    linecache.clearcache()
            else:
                messages.append(error.message)
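
The comment block about the context queue describes filling a bounded deque so the offending line lands near the middle of the printed window. A minimal standalone sketch of that centering idea (the helper below is illustrative, not part of AIT):

import collections

def context_window(lines, error_index, maxlen=20):
    """Keep a bounded window of lines with the error line roughly centered."""
    context = collections.deque(maxlen=maxlen)
    found = False
    for i, line in enumerate(lines):
        if i == error_index:
            found = True
        if not found or maxlen > (i - error_index) * 2:
            context.append(line)   # keep appending until half the window trails the error
        else:
            break                  # enough lines now follow the error line
    return list(context)

lines = ['line %d' % n for n in range(100)]
print(context_window(lines, error_index=50, maxlen=6))
# ['line 47', 'line 48', 'line 49', 'line 50', 'line 51', 'line 52']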
Example #12
from bottle import request, Bottle
import gevent
import gevent.monkey
import gevent.pool
import gevent.socket

from ait.core import pcap, log

gevent.monkey.patch_all()

RAW_SOCKET_FD = None
try:
    import rawsocket
    RAW_SOCKET_FD = rawsocket.rawsocket_fd()
except ImportError:
    log.debug('The rawsocket library cannot be imported. '
              'Defaulting to the non-rawsocket approach.')
except IOError:
    log.info('Unable to spawn rawsocket-helper. '
             'This may be a permissions issue (not SUID root?). '
             'Defaulting to non-rawsocket approach.')

ETH_P_IP = 0x0800
ETH_P_ALL = 0x0003
ETH_PROTOCOL = ETH_P_ALL


class SocketStreamCapturer(object):
    ''' Class for logging socket data to a PCAP file. '''
    def __init__(self, capture_handlers, address, conn_type):
        '''
        Args:
Example #13
 def publish(self, msg):
     self.pub.sendto(msg, ("localhost", int(self.out_port)))
     log.debug("Published message from {}".format(self))
Example #14
def main():
    log.begin()

    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument("--all", action="store_true", help="output all fields/values")

    parser.add_argument(
        "--csv",
        default="output.csv",
        metavar="</path/to/output/csv>",
        help="Output as CSV with filename",
    )

    parser.add_argument(
        "--fields",
        metavar="</path/to/fields/file>",
        help="path to the file containing all fields to query, separated by newline.",
    )

    parser.add_argument(
        "--packet", required=True, help="Packet name from telemetry dictionary specified in config file."
    )

    parser.add_argument(
        "--time_field",
        help=(
            "Time field to use for time range comparisons. Ground receipt time "
            "will be used if nothing is specified."
        ),
    )

    parser.add_argument(
        "--stime",
        help=(
            "Datetime in file to start collecting the data values. Defaults to "
            "beginning of pcap. Expected format: YYYY-MM-DDThh:mm:ssZ"
        ),
    )

    parser.add_argument(
        "--etime",
        help=(
            "Datetime in file to end collecting the data values. Defaults to end "
            "of pcap. Expected format: YYYY-MM-DDThh:mm:ssZ"
        ),
    )

    parser.add_argument(
        "pcap", nargs="*", help=("PCAP file(s) containing telemetry packets")
    )

    args = parser.parse_args()

    args.ground_time = True
    if args.time_field is not None:
        args.ground_time = False

    tlmdict = tlm.getDefaultDict()
    defn = None

    try:
        if tlmdict is not None:
            defn = tlmdict[args.packet]
    except KeyError:
        log.error('Packet "%s" not defined in telemetry dictionary.' % args.packet)
        log.end()
        sys.exit(2)

    if not args.all and args.fields is None:
        log.error(
            "Must provide fields file with --fields or specify that all fields should be queried with --all"
        )
        log.end()
        sys.exit(2)

    if args.all:
        fields = [flddefn.name for flddefn in defn.fields]
    else:
        # Parse the fields file into a list
        with open(args.fields, "r") as stream:
            fields = [fldname.strip() for fldname in stream.readlines()]

    not_found = False

    # TODO Rework this into the CSV generation. Not here.
    # Duplicating effort
    for fldname in fields:
        raw = fldname.split(".")
        if fldname not in defn.fieldmap and (
            len(raw) == 2 and raw[0] != "raw" or raw[1] not in defn.fieldmap
        ):
            not_found = True
            log.error('No telemetry point named "%s"' % fldname)

    if not_found:
        log.end()
        sys.exit(2)

    if args.stime:
        start = datetime.strptime(args.stime, dmc.ISO_8601_Format)
    else:
        start = dmc.GPS_Epoch

    if args.etime:
        stop = datetime.strptime(args.etime, dmc.ISO_8601_Format)
    else:
        stop = datetime.utcnow()

    # Append time to beginning of each row
    if not args.ground_time:
        fields.insert(0, args.time_field)
    else:
        fields.insert(0, "Ground Receipt Time")

    csv_file = None
    csv_writer = None
    npackets = 0
    if args.csv:
        csv_file = open(args.csv, "w")
        csv_writer = csv.writer(csv_file)

    output(csv_writer, fields)

    # If we're comparing off ground receipt time we need to drop the header label to avoid
    # indexing errors when processing the fields.
    if args.ground_time:
        fields = fields[1:]

    rowcnt = 0

    for filename in args.pcap:
        log.debug("Processing %s" % filename)

        with pcap.open(filename, "rb") as stream:
            header, data = stream.read()

            while data:
                packet = tlm.Packet(defn, data)

                comp_time = (
                    header.timestamp
                    if args.ground_time
                    else getattr(packet, args.time_field)
                )
                if start < comp_time < stop:
                    row = []
                    for field in fields:
                        try:
                            # check if raw value requested
                            _raw = False
                            names = field.split(".")
                            if len(names) == 2 and names[0] == "raw":
                                field = names[1]
                                _raw = True

                            field_val = packet._getattr(field, raw=_raw)

                            if hasattr(field_val, "name"):
                                field_val = field_val.name
                            else:
                                field_val = str(field_val)

                        except KeyError:
                            log.debug("%s not found in Packet" % field)
                            field_val = None
                        except ValueError:
                            # enumeration not found. just get the raw value
                            field_val = packet._getattr(field, raw=True)

                        row.append(field_val)

                    if args.ground_time:
                        row = [comp_time] + row

                    rowcnt += 1
                    output(csv_writer, row)

                npackets += 1
                header, data = stream.read()

    log.debug("Parsed %s packets." % npackets)

    csv_file.close()

    if rowcnt == 0:
        os.remove(args.csv)

    log.end()
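
The --fields file read above lists one field name per line; prefixing a name with "raw." requests the unenumerated value, which the loop over fields handles by splitting on the dot. A hypothetical fields file (the field names are made up, not from any real dictionary):

VOLTAGE_A
raw.VOLTAGE_A
TEMP_SENSOR_1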
Example #15
 def publish(self, msg):
     """
     Publishes input message with client name as topic.
     """
     self.pub.send("{} {}".format(self.name, msg))
     log.debug('Published message from {}'.format(self))
Example #16
 def handle(self, packet, address):
     # This function is provided for the gs.DatagramServer class
     log.debug('{} received message from port {}'.format(self, address))
     self.process(packet)
Example #17
 def publish(self, msg):
     self.pub.sendto(msg, ('localhost', int(self.out_port)))
     log.debug('Published message from {}'.format(self))
Example #18
import gevent.monkey
import gevent.pool
import gevent.socket

from ait.core import pcap, log

gevent.monkey.patch_all()

RAW_SOCKET_FD = None
try:
    import rawsocket  # type: ignore

    RAW_SOCKET_FD = rawsocket.rawsocket_fd()
except ImportError:
    log.debug(  # type: ignore
        "The rawsocket library cannot be imported. "
        "Defaulting to the non-rawsocket approach.")
except IOError:
    log.info(  # type: ignore
        "Unable to spawn rawsocket-helper. "
        "This may be a permissions issue (not SUID root?). "
        "Defaulting to non-rawsocket approach.")

ETH_P_IP = 0x0800
ETH_P_ALL = 0x0003
ETH_PROTOCOL = ETH_P_ALL


class SocketStreamCapturer(object):
    """Class for logging socket data to a PCAP file."""
    def __init__(self, capture_handlers, address, conn_type):
Example #19
def main():
    log.begin()

    description = """Parses 1553 telemetry into CSV file."""

    arguments = {
        '--all': {
            'action': 'store_true',
            'help': 'output all fields/values',
        },
        '--csv': {
            'type': str,
            'default': 'output.csv',
            'metavar': '</path/to/output/csv>',
            'help': 'Output as CSV with filename'
        },
        '--fields': {
            'type': str,
            'metavar': '</path/to/fields/file>',
            'help':
            'file containing all fields to query, separated by newline.'
        },
        '--packet': {
            'type': str,
            'required': True,
            'help': 'Packet name from the telemetry dictionary specified in the config file.'
        },
        '--time_field': {
            'type':
            str,
            'help':
            'Time field to use for time range comparisons. Ground receipt time will be used if nothing is specified.'
        },
        '--stime': {
            'type':
            str,
            'help':
            'Datetime in file to start collecting the data values. Defaults to beginning of pcap. Expected format: YYYY-MM-DDThh:mm:ssZ'
        },
        '--etime': {
            'type':
            str,
            'help':
            'Datetime in file to end collecting the data values. Defaults to end of pcap. Expected format: YYYY-MM-DDThh:mm:ssZ'
        }
    }

    arguments['pcap'] = {
        'nargs': '*',
        'help': 'PCAP file(s) containing telemetry packets'
    }

    args = gds.arg_parse(arguments, description)

    args.ground_time = True
    if args.time_field is not None:
        args.ground_time = False

    tlmdict = tlm.getDefaultDict()
    defn = None

    try:
        if tlmdict is not None:
            defn = tlmdict[args.packet]
    except KeyError:
        log.error('Packet "%s" not defined in telemetry dictionary.' %
                  args.packet)
        gds.exit(2)

    if not args.all and args.fields is None:
        log.error(
            'Must provide fields file with --fields or specify that all fields should be queried with --all'
        )
        gds.exit(2)

    if args.all:
        fields = [flddefn.name for flddefn in defn.fields]
    else:
        # Parse the fields file into a list
        with open(args.fields, 'rb') as stream:
            fields = [fldname.strip() for fldname in stream.readlines()]

    not_found = False

    # TODO Rework this into the CSV generation. Not here.
    # Duplicating effort
    for fldname in fields:
        raw = fldname.split('.')
        if fldname not in defn.fieldmap and (len(raw) == 2 and raw[0] != 'raw'
                                             or raw[1] not in defn.fieldmap):
            not_found = True
            log.error('No telemetry point named "%s"' % fldname)

    if not_found:
        gds.exit(2)

    if args.stime:
        start = datetime.strptime(args.stime, dmc.ISO_8601_Format)
    else:
        start = dmc.GPS_Epoch

    if args.etime:
        stop = datetime.strptime(args.etime, dmc.ISO_8601_Format)
    else:
        stop = datetime.utcnow()

    # Append time to beginning of each row
    if not args.ground_time:
        fields.insert(0, args.time_field)
    else:
        fields.insert(0, 'Ground Receipt Time')

    csv_file = None
    csv_writer = None
    npackets = 0
    if args.csv:
        csv_file = open(args.csv, 'wb')
        csv_writer = csv.writer(csv_file)

    output(csv_writer, fields)

    # If we're comparing off ground receipt time we need to drop the header label to avoid
    # indexing errors when processing the fields.
    if args.ground_time:
        fields = fields[1:]

    rowcnt = 0

    for filename in args.pcap:
        log.debug('Processing %s' % filename)

        with pcap.open(filename, 'rb') as stream:
            header, data = stream.read()

            while data:
                packet = tlm.Packet(defn, data)

                comp_time = header.timestamp if args.ground_time else getattr(
                    packet, args.time_field)
                if start < comp_time < stop:
                    row = []
                    for field in fields:
                        try:
                            # check if raw value requested
                            _raw = False
                            names = field.split('.')
                            if len(names) == 2 and names[0] == 'raw':
                                field = names[1]
                                _raw = True

                            fieldVal = packet._getattr(field, raw=_raw)

                            if hasattr(fieldVal, 'name'):
                                fieldVal = fieldVal.name
                            else:
                                fieldVal = str(fieldVal)

                        except KeyError:
                            log.debug('%s not found in Packet' % field)
                            fieldVal = None
                        except ValueError:
                            # enumeration not found. just get the raw value
                            fieldVal = packet._getattr(field, raw=True)

                        row.append(fieldVal)

                    if args.ground_time:
                        row = [comp_time] + row

                    rowcnt += 1
                    output(csv_writer, row)

                npackets += 1
                header, data = stream.read()

    log.debug('Parsed %s packets.' % npackets)

    csv_file.close()

    if rowcnt == 0:
        os.remove(args.csv)

    log.end()