Example 1
    @classmethod
    def find(cls, fields):
        r = Config.redis()
        if not r:
            return []
        results = []
        ids = Query.get_unexpired()
        for i in ids:
            q = Query(q_id=i)
            if not q.query:
                # sometimes query metadata is incomplete, usually when I'm break^H^H^H^H^Htesting.
                continue
            for k, v in fields.items():
                if k in ('after-ago', 'after', 'before-ago', 'before'):
                    dur = parse_duration(v)
                    if dur:
                        v = (datetime.utcnow() - dur)
                    else:
                        v = inputs.datetime_from_iso8601(v)
                    if (q.queried < v) and k in ('after-ago', 'after'):
                        q = None
                        break
                    elif (q.queried > v) and k in ('before-ago', 'before'):
                        q = None
                        break
                elif k in ('sensors', ):
                    if frozenset(q.sensors) != frozenset(v):
                        q = None
                        break
                elif k in ('limit-packets', 'limit-bytes'):
                    continue
                elif k not in q.query:
                    Config.logger.info("Skipping: {} - {}".format(q.query, k))
                    q = None
                    break
                else:
                    if is_sequence(v) and v != [
                            vi for vi in v if q.query.find(vi) >= 0
                    ]:
                        Config.logger.info("Skipping: {} - {}".format(
                            q.query, v))
                        q = None
                        break
                    elif is_str(v) and v not in q.query:
                        Config.logger.info("Skipping: {} - {}".format(
                            q.query, v))
                        q = None
                        break
            if q:
                results.append(q.json())
        return results
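
A hedged usage sketch of the filter above: find() compares each key of `fields` against the saved queries, so callers pass the same field names the branches above check for. The field names and values below are illustrative, not a documented API:

# keys taken from the branches above; values are made up
matches = Query.find({
    'after-ago': '2h',        # drop queries submitted more than 2 hours ago
    'host': ['10.0.0.1'],     # sequence value: every item must appear in q.query
    'limit-packets': 1000,    # ignored by the filter
})
# matches is a list of q.json() dicts for every query that passed all the filters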
Example 2
def cleanup(force=None):
    """ Delete queries until EXPIRE config is satisfied:
        1 - Delete anything older than EXPIRE_TIME seconds
        2 - Delete the oldest queries until we have at least EXPIRE_SPACE bytes free
    """
    period = parse_duration(Config.get('CLEANUP_PERIOD', 0))
    now = datetime.utcnow()
    global _LAST_CLEANED
    if not force and period and _LAST_CLEANED + period > now:
        Config.logger.debug("Cleaned recently, aborting: {}".format(
            _LAST_CLEANED.strftime(Config.get('DATE_FORMAT'))))
        return

    _LAST_CLEANED = datetime.utcnow()

    from heapq import heapify, heappop

    Config.logger.info("Running Cleanup: {}".format(now.strftime(ISOFORMAT)))
    ids = Query.get_unexpired()
    ordered = [(file_modified(Query.job_path_for_id(i)), i) for i in ids]
    heapify(ordered)
    Config.logger.info("Cleaning: {}".format(
        {v: o.strftime(ISOFORMAT)
         for o, v in ordered}))

    if EXPIRE_TIME:
        expiring = []
        while ordered and ordered[0][0] + EXPIRE_TIME < now:
            _, q_id = heappop(ordered)
            expiring.append(q_id)
        Config.logger.info("Deleting old queries: {}".format(expiring))
        expiring = Query.expire_now(expiring)
        if expiring:
            Config.logger.error("Couldn't delete: {}".format(expiring))

    if EXPIRE_SPACE > 0:
        free_bytes = spool_space().bytes
        while ordered and free_bytes < EXPIRE_SPACE:
            _, q_id = heappop(ordered)
            Config.logger.info("Deleting for space: {}".format(q_id))
            Query.expire_now(q_id)
            free_bytes = spool_space().bytes
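
The cleanup above leans on heapq ordering its (mtime, id) tuples by their first element, so the oldest job is always at the root of the heap. A minimal, self-contained sketch of that oldest-first pattern (timestamps and ids below are made up):

import heapq
from datetime import datetime

# (modified_time, query_id) pairs, as built from Query.get_unexpired() above
ordered = [(datetime(2024, 1, 3), 'b' * 32),
           (datetime(2024, 1, 1), 'a' * 32),
           (datetime(2024, 1, 2), 'c' * 32)]
heapq.heapify(ordered)                     # min-heap keyed on modification time
cutoff = datetime(2024, 1, 2, 12)          # anything modified before this expires
expired = []
while ordered and ordered[0][0] < cutoff:  # ordered[0] is always the oldest entry
    _, q_id = heapq.heappop(ordered)
    expired.append(q_id)
print(expired)                             # ids from 2024-01-01 and 2024-01-02, oldest first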
Example 3
_INSTANCES = Config.get('STENOGRAPHER_INSTANCES')
for instance in _INSTANCES:
    instance['idle'], instance['stats'] = (from_epoch(0), {})

# IDLE_TIME - 5: assume stenoboxes remain idle for 5 seconds before checking again.
IDLE_TIME = Config.setdefault('IDLE_TIME', 5, minval=1)
# IDLE_SLEEP - 2.0: wait time between idle checks; a check occurs at least once every IDLE_TIME.
IDLE_SLEEP = Config.setdefault('IDLE_SLEEP', 2.0, minval=0)
# STAT_TIMEOUT - 3.0: assume stenoboxes are broken if stats don't return within this many seconds.
STAT_TIMEOUT = Config.setdefault('STAT_TIMEOUT', 3.0, minval=0.1)
# QUERY_TIMEOUT - 720: assume stenoboxes are broken if a query doesn't return within this many seconds.
QUERY_TIMEOUT = Config.setdefault('QUERY_TIMEOUT', 720.0, minval=5)
# MERGED_NAME - name of the final result pcap
MERGED_NAME = Config.setdefault('MERGED_NAME', "merged")

EXPIRE_TIME = parse_duration(Config.get('EXPIRE_TIME', 0))
EXPIRE_SPACE = parse_capacity(Config.get('EXPIRE_SPACE', 0))
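
# parse_duration and parse_capacity are docket helpers that are not shown in these examples.
# From the way they are used (added to datetimes, compared with byte counts) they appear to
# return a timedelta and an int. The stand-ins below are a minimal illustrative sketch of
# that assumed behaviour, not the real implementations:
import re
from datetime import timedelta

def _example_parse_duration(value):
    """ Illustrative only: '30s', '15m', '24h' -> timedelta; unparseable or falsy -> None """
    m = re.match(r'^(\d+(?:\.\d+)?)\s*([smhd])$', str(value).strip())
    if not m:
        return None
    scale = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[m.group(2)]
    return timedelta(seconds=float(m.group(1)) * scale)

def _example_parse_capacity(value):
    """ Illustrative only: '25B', '2KB', '10GB' -> a byte count as int; unparseable -> 0 """
    m = re.match(r'^(\d+(?:\.\d+)?)\s*([KMGT]?)B?$', str(value).strip(), re.I)
    if not m:
        return 0
    return int(float(m.group(1)) * 1024 ** ' KMGT'.index(m.group(2).upper() or ' '))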


# stats are not enqueueable because we always want them immediately.
def get_stats(selected_sensors=None):
    """ return a dictionary of { sensorname: {stats} }
        NOTE: runs immediately (not enqueued) - used when we _need_ a result
    """
    Config.logger.debug("Get Stats: {}".format(selected_sensors))
    if is_str(selected_sensors):
        selected_sensors = (selected_sensors, )
    datas = {}

    global _INSTANCES
    for instance in _INSTANCES:
Example 4
class Query:
    """ Query               handles metadata and actions to process docket queries:
        q = Query(f)        creates a query from a dict of fields
        l = Query(idstr)    creates a query by loading the query file in SPOOL_DIR/idstr (standard location)
        t = Query(tuple)    creates a query from tuple created by .tupify()
        q.enqueue()         send the query to celery to start processing.

        Query(f).enqueue()  shorthand
    """
    Tuple = namedtuple('QueryTuple', ['query', 'time'])

    LONG_AGO = -1 * abs(parse_duration(Config.get('LONG_AGO', '24h')))

    WEIGHTS = {
        'enabled': (bool(Config.get('WEIGHT_TOTAL'))
                    and bool(Config.get('WEIGHT_THRESHOLD'))
                    and bool(Config.get('WEIGHT_HOURS'))),
        'total': parse_capacity(Config.get('WEIGHT_TOTAL')),
        'limit': parse_capacity(Config.get('WEIGHT_THRESHOLD')),
        'port': Config.get('WEIGHT_PORTS', 100.0),
        'hour': Config.get('WEIGHT_HOURS'),
        'net': Config.get('WEIGHT_NETS', 2.0),
        'ip': Config.get('WEIGHT_IPS', 50.0),
    }

    # TIME_RX = re.compile(r'after (?P<after>\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ) and before (?P<before>\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ)')
    EMPTY_THRESHOLD = parse_capacity(Config.get('EMPTY_THRESHOLD', '25B'))
    # Query.events can have a 'state': states are used to filter events for display
    RECEIVING = 'Requesting'
    RECEIVED = 'Request Complete'
    EMPTY = 'Completed. no packets returned'
    FINISHED = 'Finished'
    CREATED = 'Created'
    SUCCESS = 'Completed'
    MERGE = 'Merging'
    ERROR = 'Error'
    FAIL = 'Failed'
    FINAL_STATES = (
        FAIL,
        SUCCESS,
    )

    def __init__(self, fields=None, qt=None, q_id=None, query=None):
        self.query = None  # string: query formatted for stenoapi
        self.state = None  # string: one of (RECEIVING, RECEIVED, MERGE, SUCCESS, ERROR, FAIL)
        self._id = None  # a hash of the normalized query string, uniquely identifies this query to prevent duplicates
        self.events = []  # a sorted list of events, including errors... TODO FIX ME

        if qt:
            self._detupify(qt)

        if q_id and is_str(q_id) and len(q_id) >= 32:
            self.load(q_id=q_id)

        if query and is_str(query):
            self.query = query

        if fields and isinstance(fields, dict):
            self._build_query(fields)

    def load(self, path=None, q_id=None, from_file=False):
        """ query data, if no path is provided use the default (requires id)
            path - ignore redis and read the file from the given path
            from_file - ignore redis and read the file from disk
        """
        if path is not None:
            if os.path.exists(path):
                return update_yaml(path, self) != False
            self.error('load', "No Such path {}".format(path))
            return False
        elif Config.redis() and not from_file:
            r = Config.redis()
            key = _REDIS_PREFIX + (q_id or self.id)
            old = r.get(key)
            if old:
                old = yaml.load(old)
                self.update(old)
                return True
        q_id = q_id or self.id
        if q_id:
            return update_yaml(Query.yaml_path_for_id(q_id), self) != False
        raise Exception("Can't load a Query without an ID: {}".format(self))

    def save(self, path=None, q_id=None, to_file=False):
        """ save this query to the default location, clobbering old values
            path - write the query to the given file path and return True if successful. Overrides other keyargs
            to_file - ensure the file on disk is written, will also write to redis if configured
        """
        Config.logger.info("Query Save state:{}, Last Event:{}".format(
            self.state, self.events[-1]))
        if path is not None:
            if os.path.exists(path):
                return write_yaml(path, self)
            self.error('save', "No Such path {}".format(path))
            return False
        if Config.redis():
            r = Config.redis()
            key = _REDIS_PREFIX + self.id
            r.set(key, yaml.dump(self))
            if not to_file:
                return True
        q_id = q_id or self.id
        if q_id:
            return write_yaml(Query.yaml_path_for_id(q_id), self)
        Exception("Can't save Query {}".format(self))

    def update(self, other):
        """ update self from other's data,
            other can be a Query, dict, or yaml string.
        """
        Config.logger.debug("updating: {} with {}".format(self, other))
        if type(other) is dict and other:
            Config.logger.debug("Dict update: {} with {}".format(self, other))
            recurse_update(self.__dict__, other, ignore_none=True)
            self._fix_events()
            return True
        elif isinstance(other, Query):
            Config.logger.debug("instance update: {} with {}".format(
                self, other))
            recurse_update(self.__dict__, other.__dict__, ignore_none=True)
            self._fix_events()
            return True
        elif type(other) is str:
            return self.update(yaml.load(other))
        return False

    def tupify(self):
        """ Serializes the basic values used to define a query into a tuple.
            We can't queue an object - this is all the data the celery worker needs to run the query.
        """
        return Query.Tuple(self.query, self.queried.strftime(ISOFORMAT))

    def _detupify(self, query_tuple):
        # namedtuples lose their names when serialized
        query_tuple = Query.Tuple(*query_tuple)
        self.query = query_tuple.query
        self.events = [
            Event(datetime=datetime.strptime(query_tuple.time, ISOFORMAT),
                  name=Query.CREATED,
                  msg=None,
                  state=Query.CREATED)
        ]
        self._fix_events()

    def _fix_events(self):
        """ resort events, the list order can be wrong when loaded from a file """
        self.events.sort()

    @property
    def id(self):
        """ hexdigest of the query - uniquely identifies the request based on the query string (assumes query times are absolute) """
        if self._id is not None:
            return self._id
        elif self.query:
            # calculate a 'query' hash to be our id
            self._id = md5(self.query)
            if Config.get('UUID_FORMAT'):
                self._id = '-'.join(
                    (self._id[:8], self._id[8:12], self._id[12:16],
                     self._id[16:20], self._id[20:]))
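                # e.g. 'd41d8cd9-8f00-b204-e980-0998ecf8427e' - 8-4-4-4-12 hex groups (illustrative digest)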
            return self._id

    @property
    def queried(self):
        """ shortcut for 'when a user submitted this query' """
        if not self.events:
            self.progress(Query.CREATED, state=Query.CREATED)
        return self.events[0].datetime

    @property
    def query_time(self):
        """ force 'queried' into a TIME_WINDOW for use in a query string """
        return enforce_time_window(self.queried)

    def __str__(self):
        return "[{}]".format(self.id if self.query else "partial QUERY")

    def time_requested(self):
        """ returns an ISO date (ordinal) - when the query was requested """
        return self.queried.strftime(_DATE_FORMAT)[:-3]

    @property
    def yaml_path(self):
        return Query.yaml_path_for_id(self.id)

    @classmethod
    def yaml_path_for_id(cls, q_id):
        """  path used to record this query as yaml """
        return cls.job_path_for_id(q_id,
                                   Config.get('QUERY_FILE', 'query') + '.yaml')

    def path(self, path):
        return self.job_path_for_id(self._id, path)

    @property
    def job_path(self):
        """ path for this query's artifacts """
        return self.job_path_for_id(self._id)

    @staticmethod
    def job_path_for_id(q_id, path=None):
        # ensure canonical naming - everything uses this function
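        # e.g. job_path_for_id('<32+ char id>', 'query.yaml') -> <SPOOL_DIR>/<id>/query.yaml (illustrative)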
        parts = (Config.get('SPOOL_DIR'), q_id)
        if path:
            parts += (path, ) if is_str(path) else path
        return os.path.join(*parts)

    @property
    def invalid(self):
        """ checks if this query is valid, and returns a dict of things that are wrong """
        if self.query is None:
            self.error("invalid", "No query string")
        if self.id is None:
            self.error("invalid", "No Id ")
        if not self.events:
            self.error("invalid", "No request time")
        return self.errors

    @property
    def errors(self):
        """ returns a dictionary of errors recorded about this query """
        return [e for e in self.events if e.state in (Query.ERROR, Query.FAIL)]

    def error(self, name, msg, status=None):
        """ adds an error event to the query ie: error('stenographer', "Bad Request" )
            if LOG_LEVEL is 'debug', add the caller's name and line number
        """
        if Config.get("DEBUG"):
            from inspect import getframeinfo, stack
            caller = getframeinfo(stack()[1][0])
            msg = "{}:{} - {}".format(caller.filename, caller.lineno, msg)
            del caller  # This delete is redundant, but a stack frame contains multitudes: we _must not_ keep it around
        self.progress(name, msg, status or Query.ERROR)

    def info(self, msg):
        Config.logger.info("Query: " + msg)

    def _build_query(self, fields):
        Config.logger.debug("_build_query {}".format(str(fields)))

        q_fields = {
            'host': [],
            'net': [],
            'port': [],
            'proto': None,
            'proto-name': None,
            'after-ago': None,
            'before-ago': None,
            'after': None,
            'before': None,
        }
        q_fields.update(fields)
        self.progress(Query.CREATED, state=Query.CREATED)

        start = self.query_time + self.LONG_AGO
        end = self.query_time + timedelta(minutes=1)

        qry_str = []
        weights = {'ip': 0, 'net': 0, 'port': 0}
        for host in sorted(q_fields['host']):
            Config.logger.debug("Parsing host: %s", host)
            if len(host) == 0:
                continue

            validate_ip(host)
            qry_str.append('host {}'.format(host))
            weights['ip'] += 1
        for net in sorted(q_fields['net']):
            Config.logger.debug("Parsing net: %s", net)
            if len(net) == 0:
                continue

            validate_net(net)
            qry_str.append('net {}'.format(net))
            weights['net'] += 1
        for port in sorted(q_fields['port']):
            Config.logger.debug("Parsing port: %s", port)
            try:
                if 0 < int(port) < 2**16:
                    qry_str.append('port {}'.format(int(port)))
                    weights['port'] += 1
                else:
                    raise ValueError()
            except ValueError:
                raise BadRequest("Port {} out of range: 1-65535".format(port))
        if q_fields['proto']:
            try:
                if 0 < int(q_fields['proto']) < 2**8:
                    qry_str.append('ip proto {}'.format(q_fields['proto']))
                else:
                    raise ValueError()
            except ValueError:
                raise BadRequest(
                    "protocol number {} out of range 1-255".format(
                        q_fields['proto']))
        if q_fields['proto-name']:
            if q_fields['proto-name'].upper() not in ['TCP', 'UDP', 'ICMP']:
                raise BadRequest(description="Bad proto-name: {}".format(
                    q_fields['proto-name']))
            qry_str.append(q_fields['proto-name'].lower())
        if q_fields['after-ago']:
            dur = parse_duration(q_fields['after-ago'])
            if not dur:
                raise BadRequest("can't parse duration: {}".format(
                    q_fields['after-ago']))
            start = enforce_time_window(self.query_time - dur)
        if q_fields['before-ago']:
            dur = parse_duration(q_fields['before-ago'])
            if not dur:
                raise BadRequest("can't parse duration: {}".format(
                    q_fields['before-ago']))
            end = enforce_time_window(self.query_time - dur)
        if q_fields['after']:
            Config.logger.debug("Processing 'after': {}".format(q_fields['after']))
            dur = parse_duration(q_fields['after'])
            Config.logger.debug("Duration {}".format(dur))
            if dur:
                start = enforce_time_window(self.query_time - dur)
                Config.logger.debug("Start w/ duration: {}".format(start))
            else:
                start = enforce_time_window(
                    inputs.datetime_from_iso8601(
                        q_fields['after']).replace(tzinfo=None))
                Config.logger.debug("Start w/o duration: {}".format(start))

        if q_fields['before']:
            dur = parse_duration(q_fields['before'])
            if dur:
                end = enforce_time_window(self.query_time - dur)
            else:
                end = enforce_time_window(
                    inputs.datetime_from_iso8601(
                        q_fields['before']).replace(tzinfo=None))
            end += timedelta(seconds=Config.get('TIME_WINDOW'))

        # Check the request's 'weight'
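        # Illustrative arithmetic (assumed values): with WEIGHT_TOTAL parsing to 1e12 bytes,
        # WEIGHT_HOURS=24, a 24-hour window and a single host (WEIGHT_IPS=50.0), the estimate
        # is 1e12 * (86400 / (24 * 3600)) / 50 = 2e10 bytes; the request is rejected if that
        # exceeds the WEIGHT_THRESHOLD limit.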
        if Query.WEIGHTS['enabled'] and not q_fields.get('ignore-weight'):
            req_weight = (Query.WEIGHTS['total'] *
                          ((end - start).total_seconds() /
                           (Query.WEIGHTS['hour'] * 3600)) / (sum(
                               (val * Query.WEIGHTS[k]
                                for k, val in weights.items())) or 1))
            if req_weight > Query.WEIGHTS['limit']:
                self.error(
                    'build_query', "Request is too heavy: {}/{}:\t{}".format(
                        req_weight, Query.WEIGHTS['limit'], jsonify(q_fields)))
                raise BadRequest("Request parameters exceed weight: %d/%d" %
                                 (req_weight, Query.WEIGHTS['limit']))

        qry_str.append('after {}'.format(start.strftime(ISOFORMAT)))
        qry_str.append('before {}'.format(end.strftime(ISOFORMAT)))

        self.query = " and ".join(qry_str)
        if not self.query:
            Config.logger.info("Bad request: {}".format(jsonify(q_fields)))
            return None

        Config.logger.debug("build_query: <{}>".format(self.query))

        # if we want to support limiting the query, it would require rethinking our duplicate detection
        #if q_fields['sensors']:
        #    self.sensors = q_fields['sensors']
        return self.id

    @classmethod
    def find(cls, fields):
        r = Config.redis()
        if not r:
            return []
        results = []
        ids = Query.get_unexpired()
        for i in ids:
            q = Query(q_id=i)
            if not q.query:
                # sometimes query metadata is incomplete, usually when I'm break^H^H^H^H^Htesting.
                continue
            for k, v in fields.items():
                if k in ('after-ago', 'after', 'before-ago', 'before'):
                    dur = parse_duration(v)
                    if dur:
                        v = (datetime.utcnow() - dur)
                    else:
                        v = inputs.datetime_from_iso8601(v)
                    if (q.queried < v) and k in ('after-ago', 'after'):
                        q = None
                        break
                    elif (q.queried > v) and k in ('before-ago', 'before'):
                        q = None
                        break
                elif k in ('sensors', 'limit-packets', 'limit-bytes'):
                    continue
                elif k not in q.query:
                    Config.logger.info("Skipping: {} - {}".format(q.query, k))
                    q = None
                    break
                else:
                    if is_sequence(v) and v != [
                            vi for vi in v if q.query.find(vi) >= 0
                    ]:
                        Config.logger.info("Skipping: {} - {}".format(
                            q.query, v))
                        q = None
                        break
                    elif is_str(v) and v not in q.query:
                        Config.logger.info("Skipping: {} - {}".format(
                            q.query, v))
                        q = None
                        break
            if q:
                results.append(q.json())
        return results

    @staticmethod
    def thead():
        col = lambda k, s, t: {"key": k, "str": s, "type": t}
        columns = [
            col("state", "State", "string"),
            col("time", "Time requested", "string"),
            col("id", "ID", "id"),
            col("url", "Pcap URL", "url"),
            col("query", "Query", "string"),
        ]
        return columns

    def json(self):
        return {
            'id': self.id,
            'state': self.state,
            'query': self.query,
            'url': self.pcap_url,
            'time': self.time_requested(),
        }

    def enqueue(self):
        """ queue this query in celery for fulfillment """
        if self.invalid:
            raise Exception("Invalid Query " + self.errors)
        from tasks import query_task
        query_task.apply_async(queue='query',
                               kwargs={'query_tuple': self.tupify()})
        return self.json()

    @property
    def pcap_url(self):
        return self.pcap_url_for_id(self.id)

    @staticmethod
    def pcap_url_for_id(q_id):
        return "{}/{}/{}.pcap".format(Config.get('PCAP_WEB_ROOT', '/results'),
                                      q_id, Config.get('MERGED_NAME'))

    @property
    def pcap_path(self):
        return Query.pcap_path_for_id(self.id)

    @staticmethod
    def pcap_path_for_id(q_id):
        if q_id:
            return os.path.join(Config.get('SPOOL_DIR'), q_id,
                                '%s.pcap' % Config.get('MERGED_NAME'))

    def complete(self, state=SUCCESS):
        """ why a separate method: because someone will definitely want to check for the 'completed' status so it better be reliably named """
        if state not in Query.FINAL_STATES:
            raise ValueError("query.complete() requires a 'FINAL_STATE'")
        self.progress(Query.FINISHED, state=state)

    def progress(self, name, msg=None, state=None):
        """ Record the time 'action' occurred """
        e = Event(datetime.utcnow(), name, msg, state)
        self.events.append(e)

        if self.state not in Query.FINAL_STATES:
            self.state = state if state is not None else self.state

        if state in (Query.ERROR, Query.FAIL):
            Config.logger.warning("Query[{}] Error: {}:{}".format(
                self.id, name, msg or ''))
        else:
            Config.logger.info("Query[{}] {}:{}:{}".format(
                self.id, name, msg or '', state))
        return e

    def result(self, name, msg=None, state=None, value=None):
        """ Record the result of a stenographer query """
        result = Result(datetime=datetime.utcnow(),
                        name=name,
                        msg=msg,
                        state=state,
                        value=value)
        self.events.append(result)
        Config.logger.info("Query[{}] {}:{}".format(self.id, name, msg or ''))
        return result

    @property
    def successes(self):
        """ a list of successful query results """
        return [
            e for e in self.events
            if type(e) is Result and e.state == Query.RECEIVED
        ]

    @staticmethod
    def get_unexpired(ids=None):
        """ return a list of query IDs that are still available on the drive
            the queries can be in various states of processing
        """
        return [
            f for f in readdir(Config.get('SPOOL_DIR'))
            if (f not in ['.', '..']) and (not ids or f in ids) and (
                len(f) >= 32)
        ]

    def status(self, full=False):
        """ describe this query's state in detail """
        if self.events:
            status = {
                'state': self.state,
                'requests': {
                    r.name: r._asdict()
                    for r in self.events if type(r) is Result
                },
                'events':
                [e._asdict() for e in self.events if full or e.state],
                'successes': [e._asdict() for e in self.successes],
            }
            return status
        return {'state': self.state}

    @staticmethod
    def status_for_ids(ids):
        """ return a dictionary of id : {status} """
        if type(ids) is str:
            ids = [ids]
        return {i: Query(q_id=i).status(full=True) for i in ids}

    @staticmethod
    def expire_now(ids=None):
        """ Deletes queries with supplied ids.
            Returns a list of ids that couldn't be deleted.
            Ignore non-existent entries
        """
        from shutil import rmtree
        if not ids:
            return []
        errors = []
        if type(ids) is str:
            ids = (ids, )
        for i in ids:
            try:
                path = Query.job_path_for_id(i)
                if os.path.exists(path):
                    rmtree(path)
                else:
                    Config.logger.info("expire_now: no {} to expire".format(i))
            except OSError as e:
                errors.append(i)
                Config.logger.error("Query: Unable to delete {}: {}".format(
                    i, str(e)))
            except Exception:
                errors.append(i)
                Config.logger.error("Query: Unable to delete {}".format(i))
        return errors
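
A hedged end-to-end sketch of the Query(f).enqueue() shorthand from the class docstring. The field values are made up; the shape of the resulting query string follows from _build_query above, and the worker-side rebuild via Query(qt=...) follows from tupify()/_detupify():

# illustrative lifecycle (field values are made up)
fields = {'host': ['10.0.0.1'], 'port': ['443'], 'after-ago': '2h'}

q = Query(fields)       # builds e.g. "host 10.0.0.1 and port 443 and after <ISO> and before <ISO>"
if not q.invalid:
    q.enqueue()         # hands Query.Tuple(query, time) to the celery 'query' queue

print(Query.status_for_ids([q.id]))   # {'<id>': {'state': ..., 'events': [...], ...}}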