Example #1
  def test_read(self):
    # Create records using synthetic, randomized data id and write to db
    test_num = random.randint(0,100000)
    parser = NMEAParser()
    writer = DatabaseWriter(database='test', host='localhost',
                            user='******', password='******')
    writer.db.exec_sql_command('truncate table data')

    reader = DatabaseReader(database='test', host='localhost',
                            user='******', password='******')

    # Write to database, automatically creating table
    records = [parser.parse_record(s) for s in SAMPLE_DATA]
    index = 0
    for record in records:
      logging.debug('Writing record "%s"', str(record))
      writer.write(record)

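      # After each write, read back whatever the database currently holds
      # and check it against the expected parsed results, in order.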
      result = True
      while result:
        result = writer.db.read()
        logging.info('Read %d: %s', index, result)
        if result:
          self.assertEqual(result, SAMPLE_RESULTS[index])
          index += 1

    # Test range: read a range that should include 3 records
    results = reader.read_range(start=2, stop=5)
    self.assertEqual(results, {'S330GPSDay': [(1509778447.17313, 7)],
                               'S330GPSMonth': [(1509778447.17313, 8)],
                               'S330GPSYear': [(1509778447.17313, 2014)]})

    # Next record should be one after that
    result = reader.read()
    self.assertEqual(result, {'S330GPSTime': [(1509778449.210395, 2034.17)]})

    # Test time_range: read a range that should include 3 records
    results = reader.read_time_range(start_time=1509778449.210395,
                                     stop_time=1509778453.290818)
    self.assertEqual(results, {'S330GPSTime': [(1509778451.248784, 2035.17),
                                               (1509778453.290817, 2036.17)],
                               'S330GPSDay': [(1509778451.248784, 7),
                                              (1509778453.290817, 7)],
                               'S330GPSMonth': [(1509778451.248784, 8),
                                                (1509778453.290817, 8)],
                               'S330GPSYear': [(1509778451.248784, 2014),
                                               (1509778453.290817, 2014)]})
    # Next record should be one after that
    result = reader.read()
    self.assertEqual(result, {'S330GPSTime': [(1509778455.328116, 2037.17)]})

    writer.db.close()
    reader.db.close()
Example #2
        else:
          readers.append(CachedDataReader(subscription=subscription))

      # Parse out values for user@host:database[:field1,field2,...]. We
      # count on --database_password having been specified somewhere.
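      # An illustrative (hypothetical) spec such as
      # "rvdas@localhost:data:S330GPSTime,S330GPSDay" would yield
      # user='rvdas', host='localhost', database='data',
      # fields='S330GPSTime,S330GPSDay'.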
      if new_args.database:
        password = all_args.database_password
        (user, host_db) = new_args.database.split('@')
        (host, database) = host_db.split(':', maxsplit=1)
        if ':' in database:
          (database, fields) = database.split(':')
        else:
          fields = None
        readers.append(DatabaseReader(fields=fields,
                                      database=database, host=host,
                                      user=user, password=password))

      # SerialReader is a little more complicated than other readers
      # because it can take so many parameters. Use the kwargs trick to
      # pass them all in.
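      # For example (values are illustrative), --serial port=/dev/ttyUSB0,baudrate=9600
      # would become SerialReader(port='/dev/ttyUSB0', baudrate='9600'); note that the
      # values stay strings, since this loop does no type conversion.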
      if new_args.serial:
        kwargs = {}
        for pair in new_args.serial.split(','):
          (key, value) = pair.split('=')
          kwargs[key] = value
        readers.append(SerialReader(**kwargs))

      ##########################
      # Transforms
      if new_args.slice:
Example #3
    async def serve_fields(self, field_list):
        """Serve data, if it exists, from database, if it exists, using default
    database location, tables, user and password.

    NOTE: This is the kind of code your mother warned you about. It's a
    quick first pass, and will therefore follow me to my grave and haunt
    you for years to come. For the love of Guido, please clean this up.
    """
        for (field_name, num_secs) in field_list:
            logging.info('Requesting field: %s, %g secs.', field_name,
                         num_secs)

        # Get requested back data. Note that we may have had different
        # back data time spans for different fields. Because some of these
        # might be extremely voluminous (think 30 minutes of winch data),
        # take the computational hit of initially creating a separate
        # reader for each backlog.
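        # For illustration (hypothetical values): a field_list of
        # [('S330GPSTime', 10), ('S330GPSLat', 600)] would leave back_data as
        # {10: ['S330GPSTime'], 600: ['S330GPSLat']}.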
        fields = []
        back_data = {}
        for (field_name, num_secs) in field_list:
            fields.append(field_name)
            if num_secs not in back_data:
                back_data[num_secs] = []
            back_data[num_secs].append(field_name)

        results = {}
        now = time.time()
        for (num_secs, field_list) in back_data.items():
            # Create a DatabaseReader to get num_secs worth of back data for
            # these fields. Provide a start_time of num_secs ago, and no
            # stop_time, so we get everything up to present.
            logging.debug('Creating DatabaseReader for %s', field_list)
            logging.debug('Requesting %g seconds of timestamps from %f-%f',
                          num_secs, now - num_secs, now)
            reader = DatabaseReader(field_list, self.database, self.host,
                                    self.user, self.password)
            num_sec_results = reader.read_time_range(start_time=now - num_secs)
            logging.debug('results: %s', num_sec_results)
            results.update(num_sec_results)

        # Now that we've gotten all the back results, create a single
        # DatabaseReader to read all the fields.
        reader = DatabaseReader(fields, self.database, self.host, self.user,
                                self.password)
        max_timestamp_seen = 0

        while not self.quit_flag:
            # If we do have results, package them up and send them
            if results:
                send_message = json.dumps(results)
                logging.debug('Data server sending: %s', send_message)
                try:
                    await self.websocket.send(send_message)
                except websockets.exceptions.ConnectionClosed:
                    return

            # New results or not, take a nap before trying to fetch more results
            logging.debug('Sleeping %g seconds', self.interval)
            await asyncio.sleep(self.interval)

            # What's the timestamp of the most recent result we've seen?
            # Each value should be a list of (timestamp, value) pairs. Look
            # at the last timestamp in each value list.
            for field in results:
                last_timestamp = results[field][-1][0]
                max_timestamp_seen = max(max_timestamp_seen, last_timestamp)

            # Corner case: if we didn't retrieve any data on the first
            # time through (because it was all too old), max_timestamp_seen
            # will be zero, causing us to retrieve *all* the data in the DB
            # on the next iteration. If we do find that max_timestamp_seen
            # is zero, set it to "now" to prevent this.
            if not max_timestamp_seen:
                max_timestamp_seen = now

            logging.debug('Results: %s', results)
            if results:
                logging.info('Received %d fields, max timestamp %f',
                             len(results), max_timestamp_seen)

            # Check whether there are results newer than latest timestamp
            # we've already seen.
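            # (Adding EPSILON presumably nudges start_time just past the newest
            # timestamp already handled, so that same record isn't returned again.)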
            results = reader.read_time_range(start_time=max_timestamp_seen +
                                             EPSILON)