Code Example #1
File: usbstick.py  Project: juansokil/insulaudit
 def decode(self):
     """Should set self.info"""
     self.reply = Reply(self.response)
     self.info = self.__info__
     if self.reply.ack.isACK():
         self.onACK()
     else:
         log.info('nonack:%s' % self.reply.ack)
     self.reply.info = self.info
Code Example #2
File: usbstick.py  Project: bewest/insulaudit
 def decode(self):
   """Should set self.info"""
   self.reply    = Reply( self.response )
   self.info = self.__info__
   if self.reply.ack.isACK( ):
     self.onACK()
   else:
     log.info('nonack:%s' % self.reply.ack)
   self.reply.info = self.info
Code Example #3
File: CommBuffer.py  Project: bewest/insulaudit
  def open( self, newPort=False, **kwds ):
    if newPort:
      self.port = newPort

    self.serial = serial.Serial( self.port, timeout=self.__timeout__, **kwds )

    if self.serial.isOpen( ):
      log.info( '{agent} opened serial port: {serial}'\
         .format( serial = repr( self.serial ),
                  agent  =self.__class__.__name__ ) )
Code Example #4
File: glucose.py  Project: bewest/insulaudit
def parse_text( text ):
  """
  A glucose record is a tuple of the time and glucose.
    ( datetime.datetime, int )

  >>> len( parse_text(  '''2011-01-01 01:02  076''' )[ 0 ] )
  2

  # spaces
  >>> date, value = parse_text(  '''2011-01-01 01:02  076''' )[ 0 ]
  ... #
  >>> date.isoformat( )
  '2011-01-01T01:02:00'
  >>> value
  76

  # tabs
  >>> date, value = parse_text(  '''2011-01-01T01:02	076 ''' )[ 0 ]
  >>> (date.isoformat( ), value)
  ('2011-01-01T01:02:00', 76)

  # T
  >>> date, value = parse_text(  '''2011-01-01T01:02	076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T01:02:00', 76)

  # PM/AM
  >>> date, value = parse_text(  '''2011-01-01 01:02AM 076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T01:02:00', 76)
  >>> date, value = parse_text(  '''2011-01-01 01:02PM 076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T13:02:00', 76)
  >>> date, value = parse_text(  '''2011-01-01	01:02PM 076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T13:02:00', 76)


  """
  # TODO: sensitivity to timezones!
  results = [ ]
  for datum in text.splitlines( ):
    frags = datum.strip( ).split( )
    if frags == [ ]: continue
    log.info( frags )
    #frags = map( string.strip, datum.strip( ).split( ) )
    value = int( frags[ -1 ] )
    date  = None
    try:
      date = text2date( ' '.join( frags[ 0:-1 ] ) )
      results.append( ( date, value ) )
    except IndexError, e:
      log.error( 'error %s' % ( e ) )
  return results
Code Example #5
File: CommBuffer.py  Project: juansokil/insulaudit
    def open(self, newPort=False, **kwds):
        if newPort:
            self.port = newPort

        self.serial = serial.Serial(self.port,
                                    timeout=self.__timeout__,
                                    **kwds)

        if self.serial.isOpen():
            log.info( '{agent} opened serial port: {serial}'\
               .format( serial = repr( self.serial ),
                        agent  =self.__class__.__name__ ) )
Code Example #6
def parse_text(text):
    """
  A glucose record is a tuple of the time and glucose.
    ( datetime.datetime, int )

  >>> len( parse_text(  '''2011-01-01 01:02  076''' )[ 0 ] )
  2

  # spaces
  >>> date, value = parse_text(  '''2011-01-01 01:02  076''' )[ 0 ]
  ... #
  >>> date.isoformat( )
  '2011-01-01T01:02:00'
  >>> value
  76

  # tabs
  >>> date, value = parse_text(  '''2011-01-01T01:02	076 ''' )[ 0 ]
  >>> (date.isoformat( ), value)
  ('2011-01-01T01:02:00', 76)

  # T
  >>> date, value = parse_text(  '''2011-01-01T01:02	076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T01:02:00', 76)

  # PM/AM
  >>> date, value = parse_text(  '''2011-01-01 01:02AM 076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T01:02:00', 76)
  >>> date, value = parse_text(  '''2011-01-01 01:02PM 076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T13:02:00', 76)
  >>> date, value = parse_text(  '''2011-01-01	01:02PM 076''' )[ 0 ]
  >>> date.isoformat( ), value
  ('2011-01-01T13:02:00', 76)


  """
    # TODO: sensitivity to timezones!
    results = []
    for datum in text.splitlines():
        frags = datum.strip().split()
        if frags == []: continue
        log.info(frags)
        #frags = map( string.strip, datum.strip( ).split( ) )
        value = int(frags[-1])
        date = None
        try:
            date = text2date(' '.join(frags[0:-1]))
            results.append((date, value))
        except IndexError, e:
            log.error('error %s' % (e))
    return results
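Both copies of parse_text rely on a text2date helper from the project's lib that the snippets do not show. As a rough stand-in covering the date shapes the doctests exercise (space, tab, 'T', and AM/PM separators), here is a minimal sketch; the name and the format list are assumptions, not the project's actual implementation.

from datetime import datetime

def text2date_sketch(text):
    """Hypothetical stand-in for the project's text2date helper."""
    # Treat 'T' like a space so one set of formats covers all the doctest inputs.
    text = text.replace('T', ' ').strip()
    candidates = (
        '%Y-%m-%d %H:%M',    # 2011-01-01 01:02
        '%Y-%m-%d %I:%M%p',  # 2011-01-01 01:02PM
    )
    for fmt in candidates:
        try:
            return datetime.strptime(text, fmt)
        except ValueError:
            continue
    raise ValueError('unrecognized date: %r' % text)

# text2date_sketch('2011-01-01 01:02PM').isoformat() == '2011-01-01T13:02:00'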
Code Example #7
def format_glucose(data):
    """
    >>> date, value = format_glucose( '''P "WED","11/10/10","01:46:00   ''' 
    ...               + '''","  076 ","N","00", 00 099C''' )
    >>> date.isoformat( )
    '2010-11-10T01:46:00'
    >>> value
    76
  """
    try:
        date = lib.parse.date('T'.join(data.replace('"', '').split(',')[1:3]))
        value = int(data.split('"')[7].strip())
    except (IndexError, ValueError), e:
        log.info(data)
        raise InvalidGlucose(data)
    return date, value
Code Example #8
File: proto.py  Project: kakoni/insulaudit
def format_glucose( data ):
  """
    >>> date, value = format_glucose( '''P "WED","11/10/10","01:46:00   ''' 
    ...               + '''","  076 ","N","00", 00 099C''' )
    >>> date.isoformat( )
    '2010-11-10T01:46:00'
    >>> value
    76
  """
  try:
    date = lib.parse.date( 'T'.join(
                     data.replace( '"', '' ).split( ',' )[ 1:3 ]) )
    value = int( data.split( '"' )[ 7 ].strip( ) )
  except (IndexError, ValueError), e:
    log.info( data )
    raise InvalidGlucose( data )
  return date, value
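To make the field positions in that one-line record format easier to see, here is a stdlib-only walkthrough of the same slicing; datetime.strptime stands in for the project's lib.parse.date, which may accept other formats as well.

from datetime import datetime

record = 'P "WED","11/10/10","01:46:00   ","  076 ","N","00", 00 099C'

# Fields 1:3 of the comma-split record carry the date and the time;
# joining them with 'T' yields a single parseable timestamp.
date_text = 'T'.join(record.replace('"', '').split(',')[1:3])

# The glucose value sits in the eighth quote-delimited field (index 7).
value = int(record.split('"')[7].strip())

# lib.parse.date is the project's own parser; strptime is only a stand-in here.
date = datetime.strptime(date_text.strip(), '%m/%d/%yT%H:%M:%S')

assert (date.isoformat(), value) == ('2010-11-10T01:46:00', 76)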
Code Example #9
def decode_chunk(chunk):
    """
  Experiment: decode a chunk!
  TODO: how do we decode historical data?
  It's likely composed of regions representing records, either with
  some kind of delimiter, or with a common header.

  Looking at the hex dump of chunks, it does indeed look like there
  is some kind of repeating pattern.  But is there an offset to
  begin?  Is each record the same size, or is there a header
  describing the record?

  """
    hex_dump_data(chunk)

    longs = []
    for x in range(0, 32, 4):
        q = lib.BangLong(chunk[x:x + 4])
        longs.append(q)
    logger.info('longs? %s' % ' '.join(map(str, longs)))
Code Example #10
File: zero.py  Project: bewest/insulaudit
def decode_chunk(chunk):
  """
  Experiment: decode a chunk!
  TODO: how do we decode historical data?
  It's likely composed of regions representing records, either with
  some kind of delimiter, or with a common header.

  Looking at the hex dump of chunks, it does indeed look like there
  is some kind of repeating pattern.  But is there an offset to
  begin?  Is each record the same size, or is there a header
  describing the record?

  """
  hex_dump_data(chunk)

  longs = [ ]
  for x in range(0, 32, 4):
    q = lib.BangLong(chunk[x:x+4])
    longs.append(q)
  logger.info('longs? %s' % ' '.join(map(str, longs)))
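decode_chunk relies on lib.BangLong, which the snippets do not define. Judging only by the name, it presumably unpacks four bytes as a big-endian ('!') unsigned long ('L'); the sketch below shows that reading of the 4-byte stride loop using struct, and should be treated as a guess rather than the library's actual behavior.

import struct

def bang_long_sketch(four_bytes):
    """Guess at lib.BangLong: unpack 4 bytes as a big-endian unsigned long
    ('!L' in struct notation, hence the name)."""
    return struct.unpack('!L', bytes(bytearray(four_bytes)))[0]

# Walking a 32-byte chunk in 4-byte strides, as decode_chunk does:
chunk = bytearray(range(32))
longs = [bang_long_sketch(chunk[x:x + 4]) for x in range(0, 32, 4)]
print(' '.join(map(str, longs)))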
Code Example #11
File: zero.py  Project: n-west/insulaudit
def main(*args):
    """ some boiler plate to set up logging, reproducible runs, and
  get our little decoder's IO up and running.
  """
    global settings
    settings.CHUNK_SIZE, settings.PROLOG_SIZE
    parser = get_argparser()
    args = list(args)
    cmd, args = args[0], args[1:]
    opts = parser.parse_args((args))
    # logger.info('opts: %s' % (pformat(args)))
    settings.CHUNK_SIZE = opts.chunk
    settings.PROLOG_SIZE = opts.prolog
    cmdline = [cmd, "--chunk %s" % (settings.CHUNK_SIZE), "--prolog %s" % (settings.PROLOG_SIZE)] + opts.input
    print " ".join(cmdline)

    logger.info("opening %s" % (opts.input))

    for item in opts.input:
        do_input(item)
Code Example #12
def main(*args):
    """ some boiler plate to set up logging, reproducible runs, and
  get our little decoder's IO up and running.
  """
    global settings
    settings.CHUNK_SIZE, settings.PROLOG_SIZE
    parser = get_argparser()
    args = list(args)
    cmd, args = args[0], args[1:]
    opts = parser.parse_args((args))
    #logger.info('opts: %s' % (pformat(args)))
    settings.CHUNK_SIZE = opts.chunk
    settings.PROLOG_SIZE = opts.prolog
    cmdline = [
        cmd,
        '--chunk %s' % (settings.CHUNK_SIZE),
        '--prolog %s' % (settings.PROLOG_SIZE)
    ] + opts.input
    print ' '.join(cmdline)

    logger.info('opening %s' % (opts.input))

    for item in opts.input:
        do_input(item)
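The "reproducible runs" part of main() is the echoed command line: it prints an invocation that would rerun the tool with the settings actually in effect. A small self-contained sketch of that idea follows; the parser shape (--chunk, --prolog, positional inputs) is inferred from how main() uses the options and may differ from the project's get_argparser().

import argparse

def get_argparser_sketch():
    # Assumed shape: chunk and prolog sizes plus zero or more input paths.
    parser = argparse.ArgumentParser()
    parser.add_argument('--chunk', type=int, default=32)
    parser.add_argument('--prolog', type=int, default=16)
    parser.add_argument('input', nargs='*')
    return parser

def echo_cmdline(cmd, opts):
    """Build a command line that would reproduce the current run."""
    return ' '.join([cmd,
                     '--chunk %s' % opts.chunk,
                     '--prolog %s' % opts.prolog] + list(opts.input))

opts = get_argparser_sketch().parse_args(['--chunk', '64', 'dump.bin'])
print(echo_cmdline('zero.py', opts))  # zero.py --chunk 64 --prolog 16 dump.bin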
Code Example #13
File: zero.py  Project: n-west/insulaudit
def do_input(pathish):
    """given something that looks like a file path, try to get data
  and decode it.
  
  # first fast forward into some offset.
  # then report on how many chunks we read.
  """
    handle = get_raw_handle(pathish)
    pos = handle.tell()
    size = getsize(pathish)
    logger.info("opening %s (%s bytes)" % (pathish, size))

    # first fast forward into some offset.
    logger.info("reading prologue (%s bytes)" % (settings.PROLOG_SIZE))
    prolog = handle.read(settings.PROLOG_SIZE)

    # then report on how many chunks we read.
    for i in itertools.count():
        if pos < size:
            #      logger.info('chunk: %s' % i)
            do_chunk(handle)
            pos = handle.tell()
        else:
            break
Code Example #14
def do_input(pathish):
    """given something that looks like a file path, try to get data
  and decode it.
  
  # first fast forward into some offset.
  # then report on how many chunks we read.
  """
    handle = get_raw_handle(pathish)
    pos = handle.tell()
    size = getsize(pathish)
    logger.info('opening %s (%s bytes)' % (pathish, size))

    # first fast forward into some offset.
    logger.info('reading prologue (%s bytes)' % (settings.PROLOG_SIZE))
    prolog = handle.read(settings.PROLOG_SIZE)

    # then report on how many chunks we read.
    for i in itertools.count():
        if pos < size:
            logger.info('chunk: %s' % i)
            do_chunk(handle)
            pos = handle.tell()
        else:
            break
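Taken together, do_input and read_chunk amount to: skip a prologue of settings.PROLOG_SIZE bytes, then read settings.CHUNK_SIZE-byte chunks until end of file. Below is a self-contained sketch of that loop; the sizes are illustrative defaults standing in for the project's CLI-driven settings.

import itertools
import os

def chunk_walk_sketch(path, prolog_size=16, chunk_size=32):
    """Illustrative restatement of the do_input/read_chunk loop."""
    size = os.path.getsize(path)
    with open(path, 'rb') as handle:
        handle.read(prolog_size)  # first fast forward into some offset
        for i in itertools.count():
            if handle.tell() >= size:
                break
            chunk = bytearray(handle.read(chunk_size))
            # a real run would hand each chunk to decode_chunk(chunk)
    return i  # then report on how many chunks we read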
Code Example #15
def read_chunk(handle):
    """read a chunk, and normalize it's representation as a
  bytearray."""
    msg = (settings.CHUNK_SIZE, handle.tell())
    logger.info('start reading (bytes %s) from offset %s' % msg)
    return bytearray(handle.read(settings.CHUNK_SIZE))
Code Example #16
def get_raw_handle(pathish):
    """Obtain a file-like handle from something describing access to
  data."""
    logger.info('opening %s' % (pathish))
    handle = open(pathish)
    return handle
Code Example #17
File: usbstick.py  Project: bewest/insulaudit
 def decode(self):
   self.info = int(bytearray(self.response[ 3 ])[0])
   log.info( '{0}: {1}dBm'.format( self.label, self.info ) )
Code Example #18
File: usbstick.py  Project: juansokil/insulaudit
 def decode(self):
     self.info = int(bytearray(self.response[3])[0])
     log.info('{0}: {1}dBm'.format(self.label, self.info))
Code Example #19
File: zero.py  Project: bewest/insulaudit
def read_chunk(handle):
  """read a chunk, and normalize it's representation as a
  bytearray."""
  msg = (settings.CHUNK_SIZE, handle.tell( ))
  logger.info('start reading (bytes %s) from offset %s' % msg)
  return bytearray(handle.read(settings.CHUNK_SIZE))
Code Example #20
File: zero.py  Project: bewest/insulaudit
def get_raw_handle(pathish):
  """Obtain a file-like handle from something describing access to
  data."""
  logger.info('opening %s' % (pathish))
  handle = open(pathish)
  return handle