コード例 #1
0
ファイル: robotSim.py プロジェクト: jwzimmer16/hrb_p1_red
 def __init__(self, fn=None):
     """
     Set up the simulator with placeholder robot and arena geometry.

     INPUT:
       fn -- filename / None -- laser log name to use for logging simulated
           laser data. Nothing is logged when fn is None

     ATTRIBUTES:
       tagPos -- 4x2 float array -- corners of robot tag
       laserAxis -- 2x2 float array -- two points along axis of laser
       waypoints -- dict -- maps waypoint tag numbers to 4x2 float
           arrays of the tag corners
     """
     # Placeholder robot tag corners taken from the message template
     self.tagPos = asfarray(MSG_TEMPLATE[ROBOT_TAGID[0]])
     # Averaging pairs of tag corners gives two points on the laser axis
     self.laserAxis = dot([[1, 1, 0, 0], [0, 0, 1, 1]], self.tagPos) / 2
     # Placeholder 4x2 corner arrays for every known waypoint tag id
     self.waypoints = {tid: asfarray(MSG_TEMPLATE[tid]) for tid in waypoints}
     ### Internal state
     # Two points spanning the laser screen
     self.laserScreen = asfarray([[-1, -1], [1, -1]])
     # Cache for simulated TagStreamer messages
     self._msg = None
     # Gzip output stream for simulated laser data (None when not logging)
     self.out = opengz(fn, "w") if fn else None
コード例 #2
0
  def __init__(self, fn=None):
    """
    Initialize simulator state with dummy robot and arena values.

    INPUT:
      fn -- filename / None -- laser log name to use for logging simulated
          laser data. Nothing is logged when fn is None

    ATTRIBUTES:
      tagPos -- 4x2 float array -- corners of robot tag
      laserAxis -- 2x2 float array -- two points along axis of laser
      waypoints -- dict -- maps waypoint tag numbers to 4x2 float
          arrays of the tag corners
    """
    # Dummy robot tag position from the message template
    self.tagPos = asfarray(MSG_TEMPLATE[ROBOT_TAGID[0]])
    # Two points along the laser axis: midpoints of opposite corner pairs
    self.laserAxis = dot([[1, 1, 0, 0], [0, 0, 1, 1]], self.tagPos) / 2
    # One dummy corner array per waypoint tag id
    self.waypoints = {}
    for tid in waypoints:
      self.waypoints[tid] = asfarray(MSG_TEMPLATE[tid])

    ### Initialize internal variables
    # Two points on the laser screen
    self.laserScreen = asfarray([[-1, -1], [1, -1]])
    # Cache for simulated TagStreamer messages
    self._msg = None
    # Output stream for simulated laser data, if a log name was given
    if fn:
      self.out = opengz(fn, "w")
    else:
      self.out = None
コード例 #3
0
 def __init__(self, fn=None):
     """Connect to the network camera, open the display window, and
     optionally start gzip logging (saving the first frame as a PNG).
     """
     # MJPEG stream from the IP camera (credentials redacted in source)
     self.cam = JpegStreamCamera(
         "http://*****:*****@192.168.254.125/video.mjpg")
     self.win = Display((800, 600))
     self.clearBG()
     if fn:
         # Open the gzip log and keep a snapshot of the first frame
         self.out = opengz(fn, "w")
         self.cam.getImage().save(fn + 'first.png')
     else:
         self.out = None
コード例 #4
0
 def __init__(self, fn=None, cam=0):
   """Configure the blob detector and open the capture device; when fn is
   given, open a gzip log and save the first captured frame as a PNG.
   """
   # Detector tuned for small blobs; inertia/convexity filtering disabled
   params = SimpleBlobDetector_Params()
   params.minArea = 5
   params.maxArea = 300
   params.filterByInertia = False
   params.filterByConvexity = False
   self.bd = SimpleBlobDetector(params)
   self.cam = VideoCapture(cam)
   self.clearBG()
   if fn:
     # Gzip log plus a snapshot of the first frame for reference
     self.out = opengz(fn, "w")
     ok, img = self.cam.read()
     cv2_imwrite(fn + 'first.png', img)
   else:
     self.out = None
コード例 #5
0
def search(d, useRegex, searchTerm, debug):
    """Search every ``.log`` / ``.log.gz`` file directly inside directory *d*.

    INPUT:
      d -- str -- directory to scan (with or without a trailing separator)
      useRegex -- bool -- treat searchTerm as a regular expression
      searchTerm -- str -- regex pattern, or a plain substring matched
          case-insensitively
      debug -- bool -- print progress and skipped files

    OUTPUT:
      list of str -- one ``filename + matching line`` entry per hit

    Fix: the plain-text branch lowercased the line but not the search term,
    so a term containing any uppercase character could never match.
    """
    results = []
    # Lowercase once, outside the loops, so substring matching is
    # genuinely case-insensitive (and not re-lowered per line).
    needle = searchTerm.lower()

    for file in os.listdir(d):
        # os.path.join tolerates d with or without a trailing separator;
        # the previous `d + file` silently required the trailing one.
        filepath = os.path.join(d, file)

        if file.endswith('.log'):
            # Read plain text
            if debug:
                print("Searching: {}".format(file))

            with open(filepath, 'r') as contents:
                _grep(contents, file, useRegex, searchTerm, needle, results)

        elif file.endswith('.log.gz'):
            # Read compressed text
            if debug:
                print("Searching: {}".format(file))

            try:
                with opengz(filepath, 'rt') as contents:
                    _grep(contents, file, useRegex, searchTerm, needle,
                          results)

            except EOFError:
                # Catch for missing end of file marker (truncated gzip)
                if debug:
                    print(
                        "\nEOFError: {} ended before the end-of-stream marker was reached\n"
                        .format(file))
        elif debug:
            # Show skipped files
            print("File Skipped: {}".format(file))

    return results


def _grep(lines, file, useRegex, searchTerm, needle, results):
    """Append ``file + line`` to *results* for every matching line.

    *needle* is the pre-lowercased form of *searchTerm*, used for the
    case-insensitive plain-substring branch.
    """
    for line in lines:
        if useRegex:
            if re.search(searchTerm, line):
                results.append(file + line)
        elif needle in line.lower():
            results.append(file + line)
コード例 #6
0
def series_by_filename_row(table, clean_store_dirpath, abs_tolerance=10):
    """Returns dictionary with path for files already in database, as defined
    as filename tag present and checking row_count in database vs CSV

    Series whose database row count differs from the CSV row count by more
    than abs_tolerance are considered incomplete and are deleted from the
    database (and omitted from the result).

    :param table: table name
    :param clean_store_dirpath: base path
    :param abs_tolerance: max allowed |db rows - csv rows| to keep a series
    :return: {filename: filepath}

    Fix: the gzipped CSV handle opened with opengz() was never closed.
    """
    # Get the filename tags in the database
    tags_by_filename = \
        series_by_filename(tag='filename',
                           clean_store_dirpath=clean_store_dirpath)

    # For each series in database
    ans = dict()
    for each_filename, each_path in tags_by_filename.items():
        # query row count in the database
        cql = 'SELECT COUNT(bid) ' \
              'FROM {} ' \
              'WHERE filename=\'{}\''.format(table,
                                             each_filename)
        # NOTE(review): assumes influx_qry(...).items() is indexable and
        # each value is a generator of points -- confirm client version.
        cql_response = db_man.influx_qry(cql).items()
        row_count_db = next(cql_response[0][1])['count']

        # Row count in the CSV, excluding the header line.
        # `with` ensures the handle is closed (it previously leaked).
        with opengz(each_path, 'r') as csv_fh:
            row_count_csv = sum(1 for _ in csv_fh) - 1

        # compare the two results
        difference = abs(row_count_db - row_count_csv)
        if difference <= abs_tolerance:
            logger.info('{} already in database with {} data points and {} '
                        'difference'.format(each_filename,
                                            row_count_db,
                                            difference))
            ans[each_filename] = each_path
        else:
            logger.warning('Incomplete series {} deleted, '
                           'difference {}'.format(each_filename,
                                                  difference))
            # if difference is greater the series is incomplete, something
            # went wrong. Delete it !
            db_man.delete_series(tags={'filename': each_filename})

    return ans
コード例 #7
0
def insert_validation(filepath, table, tags, abs_tolerance=10):
    """Validate number of rows: CSV vs Database

    :param filepath: path to the gzipped CSV that was inserted
    :param table: table (measurement) name queried in the database
    :param tags: dict providing 'filename', 'symbol' and 'provider'
    :param abs_tolerance: max |csv - db| difference rated 'Acceptable'
    :return: dict with 'value', 'csv', 'sec_master' and 'diff' keys

    Fixes: the opengz() handle was never closed, and the database client
    stayed open when the query raised KeyError.
    """
    client = db_man.influx_client(client_type='dataframe', user_type='reader')

    filename = tags['filename']
    symbol = tags['symbol']
    provider = tags['provider']
    # Data rows in the CSV, excluding the header line; `with` ensures
    # the handle is closed (it previously leaked).
    with opengz(filepath, 'r') as csv_fh:
        row_count = sum(1 for _ in csv_fh) - 1

    cql = 'SELECT COUNT(bid) FROM {} ' \
          'WHERE filename=\'{}\' ' \
          'AND symbol=\'{}\' ' \
          'AND provider=\'{}\''.format(table,
                                       filename,
                                       symbol,
                                       provider)

    try:
        rows_in_db = client.query(query=cql)[table]['count'].iloc[0]
    except KeyError:
        # No series for these tags at all.
        logger.info('Data from {} not in database'.format(filename))
        return {
            'value': 'Not in DB',
            'csv': row_count,
            'sec_master': 0,
            'diff': row_count
        }
    finally:
        # Close on both paths; previously it leaked on the KeyError path.
        client.close()

    difference = abs(row_count - rows_in_db)
    if difference == 0:
        ans = 'Exact'
    elif difference > abs_tolerance:
        ans = 'Not Acceptable'
    else:
        ans = 'Acceptable'

    logger.info('Validation {} difference of {}'.format(ans, difference))
    return {
        'value': ans,
        'csv': row_count,
        'sec_master': rows_in_db,
        'diff': difference
    }
コード例 #8
0
def series_by_filename_row(table, clean_store_dirpath, abs_tolerance=10):
    """Returns dictionary with path for files already in database, as defined
    as filename tag present and checking row_count in database vs CSV

    Series whose database row count differs from the CSV row count by more
    than abs_tolerance are deleted from the database and omitted.

    :param table: table name
    :param clean_store_dirpath: base path
    :param abs_tolerance: max allowed |db rows - csv rows| to keep a series
    :return: {filename: filepath}

    Fix: the gzipped CSV handle opened with opengz() was never closed.
    """
    # Get the filename tags in the database
    tags_by_filename = \
        series_by_filename(tag='filename',
                           clean_store_dirpath=clean_store_dirpath)

    # For each series in database
    ans = dict()
    for each_filename, each_path in tags_by_filename.items():
        # query row count in the database
        cql = 'SELECT COUNT(bid) ' \
              'FROM {} ' \
              'WHERE filename=\'{}\''.format(table,
                                             each_filename)
        # NOTE(review): assumes influx_qry(...).items() is indexable and
        # each value is a generator of points -- confirm client version.
        cql_response = db_man.influx_qry(cql).items()
        row_count_db = next(cql_response[0][1])['count']

        # Row count in the CSV, excluding the header line.
        # `with` ensures the handle is closed (it previously leaked).
        with opengz(each_path, 'r') as csv_fh:
            row_count_csv = sum(1 for _ in csv_fh) - 1

        # compare the two results
        difference = abs(row_count_db - row_count_csv)
        if difference <= abs_tolerance:
            logger.info('{} already in database with {} data points and {} '
                        'difference'.format(each_filename, row_count_db,
                                            difference))
            ans[each_filename] = each_path
        else:
            logger.warning('Incomplete series {} deleted, '
                           'difference {}'.format(each_filename, difference))
            # if difference is greater the series is incomplete, something
            # went wrong. Delete it !
            db_man.delete_series(tags={'filename': each_filename})

    return ans
コード例 #9
0
def insert_validation(filepath, table, tags, abs_tolerance=10):
    """Validate number of rows: CSV vs Database

    :param filepath: path to the gzipped CSV that was inserted
    :param table: table (measurement) name queried in the database
    :param tags: dict providing 'filename', 'symbol' and 'provider'
    :param abs_tolerance: max |csv - db| difference rated 'Acceptable'
    :return: dict with 'value', 'csv', 'sec_master' and 'diff' keys

    Fixes: the opengz() handle was never closed, and the database client
    stayed open when the query raised KeyError.
    """
    client = db_man.influx_client(client_type='dataframe', user_type='reader')

    filename = tags['filename']
    symbol = tags['symbol']
    provider = tags['provider']
    # Data rows in the CSV, excluding the header line; `with` ensures
    # the handle is closed (it previously leaked).
    with opengz(filepath, 'r') as csv_fh:
        row_count = sum(1 for _ in csv_fh) - 1

    cql = 'SELECT COUNT(bid) FROM {} ' \
          'WHERE filename=\'{}\' ' \
          'AND symbol=\'{}\' ' \
          'AND provider=\'{}\''.format(table,
                                       filename,
                                       symbol,
                                       provider)

    try:
        rows_in_db = client.query(query=cql)[table]['count'].iloc[0]
    except KeyError:
        # No series for these tags at all.
        logger.info('Data from {} not in database'.format(filename))
        return {'value': 'Not in DB', 'csv': row_count,
                'sec_master': 0, 'diff': row_count}
    finally:
        # Close on both paths; previously it leaked on the KeyError path.
        client.close()

    difference = abs(row_count - rows_in_db)
    if difference == 0:
        ans = 'Exact'
    elif difference > abs_tolerance:
        ans = 'Not Acceptable'
    else:
        ans = 'Acceptable'

    logger.info('Validation {} difference of {}'.format(ans, difference))
    return {'value': ans, 'csv': row_count,
            'sec_master': rows_in_db, 'diff': difference}
コード例 #10
0
ファイル: GEMMapping.py プロジェクト: xma82/SGVFinder
def _combine_map_files(seqdict, map_fs, outfile, fq_f1, fq_f2=None):
    """Merge alignment rows from map_fs with the reads in fq_f1 (and fq_f2
    for paired-end data), serializing one SourceRead per read -- as a ujson
    document per line -- into *outfile* (written via opengz).

    Output goes to ``outfile + '.tmp'`` first and is moved into place only
    after the merge completes, so a crash never leaves a partial outfile.

    NOTE(review): appears to assume each map file lists rows in the same
    read order as the fastq input -- confirm against the mapper invocation.
    Python 2 only (izip, .next()).
    """
    # Keep the original paths: map_fs is rebound to open file objects
    # below, but the paths are still needed for cleanup at the end.
    mapfscopy = [f for f in map_fs]
    # seqdict may be supplied lazily as a zero-argument factory.
    if type(seqdict) is types.FunctionType:
        seqdict = seqdict()
    out_f = outfile + '.tmp'
    try:
        fq1, fq2, ot = _open_gz_indif(fq_f1), (_open_gz_indif(fq_f2) if fq_f2
                                               is not None else None), opengz(
                                                   out_f, 'wb')
        map_fs = [open(f) for f in map_fs]
        map_fs_iters = [_stripread(f) for f in map_fs]
        # One pending (tab-split) row per map file, primed here.
        map_new_cells = [f.next().split(TAB) for f in map_fs_iters]
        # r2 stays None throughout for single-end input.
        for r1, r2 in izip(
                SeqIO.parse(fq1, 'fastq'),
            (SeqIO.parse(fq2, 'fastq') if fq2 is not None else _none_iter())):
            # Read id with the mate suffix stripped.
            curid = r1.id.replace('/1', '')
            curr = SourceRead(curid, \
                              [str(r1.seq)] if r2 is None else [str(r1.seq), str(r2.seq)], \
                              [r1.letter_annotations['phred_quality']] if r2 is None else \
                              [r1.letter_annotations['phred_quality'], \
                               r2.letter_annotations['phred_quality']])
            # Iterate in reverse so `del map_new_cells[i]` on exhaustion
            # does not shift the indices still to be visited.
            for i in range(len(map_fs_iters))[::-1]:
                # Consume every pending row that belongs to this read id.
                while len(
                        map_new_cells[i]) > 0 and map_new_cells[i][0].replace(
                            '/1', '').replace('/2', '').split()[0] == curid:
                    # Second argument picks which mate the row describes:
                    # 0 = r1's description, 1 = r2's, 2 = neither matched.
                    curr._add_from_cells(map_new_cells[i], \
                                         0 if r1.description == map_new_cells[i][0] \
                                         else 1 if r2.description == map_new_cells[i][0] else 2, \
                                         seqdict)
                    try:
                        map_new_cells[i] = map_fs_iters[i].next().split(TAB)
                    except StopIteration:
                        # This map file is exhausted; drop its state.
                        del map_new_cells[i]
                        del map_fs_iters[i]
                        break

            # Only reads with at least one collected alignment are written.
            if len(curr) > 0:
                curr.sort()
                ujson.dump(curr.to_ser(), ot)
                ot.write('\n')
    finally:
        # Best-effort close of everything; fq2/ot may not exist if the
        # opening statement itself raised.
        for f in map_fs:
            try:
                f.close()
            except:
                pass
        try:
            fq1.close()
        except:
            pass
        try:
            fq2.close()
        except:
            pass
        try:
            ot.close()
        except:
            pass
    # Atomically publish the finished output.
    shutil.move(out_f, outfile)
    # presumably removes the consumed map files -- _tryrm defined elsewhere
    for f in mapfscopy:
        _tryrm(f)
コード例 #11
0
ファイル: GEMMapping.py プロジェクト: xma82/SGVFinder
def _load_from_file(fpath):
    """Yield records parsed from the file at *fpath* (opened via opengz)."""
    fin = opengz(fpath)
    try:
        for record in _load_iterable_fromdesc(fin):
            yield record
    finally:
        # Mirror the context-manager behavior: close even on early exit.
        fin.close()
コード例 #12
0
def open_gz_indif(filepath):
    """Open *filepath*, using opengz when its extension is GZIP_EXT and a
    plain open() otherwise.
    """
    _, ext = splitext(filepath)
    if ext == GZIP_EXT:
        return opengz(filepath)
    return open(filepath)
コード例 #13
0
        pts[nm,:,2] = 1

args = iter([ x.strip() for x in raw_input("Options: ").split(" ") ])
logfile = None
for arg in args:
  if not arg:
      continue
  if arg == '-d' or arg == '--dest':
    WAYPOINT_LISTENERS = args.next().split(",")
    print "::: Destination(s) set to '%s'" % WAYPOINT_LISTENERS
    continue
  if arg == '-l' or arg == '--log':
    lfn = args.next()
    if not lfn.endswith('.gz'):
      lfn = lfn + ".gz"
    logfile = opengz(lfn)
    print "::: Logging to '%s' " % lfn
    continue
  if arg != '-h' and arg != '--help':
    print "Unrecognized argument '%s'" % arg
    # Fall through to help message  
  print """
    Usage %s <options>
    
       ( '-h' | '--help' )
            this help message
       ( '-l' | '--log' ) <filename>
            filename for logging data
       ( '-d' | '--dest' ) <destination-list>
            destinations for waypoint messages, as comma separated list
            of DNS names or IP addresses.