Example 1
def get_stick_scale(pointcloud, eps=0.1, min_samples=20):
    """Takes a point cloud, as a numpy array, looks for red segments
    of scale sticks, and returns the scale estimate with the most support.
    Method:
    pointcloud --dbscan--> clusters --lengthEstimation-->
        lengths --ransac--> best length

    Arguments:
        pointcloud    Point cloud containing only measuring stick segments
                      (only the red, or only the white parts)
        eps           DBSCAN parameter: Maximum distance between two samples
                      for them to be considered as in the same neighborhood.
        min_samples   DBSCAN parameter: The number of samples in a neighborhood
                      for a point to be considered as a core point.
    Returns:
        scale         Estimate of the size of one actual meter, expressed
                      in units of the pointcloud's coordinates.
        confidence    A number expressing the reliability of the estimated
                      scale. Confidence is in [0, 1]. With a confidence greater
                      than .5, the estimate can be considered usable for
                      further calculations.
    """

    # Return early for the trivial case of an empty point cloud.
    if pointcloud.size == 0:
        return 1.0, 0.0

    # find the red segments to measure
    pc_reds = extract_mask(pointcloud, get_red_mask(pointcloud))
    if len(pc_reds) == 0:
        # unit scale, zero confidence (i.e. any other estimate is better)
        return 1.0, 0.0

    cluster_generator = segment_dbscan(
        pc_reds, eps, min_samples, algorithm='kd_tree')

    sizes = [{'len': len(cluster),
              'meter': measure_length(cluster) * SEGMENTS_PER_METER}
             for cluster in cluster_generator]

    if len(sizes) == 0:
        return 1.0, 0.0

    scale, votes, n_clusters = ransac(sizes)
    confidence = get_confidence_level(votes, n_clusters)
    return scale, confidence
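
A minimal usage sketch (not part of the original source): it assumes get_stick_scale and its helpers (extract_mask, get_red_mask, measure_length, ransac, get_confidence_level, SEGMENTS_PER_METER) are importable from the surrounding module, and that the input is an XYZRGB numpy array containing only stick segments; the file name is illustrative.

import numpy as np

# Hypothetical input: an (N, 6) XYZRGB array of stick-segment points.
pointcloud = np.load('stick_segments.npy')

scale, confidence = get_stick_scale(pointcloud, eps=0.1, min_samples=20)
if confidence > 0.5:
    # Per the docstring, a confidence above .5 makes the estimate usable.
    print('one meter corresponds to %.3f point-cloud units' % scale)
else:
    print('estimate unreliable (confidence %.2f)' % confidence)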
Example 2
import sys

from docopt import docopt

from patty.segmentation import segment_dbscan
from patty.utils import load, save

if __name__ == '__main__':
    args = docopt(__doc__, sys.argv[1:])

    rgb_weight = float(args['--rgb_weight'])
    eps = float(args['<epsilon>'])
    minpoints = int(args['<minpoints>'])

    # Kludge to get a proper exception for file not found
    # (PCL will report "problem parsing header!").
    with open(args['<file>']) as _:
        pc = load(args['<file>'])
    print("%d points" % len(pc))

    clusters = segment_dbscan(pc, epsilon=eps, minpoints=minpoints,
                              rgb_weight=rgb_weight)

    # Every point starts as an outlier; points claimed by clusters are
    # subtracted below.
    n_outliers = len(pc)
    for i, cluster in enumerate(clusters):
        print("%d points in cluster %d" % (len(cluster), i))
        filename = '%s/cluster%d.%s' % (args['--output_dir'], i,
                                        args['--format'])
        save(cluster, filename)
        n_outliers -= len(cluster)

    print("%d outliers" % n_outliers)
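
Example 2 parses its command line with docopt against the module docstring (__doc__), which this excerpt omits. A plausible reconstruction covering exactly the options the script reads might look as follows; the wording and defaults are assumptions, not the original.

"""Segment a point cloud into DBSCAN clusters and save each cluster to a file.

Usage:
    segment.py [options] <file> <epsilon> <minpoints>

Options:
    --rgb_weight <w>    Weight of RGB color in the distance metric
                        [default: 0]
    --output_dir <dir>  Directory to write the cluster files to [default: .]
    --format <fmt>      Output file format for the clusters [default: ply]
"""

With such a docstring in place, the script could be invoked as, for example: python segment.py --rgb_weight 0.5 cloud.ply 0.1 20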