import sys, os, json

# make the repository root importable so the integration.* packages resolve
# when this script is run from its own directory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from klampt.robotsim import WorldModel
from klampt import se3, gldraw
from OpenGL.GL import glEnable, glDisable, glBlendFunc, glMaterialfv, glColor3f, glMatrixMode, glLoadIdentity, glMultMatrixf, glOrtho, glRasterPos3f
from OpenGL.GL import GL_BLEND, GL_SRC_ALPHA, GL_FRONT_AND_BACK, GL_ONE_MINUS_SRC_ALPHA, GL_AMBIENT_AND_DIFFUSE
from OpenGL.GL import GL_LIGHTING, GL_PROJECTION, GL_MODELVIEW
from OpenGL.GLU import gluPerspective, gluProject
from OpenGL.GLUT import glutLeaveMainLoop, glutPostRedisplay, glutPositionWindow, GLUT_BITMAP_HELVETICA_12

from integration.visualization import WorldViewer

world = WorldModel()
# shelf = world.loadRigidObject('klampt_models/north_shelf/shelf_with_bins.obj')
shelf = world.loadRigidObject('klampt_models/kiva_pod/model.obj')

bin_vantage_points = json.load(open('kb/bin_vantage_points.json'))
vantage_point_xforms = json.load(open('kb/vantage_point_xforms.json'))

# transform vantage points into world coordinates
for k in vantage_point_xforms.keys():
    vantage_point_xforms[k] = se3.mul(shelf.getTransform(), vantage_point_xforms[k])
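
# Illustration: the origin of each composed (R, t) transform is the vantage
# position in world coordinates; se3.apply maps a point through a transform
# (a small sketch using the Klampt se3 module imported above)
for k in vantage_point_xforms:
    print k, se3.apply(vantage_point_xforms[k], [0, 0, 0])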

class Viewer(WorldViewer):
    def display_screen(self):
        glDisable(GL_LIGHTING)

        glColor3f(1, 1, 1)

        # Projection: switch to a screen-space orthographic projection for
        # overlay text (a minimal sketch of the truncated remainder, assuming
        # the viewer exposes its pixel width and height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0, self.width, self.height, 0, -1, 1)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

        glEnable(GL_LIGHTING)

Example #2

import os
import json

import numpy

from integration.camera import invalid_mask

# base_path (the capture directory) and the target bin name `bin` are defined
# elsewhere in the original script
objects = json.load(open(os.path.join("perception", "null_match_values.json"))).keys()

SHELF_DIMS_PATH = os.path.join("perception", "shelf_subtract", "shelf_dims.json")
SHELF_CLOUD_PATH = os.path.join("perception", "shelf_subtract", "shelf_10000.npy")

shelf_xform = (numpy.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]), [1.03, -0.055, -0.9046])

# load the shelf subtraction data
bin_bounds = json.load(open(SHELF_DIMS_PATH))[bin]
# load and transform the shelf point cloud
shelf_cloud = numpy.load(open(SHELF_CLOUD_PATH))
shelf_cloud = shelf_cloud.dot(shelf_xform[0]) + shelf_xform[1]
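
# a small helper capturing the transform applied above (a sketch: cloud rows
# are XYZ points and the rotation uses the same row-vector convention as the
# line above)
def transform_cloud(cloud, xform):
    (R, t) = xform
    return cloud.dot(R) + t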

# load and transform the shelf model
world = WorldModel()
shelf = world.loadRigidObject("klampt_models/north_shelf/shelf_with_bins.obj")
shelf_xform[1][2] -= 0.01
shelf.setTransform(list(shelf_xform[0].flat), shelf_xform[1])
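
# note: numpy's .flat iterates row-major while Klampt rotation 9-lists are
# column-major, so this passes the transpose of shelf_xform[0]; the cloud
# transform above uses the matching row-vector convention, so the two agree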

for obj in objects:
    print "swtich to", obj

    for n in range(6):
        print "take", n,

        color = numpy.load(os.path.join(base_path, "{}_{}_color.npy".format(obj, n)))
        depth_uv = numpy.load(os.path.join(base_path, "{}_{}_depth_uv.npy".format(obj, n)))
        cloud = numpy.load(os.path.join(base_path, "{}_{}_cloud.npy".format(obj, n)))

        # clean up the point cloud
        clean_mask = invalid_mask(cloud)
Example #3
from time import sleep

# `master` is the integration master object constructed earlier in the
# original script
# master._set_target('bin_B', 'kong_duck_dog_toy')
master._localize_shelf_and_order_bin()
# master._localize_object('unused')
# master.knowledge_base.active_limb = 'left'
# master.knowledge_base.grasped_object = 'crayola_64_ct'
# master.knowledge_base.active_grasp = [[0.8449032475481839, 0.5332477717385703, -0.0422530024777488, -0.053105482443014856, 0.005018582272109273, -0.9985762973185823, -0.532276535286903, 0.8459442226103591, 0.03255859663963079], [0.1039670743637746, -0.1590815806021303, 0.07642602363877261]]

# poll the control update task until it finishes
task = master.manager.control.update()
while not task.done:
    sleep(0.1)

kb = master.knowledge_base

world = WorldModel()
robot = world.loadRobot('klampt_models/baxter_with_reflex_gripper_col.rob')

shelf = world.loadRigidObject('klampt_models/north_shelf/shelf_with_bins.obj')
shelf.setTransform(*kb.shelf_xform)

order_bin = world.loadRigidObject('klampt_models/north_order_bin/order_bin.obj')
order_bin.setTransform(*kb.order_bin_xform)

while True:
    print '{} >'.format(
        # master.knowledge_base.active_limb or '-',
        # master.knowledge_base.target_bin or '-',
        master.knowledge_base.target_object or '-',
    ),

    try:
        parts = str(raw_input()).split(' ')
    except (EOFError, KeyboardInterrupt):
        # leave the command prompt loop on end-of-input or interrupt
        break
Example #4
    # (fragment of a larger function: numpy, json, logging, pcd, time, and
    # OBJECT_MODEL_PATH are assumed in scope)
    # load the object model point cloud
    mark = time()
    model_cloud_color = pcd.parse(open(OBJECT_MODEL_PATH))[1]
    model_cloud = numpy.array([ p[:3] for p in model_cloud_color ])
    logging.info('loaded {} model points in {:.3f}s'.format(model_cloud.shape[0], time() - mark))

    # set up the bin bounds
    bin_bounds = json.load(open(SHELF_DIMS_PATH))[BIN_NAME]
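
    # a sketch of cropping a cloud to a bin's axis-aligned bounds; the
    # 'min'/'max' keys are hypothetical since the shelf_dims.json layout is
    # project-specific
    def crop_to_bounds(cloud, bounds):
        lo = numpy.array(bounds['min'])
        hi = numpy.array(bounds['max'])
        return cloud[numpy.all((cloud >= lo) & (cloud <= hi), axis=1)]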

    # for (i, p) in enumerate(camera_cloud_color):
        # p[:3] = camera_cloud[i]
    # debug_cloud([ shelf_cloud, camera_cloud ])

    world = WorldModel()
    shelf = world.loadRigidObject('klampt_models/north_shelf/shelf.obj')
    shelf.setTransform(list(shelf_xform[0].flat), shelf_xform[1])

    _, camera_cloud_aligned, object_cloud, mask = shelf_subtract(shelf_cloud, camera_cloud, shelf, bin_bounds)

    # debug_cloud([ shelf_cloud, camera_cloud, camera_cloud_aligned ])
    # debug_cloud([ object_cloud ], world=world)
    debug_cloud([ shelf_cloud, camera_cloud, camera_cloud_aligned ])
    debug_cloud([ object_cloud, camera_cloud_aligned ])

    object_cloud_color = []
    for i in range(len(mask)):
        if mask[i]:
            object_cloud_color.append(camera_cloud_color[i])
    for (i, p) in enumerate(object_cloud_color):
        p[:3] = object_cloud[i]
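
    # the two loops above can be collapsed with numpy indexing when the color
    # cloud is numeric (a sketch; the PCD parse may yield plain lists, hence
    # the explicit loops):
    #
    #   colors = numpy.asarray(camera_cloud_color)[numpy.asarray(mask, bool)]
    #   colors[:, :3] = object_cloud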
Example #5
import os
import sys
import logging

import numpy

from klampt.robotsim import WorldModel

from apc import baxter  # assumed import path; baxter supplies klampt_model_name
from apc.motion import PhysicalLowLevelController as LowLevelController
from integration.visualization import debug_cloud
from integration.camera import RemoteCamera
from integration.camera import uvtexture, invalid_mask
from integration.io import pcd
from perception.segmentation.shelf import shelf_subtract

logger = logging.getLogger(__name__)

limb = sys.argv[1]
if limb not in [ 'right', 'left' ]:
    logger.error('invalid limb: {}'.format(limb))
    raise SystemExit
bin = sys.argv[2]

world = WorldModel()
model = world.loadRobot(os.path.join('klampt_models', baxter.klampt_model_name))
shelf = world.loadRigidObject(os.path.join('klampt_models', 'north_shelf', 'shelf_with_bins.obj'))

robot = LowLevelController(model, baxter.klampt_model_name)
model.setConfig(robot.getSensedConfig())

shelf_xform = (
    numpy.array([[  0,  1,  0 ],
                 [ -1,  0,  0 ],
                 [  0,  0,  1 ]]),
    [ 1.03, -0.055, -0.9046 ]
)
shelf.setTransform(list(shelf_xform[0].flat), shelf_xform[1])

SHELF_DIMS_PATH = os.path.join('kb', 'shelf_dims.json')
SHELF_CLOUD_PATH = os.path.join('perception', 'segmentation', 'models', 'shelf_thin_10000.npy')
    
Example #6
    def localizeSpecificObject(self, bin, object):
        SHELF_DIMS_PATH = os.path.join('kb', 'shelf_dims.json')
        SHELF_CLOUD_PATH = os.path.join('perception', 'segmentation', 'models', 'shelf_thin_10000.npy')
    
        # acquire the camera image
        address = { 'left': '192.168.0.103', 'right': '192.168.0.104' }[self.knowledge_base.active_limb]
        #address = { 'left': '192.168.0.103', 'right': '192.168.0.103' }[self.knowledge_base.active_limb]
        camera = RemoteCamera(address, xform=self.camera_xform)
        camera.read()
        camera.close()

        # load the shelf subtraction data
        bin_bounds = json.load(open(SHELF_DIMS_PATH))[bin]
        # load and transform the shelf point cloud
        shelf_cloud = numpy.load(open(SHELF_CLOUD_PATH))
        shelf_cloud = shelf_cloud.dot(numpy.array(self.knowledge_base.shelf_xform[0]).reshape((3,3))) + self.knowledge_base.shelf_xform[1]
        # load and transform the shelf model
        world = WorldModel()
        shelf = world.loadRigidObject(os.path.join('klampt_models', 'north_shelf', 'shelf_with_bins.obj'))
        shelf.setTransform(*self.knowledge_base.shelf_xform)

        # clean up the point cloud
        (clean_mask, clean_cloud) = camera.clean()
        _, clean_cloud_aligned, _, clean_object_mask = shelf_subtract(shelf_cloud, clean_cloud, shelf, bin_bounds, downsample=1000)
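        # clean_cloud_aligned is the cleaned cloud registered to the shelf
        # model; clean_object_mask flags its non-shelf points within the bin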
        # scatter the per-point flags back onto the full H x W pixel grid:
        # clean_mask selects the pixels that survived cleaning
        object_mask = numpy.zeros(camera.cloud.shape[:2], dtype=numpy.bool)
        object_mask[clean_mask] = clean_object_mask

        #clean_cloud_aligned_color = map(lambda x: x[0] + [ x[1] ], zip(clean_cloud_aligned[clean_object_mask].tolist(), camera.colorize()[clean_mask][clean_object_mask].tolist()))
        #debug_cloud([ clean_cloud_aligned_color ], [ self.base_xform, self.camera_xform ])      
        #debug_cloud([ shelf_cloud, clean_cloud, clean_cloud_aligned ])
        #pcd.write(clean_cloud_aligned_color, open('/tmp/{}_{}.pcd'.format(self.knowledge_base.target_object, self.knowledge_base.target_bin), 'w'))

        (target_bin, target_object) = (self.knowledge_base.target_bin, self.knowledge_base.target_object)
        bin_contents = self.knowledge_base.bin_contents[target_bin] if target_bin in self.knowledge_base.bin_contents else []

        # perform special handling
        if target_object == 'rollodex_mesh_collection_jumbo_pencil_cup':
            logger.info('running specialized rollodex cups detection')
            return self._localizeRollodexCups(world, camera.color, clean_cloud_aligned, camera.depth_uv, clean_mask, object_mask)

        # dilate the object mask
        # from scipy.misc import imsave
        from scipy.ndimage.morphology import binary_dilation
        object_mask_dilated = binary_dilation(object_mask, iterations=2)
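
        # binary_dilation grows the mask outward by one pixel per iteration,
        # closing small gaps before labeling; e.g.
        #   binary_dilation([False, True, False, False], iterations=1)
        #   # -> array([ True,  True,  True, False])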
        # imsave('/tmp/test.png', object_mask)

        # label connected components
        (object_labeled, label_count) = distance_label(camera.cloud, object_mask_dilated, threshold=0.01)
        logger.info('found {} connected components'.format(label_count))       

        # KH: added to help debugging without changing order bin contents
        if object not in bin_contents:
            logger.warning('trying to detect object {} not in bin contents; adding a temporary entry'.format(object))
            bin_contents = bin_contents + [object]

        bin_contents_with_shelf = bin_contents + [ 'shelf' ]
        
        if not label_count:
            logger.warning('found no connected components')
            mask = object_mask
    
            bin_contents = self.knowledge_base.bin_contents[self.knowledge_base.target_bin]
            confusion_row = dict(zip(bin_contents, [0]*len(bin_contents)))
        else:
            object_blobs = sorted([ object_labeled == (l+1) for l in range(label_count) ], key=lambda b: -numpy.count_nonzero(b))
            #for (i, blob) in enumerate(object_blobs):
            #    logger.debug('blob {}: {} points'.format(i, numpy.count_nonzero(blob)))

            # filter out blobs with fewer than 1000 points
            min_point_count = 1000
            object_blobs = filter(lambda blob: numpy.count_nonzero(blob[clean_mask]) >= min_point_count, object_blobs)
                
            #debug_cloud([ clean_cloud_aligned[(object_mask & blob_mask)[clean_mask]] for blob_mask in object_blobs ], [ self.base_xform, self.camera_xform ], wait=False) 

            known_object_count = len(bin_contents)
            if known_object_count == 0:
                # return all large blobs
                n = len(object_blobs)
    
                # take only the largest n blobs
                blob_mask = reduce(numpy.bitwise_or, object_blobs[:n])
                mask = object_mask & blob_mask
                
                logger.warning('no objects in bin... -> returning all blobs')
            else:
                # load the matching statistics
                match_stats = json.load(open(os.path.join('perception', 'segmentation', 'match_stats.json')))            
                
                # load the object hue histograms
                known_histograms = {}
                for obj in bin_contents_with_shelf:
                    #array = numpy.load(os.path.join('perception', 'hue_array', '{}.npz'.format(obj)))['arr_0']
                    #known_histograms[obj] = numpy.histogram(array, bins=range(0,181), density=True)[0].reshape(-1,1).astype(numpy.float32)
                    known_histograms[obj] = numpy.load(os.path.join('perception', 'uv_hist2', '{}.npz'.format(obj)))['arr_0']
                                    
                # compute the color validity mask
                color_valid_mask = ~invalid_mask(camera.depth_uv)

                # KH addition 5/23: test to see whether blobs should be broken up
                if len(object_blobs) == 1 and len(known_histograms) > 2:
                    logger.info("Trying KMeans segmentation to break up large blob")
                    blob_mask = reduce(numpy.bitwise_or, object_blobs)
                    mask = object_mask & blob_mask & color_valid_mask
                    labels, k, score = xyzrgb_segment_kmeans(camera.cloud[mask], camera.colorize()[mask], known_histograms.values())
                    labels = numpy.array(labels)
                    object_blobs = []
                    for i in xrange(len(known_histograms)):
                        blank = numpy.zeros_like(object_mask, dtype=numpy.bool)
                        blank[mask] = (labels == i)
                        object_blobs.append(blank)

                # score each blob for each object in the bin 
                blob_scores = []
                blob_accepts = []
                blob_rejects = []
                matched_blobs = {}
                confusion_matrix = []
                for (i, blob) in enumerate(object_blobs):
                    # compute the blob histogram
                    blob_uv = [ rgb2yuv(*rgb)[1:3] for rgb in camera.colorize()[object_mask & blob & color_valid_mask] ]
                    blob_histogram = make_uv_hist(blob_uv)
              
                    # compare the blob to each possible object                    
                    scores = dict([ (obj, 2 * numpy.minimum(blob_histogram, histogram).sum() - numpy.maximum(blob_histogram, histogram).sum()) for (obj, histogram) in known_histograms.items() ])                
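
                    # worked example: with histograms normalized to sum to 1,
                    # identical histograms score 2*1 - 1 = 1 and disjoint ones
                    # score 2*0 - 2 = -2, so the score lies in [-2, 1]
                    # (assuming make_uv_hist returns normalized histograms)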
                    
                    logger.debug('blob {}:'.format(i))
                    
                    blob_scores.append(scores)
                    logger.debug('  scores: {}'.format([ '{}={}'.format(*x) for x in scores.items() ]))
                    
                    # apply the cutoff to each score and associate accepted
                    # objects with their positive predictive value (ppv)
                    accepts = dict([ (obj, match_stats[obj]['ppv']) for obj in bin_contents_with_shelf if scores[obj] >= match_stats[obj]['cutoff'] ])
                    blob_accepts.append(accepts)
                    logger.debug('  accepts: {}'.format([ '{}={}'.format(*x) for x in accepts.items() ]))
                    
                    # apply the cutoff to each score and associate rejected
                    # objects with their negative predictive value (npv)
                    rejects = dict([ (obj, match_stats[obj]['npv']) for obj in bin_contents_with_shelf if scores[obj] < match_stats[obj]['cutoff'] ])
                    blob_rejects.append(rejects)                    
                    logger.debug('  rejects: {}'.format([ '{}={}'.format(*x) for x in rejects.items() ]))

                    # populate the confusion matrix: use the specificity S(o)
                    # for accepted objects and 1 - S(o) for rejected ones
                    # (the [x, y][cond] form is a Python 2 conditional idiom),
                    # normalized by the total probability mass
                    shelf_probability = 0.1
                    S = lambda o: match_stats[o]['specificity']
                    iS = lambda o: 1 - S(o)
                    total_probability = sum(map(S, accepts)) + sum(map(iS, rejects)) + shelf_probability
                    confusion_matrix.append([ [ iS(o), S(o) ][ o in accepts ] / total_probability for o in bin_contents ])

                    # resolve multiple assignments
                    if len(accepts) > 1:
                        # choose the object with the highest matched confidence    
                        best_match_object = sorted(accepts.items(), key=lambda a: -a[1])[0][0]
                        # remove all other objects
                        for (obj, confidence) in accepts.items():
                            if obj != best_match_object:
                                del accepts[obj]
                                logger.warn('blob {} multiple accept ignored: {}={}'.format(i, obj, confidence))
                    
                    if len(accepts) == 1:
                        # a single match so record it
                        matched_blobs.setdefault(accepts.keys()[0], []).append(i)
                        logger.info('blob {} assigned to {} by single match'.format(i, accepts.keys()[0]))
                    
                confusion_matrix = numpy.array(confusion_matrix)
                logger.debug('confusion matrix:\n{}'.format(confusion_matrix))
                    
                # assign blobs by least threshold difference until all objects are assigned (excluding shelf) or all blobs are assigned
                while len([ k for k in matched_blobs.keys() if k != 'shelf']) < len(bin_contents) and len(sum(matched_blobs.values(), [])) < len(object_blobs):
                    best_match_objects = []
                
                    for (i, scores) in enumerate(blob_scores):
                        # only consider unmatched blobs
                        if i not in sum(matched_blobs.values(), []):
                            threshold_differences = [ [obj, match_stats[obj]['cutoff'] - score]  for (obj, score) in scores.items() ]
                            best_match_objects.append([ i ] + sorted(threshold_differences, key=lambda d: d[1])[0])
                            
                    # choose the least threshold difference across all blobs and objects
                    best_blob, best_object, _ = sorted(best_match_objects, key=lambda b: b[2])[0]
                    matched_blobs.setdefault(best_object, []).append(best_blob)                    
                    logger.warn('blob {} assigned to {} by least threshold difference'.format(best_blob, best_object))
                
                # report unmatched blobs and objects (excluding shelf)
                for obj in bin_contents:
                    if obj not in matched_blobs:
                        logger.warn('no blobs matched {}'.format(obj))
                for i in range(len(object_blobs)):
                    if i not in sum(matched_blobs.values(), []):
                        logger.warn('blob {} is unassigned'.format(i))
                
                for obj in bin_contents_with_shelf:
                    if obj in matched_blobs:
                        print obj
                        mask = reduce(numpy.bitwise_or, [ object_blobs[i] for i in matched_blobs[obj] ], numpy.zeros_like(object_mask))
                        object_cloud = clean_cloud_aligned[mask[clean_mask]]
                        xform = (so3.identity(), object_cloud.mean(axis=0).tolist())
                        object_cloud_color = map(lambda x: x[0] + [ x[1] ], zip(object_cloud.tolist(), camera.colorize()[mask].tolist()))
                        #debug_cloud([ object_cloud_color ], [ self.base_xform, self.camera_xform, xform ])      
                
                # check that the target object was found
                if target_object not in matched_blobs:
                    logger.error('no blobs assigned to target object')
                    return None
                
                # return blob for the selected object
                mask = reduce(numpy.bitwise_or, [ object_blobs[i] for i in matched_blobs[target_object] ], numpy.zeros_like(object_mask))

                # return the confusion matrix rows for the selected object
                confusion_rows = confusion_matrix[matched_blobs[target_object], :]
                confusion_row = dict([ (obj, confusion_rows[:, i].mean()) for (i, obj) in enumerate(bin_contents) ])
                logger.info('confusion row for {}: {}'.format(target_object, confusion_row))

        object_cloud = clean_cloud_aligned[mask[clean_mask]]
        
        #xform = (so3.identity(), object_cloud.mean(axis=0).tolist())
        xform = se3.identity()
        object_cloud_color = map(lambda x: x[0] + [ x[1] ], zip(object_cloud.tolist(), camera.colorize()[mask].tolist()))
        #debug_cloud([ object_cloud_color ], [ self.base_xform, self.camera_xform, xform ])      
        
        # downsample the cloud to about 1000 points
        ratio = int(len(object_cloud) / 1000)
        if ratio > 1:
            object_cloud_color = object_cloud_color[::ratio]
        
        pcd.write(object_cloud_color, open('/tmp/{}_{}.pcd'.format(self.knowledge_base.target_object, self.knowledge_base.target_bin), 'w'))

        return (xform, object_cloud_color, confusion_row)
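
# usage sketch (hypothetical: `perception` stands for whatever object exposes
# localizeSpecificObject; the method returns None when the target object is
# not found):
#
#   result = perception.localizeSpecificObject('bin_A', 'crayola_64_ct')
#   if result is not None:
#       (xform, object_cloud_color, confusion_row) = result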