Example #1
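Computes the wrist camera transform for the active limb by composing the gripper link pose with the per-limb camera offset.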
    def __init__(self, knowledge_base):
        PerceptionInterface.__init__(self, knowledge_base)

        world = WorldModel()
        robot = world.loadRobot(os.path.join('klampt_models', baxter.klampt_model_name))
        robot.setConfig(self.knowledge_base.robot_state.sensed_config)

        self.base_xform = robot.getLink('{}_gripper'.format(self.knowledge_base.active_limb)).getTransform()
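        # compose the gripper pose with the hand camera offset for the active limb
        # (Klampt se3 transforms are (R, t) pairs)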
        if self.knowledge_base.active_limb == 'left':
            self.camera_xform = se3.mul(self.base_xform, self.knowledge_base.left_camera_offset_xform)
        else:
            self.camera_xform = se3.mul(self.base_xform, self.knowledge_base.right_camera_offset_xform)
Example #2
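A knowledge base viewer: loads the Baxter model into a WorldModel and initializes the WorldViewer base class.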
    def __init__(self, knowledge_base=None, title=None):
        world = WorldModel()
        # self.ground = world.loadTerrain('klampt_models/plane.env')
        self.shelf = None
        self.order_bin = None
        self.objects = {}
        self.n = 0

        # instantiate the robot
        self.robot = world.loadRobot(os.path.join("klampt_models", baxter.klampt_model_name))
        logger.debug("robot model loaded from {}".format(baxter.klampt_model_name))

        self.knowledge_base = knowledge_base
        self.robot_state = None

        WorldViewer.__init__(self, world, title=title or "Knowledge Base Viewer")
Example #3
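A child-process handler that owns the robot model and low-level controller, servicing pipe commands while keeping a viewer updated.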
    def _handler(self):
        logger.debug('child {} start'.format(self.child.pid))

        # hold a reference to the world so that it is not garbage collected
        self.world = WorldModel()
        # create the robot model for the controller
        self.robot = self.world.loadRobot(os.path.join('klampt_models', baxter.klampt_model_name))
        logger.debug('robot model loaded from {}'.format(baxter.klampt_model_name))
        self.controller = LowLevelController(self.robot, baxter.klampt_model_name)
        logger.debug('low-level controller initialized')

        # send the initial controller state
        self.child_pipe.send((None, RobotState(self.controller)))

        while True:
            # get the call information
            try:
                # update the viewer while waiting for a command
                while not self.child_pipe.poll():
                    self._update_viewer()
                    sleep(interval)

                # receive the command
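                # each command is a (knowledge_base, method, args, kwargs) tuple;
                # method=None signals a pure knowledge base update (handled below)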
                (self.knowledge_base, self.method, self.args, self.kwargs) = self.child_pipe.recv()
            except EOFError:
                logger.info('child {} shutdown requested'.format(self.child.pid))
                # signal to shut down
                break

            # distinguish between pure knowledge base updates and method calls
            if not self.method:
                self._update_viewer()
                self._send_result(None)
                continue

            # route remote calls
            self.init_args = (self.controller, self.knowledge_base)
            # hook the sleep method to update the viewer during execution
            self.kwargs['sleep'] = lambda n: self._sleep(n)
            BaseJob._handler(self)

        # clean up
        # need to call the superclass since this class overrides with empty method
        BaseJob._close(self)
        # TODO: shutdown the controller
        #controller.motion.shutdown()

        self.viewer.close()
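Example #4
A standalone script that loads the shelf model, transforms the bin vantage points into world coordinates, and renders overlays in a WorldViewer subclass.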
# allow importing from the repository root
import sys, os, json
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from klampt.robotsim import WorldModel
from klampt import se3, gldraw
from OpenGL.GL import glEnable, glDisable, glBlendFunc, glMaterialfv, glColor3f, glMatrixMode, glLoadIdentity, glMultMatrixf, glOrtho, glRasterPos3f
from OpenGL.GL import GL_BLEND, GL_SRC_ALPHA, GL_FRONT_AND_BACK, GL_ONE_MINUS_SRC_ALPHA, GL_AMBIENT_AND_DIFFUSE
from OpenGL.GL import GL_LIGHTING, GL_PROJECTION, GL_MODELVIEW
from OpenGL.GLU import gluPerspective, gluProject
from OpenGL.GLUT import glutLeaveMainLoop, glutPostRedisplay, glutPositionWindow, GLUT_BITMAP_HELVETICA_12

from integration.visualization import WorldViewer

world = WorldModel()
# shelf = world.loadRigidObject('klampt_models/north_shelf/shelf_with_bins.obj')
shelf = world.loadRigidObject('klampt_models/kiva_pod/model.obj')

bin_vantage_points = json.load(open('kb/bin_vantage_points.json'))
vantage_point_xforms = json.load(open('kb/vantage_point_xforms.json'))

# transform vantage points into world coordinates
for k in vantage_point_xforms.keys():
    vantage_point_xforms[k] = se3.mul(shelf.getTransform(), vantage_point_xforms[k])

class Viewer(WorldViewer):
    def display_screen(self):
        glDisable(GL_LIGHTING)

        glColor3f(1, 1, 1)
Example #5
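Bootstraps a KnowledgeBase against the physical low-level controller and loads an example APC order.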
from integration.planning_server import PlanningServer
from integration.control_server import ControlServer
from integration.interface import PlanningInterface, ControlInterface, PerceptionInterface
from api.shared import KnowledgeBase, RobotState
from apc.motion import PhysicalLowLevelController as LowLevelController

from klampt.robotsim import WorldModel
from klampt import se3,so3
from time import sleep

import os, json
apc_order = json.load(open(os.path.join(os.path.dirname(__file__), 'example.json')))

knowledge_base_dir = './kb/'

world = WorldModel()
model = world.loadRobot('klampt_models/baxter_with_reflex_gripper_col.rob')
print 'initializing low-level controller'
robot = LowLevelController(model, 'baxter_with_reflex_gripper_col.rob')
print 'low-level controller ready'

kb = KnowledgeBase()
kb.robot_state = RobotState(robot)
print kb.robot_state
kb.active_limb = 'left'
kb.bin_contents = apc_order['bin_contents']

# make fake bin vantage points; these need to be updated to real vantage points eventually
for k in [ 'bin_vantage_points', 'vantage_point_xforms' ]:
    path = os.path.join(knowledge_base_dir, '{}.json'.format(k))
    data = json.load(open(path))
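Example #6
An offline capture-processing script: aligns the shelf point cloud and model, then iterates over recorded color/depth/cloud captures for each object.
import os, json
import numpy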
objects = json.load(open(os.path.join("perception", "null_match_values.json"))).keys()

SHELF_DIMS_PATH = os.path.join("perception", "shelf_subtract", "shelf_dims.json")
SHELF_CLOUD_PATH = os.path.join("perception", "shelf_subtract", "shelf_10000.npy")

shelf_xform = (numpy.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]), [1.03, -0.055, -0.9046])

# load the shelf subtraction data
bin_bounds = json.load(open(SHELF_DIMS_PATH))[bin]
# load and transform the shelf point cloud
shelf_cloud = numpy.load(open(SHELF_CLOUD_PATH))
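# (points are stored as row vectors, so .dot(R) right-multiplies each point by the matrix)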
shelf_cloud = shelf_cloud.dot(shelf_xform[0]) + shelf_xform[1]

# load and transform the shelf model
world = WorldModel()
shelf = world.loadRigidObject("klampt_models/north_shelf/shelf_with_bins.obj")
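# nudge the shelf model down 1 cm before applying the transform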
shelf_xform[1][2] -= 0.01
shelf.setTransform(list(shelf_xform[0].flat), shelf_xform[1])

for obj in objects:
    print "swtich to", obj

    for n in range(6):
        print "take", n,

        color = numpy.load(os.path.join(base_path, "{}_{}_color.npy".format(obj, n)))
        depth_uv = numpy.load(os.path.join(base_path, "{}_{}_depth_uv.npy".format(obj, n)))
        cloud = numpy.load(os.path.join(base_path, "{}_{}_cloud.npy".format(obj, n)))

        # clean up the point cloud
Example #7
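An interactive session that localizes the shelf and order bin, then mirrors the knowledge base transforms into a local WorldModel.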
import baxter_scoop as baxter

# master._set_target('bin_A', 'crayola_64_ct')
# master._set_target('bin_B', 'kong_duck_dog_toy')
master._localize_shelf_and_order_bin()
# master._localize_object('unused')
# master.knowledge_base.active_limb = 'left'
# master.knowledge_base.grasped_object = 'crayola_64_ct'
# master.knowledge_base.active_grasp = [[0.8449032475481839, 0.5332477717385703, -0.0422530024777488, -0.053105482443014856, 0.005018582272109273, -0.9985762973185823, -0.532276535286903, 0.8459442226103591, 0.03255859663963079], [0.1039670743637746, -0.1590815806021303, 0.07642602363877261]]

task = master.manager.control.update()
while not task.done: sleep(0.1)

kb = master.knowledge_base

world = WorldModel()
robot = world.loadRobot('klampt_models/baxter_with_reflex_gripper_col.rob')

shelf = world.loadRigidObject('klampt_models/north_shelf/shelf_with_bins.obj')
shelf.setTransform(*kb.shelf_xform)

order_bin = world.loadRigidObject('klampt_models/north_order_bin/order_bin.obj')
order_bin.setTransform(*kb.order_bin_xform)

while True:
    print '{} >'.format(
        # master.knowledge_base.active_limb or '-',
        # master.knowledge_base.target_bin or '-',
        master.knowledge_base.target_object or '-',
    ),
Example #8
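Shelf subtraction debugging: aligns a camera cloud against the shelf cloud and visualizes the extracted object points.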
    camera_cloud = camera_cloud.dot(numpy.array(camera_xform[0]).reshape((3, 3))) + camera_xform[1]

    # load the object model point cloud
    mark = time()
    model_cloud_color = pcd.parse(open(OBJECT_MODEL_PATH))[1]
    model_cloud = numpy.array([ p[:3] for p in model_cloud_color ])
    logging.info('loaded {} model points in {:.3f}s'.format(model_cloud.shape[0], time() - mark))

    # set up the bin bounds
    bin_bounds = json.load(open(SHELF_DIMS_PATH))[BIN_NAME]

    # for (i, p) in enumerate(camera_cloud_color):
        # p[:3] = camera_cloud[i]
    # debug_cloud([ shelf_cloud, camera_cloud ])

    world = WorldModel()
    shelf = world.loadRigidObject('klampt_models/north_shelf/shelf.obj')
    shelf.setTransform(list(shelf_xform[0].flat), shelf_xform[1])

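    # shelf_subtract aligns the camera cloud to the shelf cloud and separates
    # object points from shelf points (per the unpacked names below)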
    _, camera_cloud_aligned, object_cloud, mask = shelf_subtract(shelf_cloud, camera_cloud, shelf, bin_bounds)

    # debug_cloud([ shelf_cloud, camera_cloud, camera_cloud_aligned ])
    # debug_cloud([ object_cloud ], world=world)
    debug_cloud([ shelf_cloud, camera_cloud, camera_cloud_aligned ])
    debug_cloud([ object_cloud, camera_cloud_aligned ])

    object_cloud_color = []
    for i in range(len(mask)):
        if mask[i]:
            object_cloud_color.append(camera_cloud_color[i])
    for (i, p) in enumerate(object_cloud_color):
Example #9
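The complete ControlJob class; its _handler is the child-process loop shown in Example #3.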
class ControlJob(BaseJob):
    def __init__(self, knowledge_base):
        # this knowledge base reference is used for updating the robot state
        self.knowledge_base = knowledge_base

        # create the viewer
        self.viewer = Viewer()

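        # interface init args are filled in per call inside _handler (via self.init_args)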
        BaseJob.__init__(self, None, None)

    def _get_interface(self):
        return ControlInterface

    def _handler(self):
        logger.debug('child {} start'.format(self.child.pid))

        # hold a reference to the world so that it is not garbage collected
        self.world = WorldModel()
        # create the robot model for the controller
        self.robot = self.world.loadRobot(os.path.join('klampt_models', baxter.klampt_model_name))
        logger.debug('robot model loaded from {}'.format(baxter.klampt_model_name))
        self.controller = LowLevelController(self.robot, baxter.klampt_model_name)
        logger.debug('low-level controller initialized')

        # send the initial controller state
        self.child_pipe.send((None, RobotState(self.controller)))

        while True:
            # get the call information
            try:
                # update the viewer while waiting for a command
                while not self.child_pipe.poll():
                    self._update_viewer()
                    sleep(interval)

                # receive the command
                (self.knowledge_base, self.method, self.args, self.kwargs) = self.child_pipe.recv()
            except EOFError:
                logger.info('child {} shutdown requested'.format(self.child.pid))
                # signal to shut down
                break

            # distinguish between pure knowledge base updates and method calls
            if not self.method:
                self._update_viewer()
                self._send_result(None)
                continue

            # route remote calls
            self.init_args = (self.controller, self.knowledge_base)
            # hook the sleep method to update the viewer during execution
            self.kwargs['sleep'] = lambda n: self._sleep(n)
            BaseJob._handler(self)

        # clean up
        # need to call the superclass since this class overrides with empty method
        BaseJob._close(self)
        # TODO: shutdown the controller
        #controller.motion.shutdown()

        self.viewer.close()

    def _sleep(self, n):
        while n > 0:
            self._update_viewer()
            n -= interval
            sleep(interval)

    def _update_viewer(self):
        try:
            # check to see if the viewer is alive
            if self.viewer.heartbeat:
                # update the viewer
                logger.info('updating viewer')
                self.viewer.update(self.knowledge_base, RobotState(self.controller))
                logger.info('done updating viewer')
            else:
                logger.info('viewer missed heartbeat')
        except Exception as e:
            logger.error('failed to update the viewer: {}'.format(e))

    def _send_result(self, result):
        # attach the robot state to the job result
        BaseJob._send_result(self, (result, RobotState(self.controller)))

    def _check(self):
        # run even if child is still alive
        # check for a result before attempting a blocking call
        if self.pipe.poll():
            self._process_result()
            self._done = True

            # extract the robot state and update the knowledge base
            if not self.error:
                (self._result, self.knowledge_base.robot_state) = self.result

        # check that the child is alive
        # this is done after the poll since the child could have sent a result and then exited
        if not self.alive:
            logger.error('child {} died unexpectedly: {}'.format(self.child.pid, self.child.exitcode))
            # set this exception as the error result
            self._error = Exception('child died unexpectedly: {}'.format(self.child.exitcode))
            self._done = True

    def _close(self):
        # keep the pipes open by default
        pass
Example #10
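A command-line capture script that brings up the physical controller and positions the shelf geometry for bin segmentation.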
import apc.baxter_scoop as baxter
from api.shared import KnowledgeBase
from apc.motion import PhysicalLowLevelController as LowLevelController
from integration.visualization import debug_cloud
from integration.camera import RemoteCamera
from integration.camera import uvtexture, invalid_mask
from integration.io import pcd
from perception.segmentation.shelf import shelf_subtract

import sys, os
import logging
import numpy

logger = logging.getLogger(__name__)

limb = sys.argv[1]
if limb not in [ 'right', 'left' ]:
    logger.error('invalid limb: {}'.format(limb))
    raise SystemExit
bin = sys.argv[2]

world = WorldModel()
model = world.loadRobot(os.path.join('klampt_models', baxter.klampt_model_name))
shelf = world.loadRigidObject(os.path.join('klampt_models', 'north_shelf', 'shelf_with_bins.obj'))

robot = LowLevelController(model, baxter.klampt_model_name)
model.setConfig(robot.getSensedConfig())

shelf_xform = (
    numpy.array([[  0,  1,  0 ],
                 [ -1,  0,  0 ],
                 [  0,  0,  1 ]]),
    [ 1.03, -0.055, -0.9046 ]
)
shelf.setTransform(list(shelf_xform[0].flat), shelf_xform[1])

SHELF_DIMS_PATH = os.path.join('kb', 'shelf_dims.json')
Example #11
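Object localization within a bin: shelf subtraction, connected-component labeling, and histogram-based matching of blobs to bin contents.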
    def localizeSpecificObject(self, bin, object):
        SHELF_DIMS_PATH = os.path.join('kb', 'shelf_dims.json')
        SHELF_CLOUD_PATH = os.path.join('perception', 'segmentation', 'models', 'shelf_thin_10000.npy')
    
        # acquire the camera image
        address = { 'left': '192.168.0.103', 'right': '192.168.0.104' }[self.knowledge_base.active_limb]
        #address = { 'left': '192.168.0.103', 'right': '192.168.0.103' }[self.knowledge_base.active_limb]
        camera = RemoteCamera(address, xform=self.camera_xform)
        camera.read()
        camera.close()

        # load the shelf subtraction data
        bin_bounds = json.load(open(SHELF_DIMS_PATH))[bin]
        # load and transform the shelf point cloud
        shelf_cloud = numpy.load(open(SHELF_CLOUD_PATH))
        shelf_cloud = shelf_cloud.dot(numpy.array(self.knowledge_base.shelf_xform[0]).reshape((3,3))) + self.knowledge_base.shelf_xform[1]
        # load and transform the shelf model
        world = WorldModel()
        shelf = world.loadRigidObject(os.path.join('klampt_models', 'north_shelf', 'shelf_with_bins.obj'))
        shelf.setTransform(*self.knowledge_base.shelf_xform)

        # clean up the point cloud
        (clean_mask, clean_cloud) = camera.clean()
        _, clean_cloud_aligned, _, clean_object_mask = shelf_subtract(shelf_cloud, clean_cloud, shelf, bin_bounds, downsample=1000)
        object_mask = numpy.zeros(camera.cloud.shape[:2], dtype=numpy.bool)
        object_mask[clean_mask] = clean_object_mask

        #clean_cloud_aligned_color = map(lambda x: x[0] + [ x[1] ], zip(clean_cloud_aligned[clean_object_mask].tolist(), camera.colorize()[clean_mask][clean_object_mask].tolist()))
        #debug_cloud([ clean_cloud_aligned_color ], [ self.base_xform, self.camera_xform ])      
        #debug_cloud([ shelf_cloud, clean_cloud, clean_cloud_aligned ])
        #pcd.write(clean_cloud_aligned_color, open('/tmp/{}_{}.pcd'.format(self.knowledge_base.target_object, self.knowledge_base.target_bin), 'w'))

        (target_bin, target_object) = (self.knowledge_base.target_bin, self.knowledge_base.target_object)
        bin_contents = self.knowledge_base.bin_contents[target_bin] if target_bin in self.knowledge_base.bin_contents else []

        # perform special handling
        if target_object == 'rollodex_mesh_collection_jumbo_pencil_cup':
            logger.info('running specialized rollodex cups detection')
            return self._localizeRollodexCups(world, camera.color, clean_cloud_aligned, camera.depth_uv, clean_mask, object_mask)

        # dilate the object mask
        # from scipy.misc import imsave
        from scipy.ndimage.morphology import binary_dilation
        object_mask_dilated = binary_dilation(object_mask, iterations=2)
        # imsave('/tmp/test.png', object_mask)

        # label connected components
        (object_labeled, label_count) = distance_label(camera.cloud, object_mask_dilated, threshold=0.01)
        logger.info('found {} connected components'.format(label_count))       

        #KH: added to help debugging without changing order bin contents
        if object not in bin_contents:
            logger.warning("Warning, trying to detect object "+object+" not in bin contents, adding a temporary entry")
            bin_contents = bin_contents + [object]

        bin_contents_with_shelf = bin_contents + [ 'shelf' ]
        
        if not label_count:
            logger.warning('found no connected components')
            mask = object_mask
    
            bin_contents = self.knowledge_base.bin_contents[self.knowledge_base.target_bin]
            confusion_row = dict(zip(bin_contents, [0]*len(bin_contents)))
        else:
            object_blobs = sorted([ object_labeled == (l+1) for l in range(label_count) ], key=lambda b: -numpy.count_nonzero(b))
            #for (i, blob) in enumerate(object_blobs):
            #    logger.debug('blob {}: {} points'.format(i, numpy.count_nonzero(blob)))

            # filter out blobs with fewer than 1000 points
            min_point_count = 1000
            object_blobs = filter(lambda blob: numpy.count_nonzero(blob[clean_mask]) >= min_point_count, object_blobs)
                
            #debug_cloud([ clean_cloud_aligned[(object_mask & blob_mask)[clean_mask]] for blob_mask in object_blobs ], [ self.base_xform, self.camera_xform ], wait=False) 

            known_object_count = len(bin_contents)
            if known_object_count == 0:
                # return all large blobs
                n = len(object_blobs)
    
                # take only the largest n blobs
                blob_mask = reduce(numpy.bitwise_or, object_blobs[:n])
                mask = object_mask & blob_mask
                
                logger.warning('no objects in bin... -> returning all blobs')
            else:
                # load the matching statistics
                match_stats = json.load(open(os.path.join('perception', 'segmentation', 'match_stats.json')))            
                
                # load the object hue histograms
                known_histograms = {}
                for obj in bin_contents_with_shelf:
                    #array = numpy.load(os.path.join('perception', 'hue_array', '{}.npz'.format(obj)))['arr_0']
                    #known_histograms[obj] = numpy.histogram(array, bins=range(0,181), density=True)[0].reshape(-1,1).astype(numpy.float32)
                    known_histograms[obj] = numpy.load(os.path.join('perception', 'uv_hist2', '{}.npz'.format(obj)))['arr_0']
                                    
                # compute the color validity mask
                color_valid_mask = ~invalid_mask(camera.depth_uv)

                #KH addition 5/23: test to see whether blobs should be broken up
                if len(object_blobs)==1 and len(known_histograms) > 2:
                    logger.info("Trying KMeans segmentation to break up large blob")
                    blob_mask = reduce(numpy.bitwise_or, object_blobs)
                    mask = object_mask & blob_mask & color_valid_mask
                    labels,k,score = xyzrgb_segment_kmeans(camera.cloud[mask],camera.colorize()[mask],known_histograms.values())
                    labels = numpy.array(labels)
                    object_blobs = []
                    for i in xrange(len(known_histograms)):
                        blank = numpy.zeros_like(object_mask,dtype=numpy.bool)
                        blank[mask] = (labels==i)
                        object_blobs.append(blank)

                # score each blob for each object in the bin 
                blob_scores = []
                blob_accepts = []
                blob_rejects = []
                matched_blobs = {}
                confusion_matrix = []
                for (i, blob) in enumerate(object_blobs):
                    # compute the blob histogram
                    blob_uv = [ rgb2yuv(*rgb)[1:3] for rgb in camera.colorize()[object_mask & blob & color_valid_mask] ]
                    blob_histogram = make_uv_hist(blob_uv)
              
                    # compare the blob to each possible object                    
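                    # score = 2*sum(min) - sum(max); for unit-sum histograms this is
                    # 3*overlap - 2, i.e. 1 for identical and -2 for disjoint histograms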
                    scores = dict([ (obj, 2 * numpy.minimum(blob_histogram, histogram).sum() - numpy.maximum(blob_histogram, histogram).sum()) for (obj, histogram) in known_histograms.items() ])                
                    
                    logger.debug('blob {}:'.format(i))
                    
                    blob_scores.append(scores)
                    logger.debug('  scores: {}'.format([ '{}={}'.format(*x) for x in scores.items() ]))
                    
                    # apply the cutoff to each score and associate with positive confidence
                    accepts = dict([ (obj, match_stats[obj]['ppv']) for obj in bin_contents_with_shelf if scores[obj] >= match_stats[obj]['cutoff'] ])
                    blob_accepts.append(accepts)
                    logger.debug('  accepts: {}'.format([ '{}={}'.format(*x) for x in accepts.items() ]))
                    
                    # apply the cutoff to each score and associate with negative confidence
                    rejects = dict([ (obj, match_stats[obj]['npv']) for obj in bin_contents_with_shelf if scores[obj] < match_stats[obj]['cutoff'] ])
                    blob_rejects.append(rejects)                    
                    logger.debug('  rejects: {}'.format([ '{}={}'.format(*x) for x in rejects.items() ]))

                    # populate the confusion matrix
                    shelf_probability = 0.1
                    S = lambda o: match_stats[o]['specificity']
                    iS = lambda o: 1 - S(o)
                    total_probability = sum(map(S, accepts)) + sum(map(iS, rejects)) + shelf_probability
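                    # [x, y][cond] indexes by the boolean: S(o) if o was accepted, else 1 - S(o)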
                    confusion_matrix.append([ [ iS(o), S(o) ][ o in accepts ] / total_probability for o in bin_contents ])

                    # resolve multiple assignments
                    if len(accepts) > 1:
                        # choose the object with the highest matched confidence    
                        best_match_object = sorted(accepts.items(), key=lambda a: -a[1])[0][0]
                        # remove all other objects
                        for (obj, confidence) in accepts.items():
                            if obj != best_match_object:
                                del accepts[obj]
                                logger.warn('blob {} multiple accept ignored: {}={}'.format(i, obj, confidence))
                    
                    if len(accepts) == 1:
                        # a single match so record it
                        matched_blobs.setdefault(accepts.keys()[0], []).append(i)
                        logger.info('blob {} assigned to {} by single match'.format(i, accepts.keys()[0]))
                    
                confusion_matrix = numpy.array(confusion_matrix)
                logger.debug('confusion matrix:\n{}'.format(confusion_matrix))
                    
                # assign blobs by least threshold difference until all objects are assigned (excluding shelf) or all blobs are assigned
                while len([ k for k in matched_blobs.keys() if k != 'shelf']) < len(bin_contents) and len(sum(matched_blobs.values(), [])) < len(object_blobs):
                    best_match_objects = []
                
                    for (i, scores) in enumerate(blob_scores):
                        # only consider unmatched blobs
                        if i not in sum(matched_blobs.values(), []):
                            threshold_differences = [ [obj, match_stats[obj]['cutoff'] - score]  for (obj, score) in scores.items() ]
                            best_match_objects.append([ i ] + sorted(threshold_differences, key=lambda d: d[1])[0])
                            
                    # choose the least threshold difference across all blobs and objects
                    best_blob, best_object, _ = sorted(best_match_objects, key=lambda b: b[2])[0]
                    matched_blobs.setdefault(best_object, []).append(best_blob)                    
                    logger.warn('blob {} assigned to {} by least threshold difference'.format(best_blob, best_object))
                
                # report unmatched blobs and objects (excluding shelf)
                for obj in bin_contents:
                    if obj not in matched_blobs:
                        logger.warn('no blobs matched {}'.format(obj))
                for i in range(len(object_blobs)):
                    if i not in sum(matched_blobs.values(), []):
                        logger.warn('blob {} is unassigned'.format(i))
                
                for obj in bin_contents_with_shelf:
                    if obj in matched_blobs:
                        print obj
                        mask = reduce(numpy.bitwise_or, [ object_blobs[i] for i in matched_blobs[obj] ], numpy.zeros_like(object_mask))
                        object_cloud = clean_cloud_aligned[mask[clean_mask]]
                        xform = (so3.identity(), object_cloud.mean(axis=0).tolist())
                        object_cloud_color = map(lambda x: x[0] + [ x[1] ], zip(object_cloud.tolist(), camera.colorize()[mask].tolist()))
                        #debug_cloud([ object_cloud_color ], [ self.base_xform, self.camera_xform, xform ])      
                
                # check that the target object was found
                if target_object not in matched_blobs:
                    logger.error('no blobs assigned to target object')
                    return None
                
                # return blob for the selected object
                mask = reduce(numpy.bitwise_or, [ object_blobs[i] for i in matched_blobs[target_object] ], numpy.zeros_like(object_mask))

                # return the confusion matrix rows for the selected object
                confusion_rows = confusion_matrix[matched_blobs[target_object], :]
                confusion_row = dict([ (obj, confusion_rows[:, i].mean()) for (i, obj) in enumerate(bin_contents) ])
                logger.info('confusion row for {}: {}'.format(target_object, confusion_row))

        object_cloud = clean_cloud_aligned[mask[clean_mask]]
        
        #xform = (so3.identity(), object_cloud.mean(axis=0).tolist())
        xform = se3.identity()
        object_cloud_color = map(lambda x: x[0] + [ x[1] ], zip(object_cloud.tolist(), camera.colorize()[mask].tolist()))
        #debug_cloud([ object_cloud_color ], [ self.base_xform, self.camera_xform, xform ])      
        
        # downsample the cloud to about 1000 points
        ratio = int(len(object_cloud) / 1000)
        if ratio > 1:
            object_cloud_color = object_cloud_color[::ratio]
        
        pcd.write(object_cloud_color, open('/tmp/{}_{}.pcd'.format(self.knowledge_base.target_object, self.knowledge_base.target_bin), 'w'))

        return (xform, object_cloud_color, confusion_row)
Example #12
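A Klampt geometry demo: loads or creates a mesh, converts it to a point cloud, and adds both to the visualization.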
from klampt.robotsim import Geometry3D,WorldModel
from klampt.math import se3,so3,vectorops
from klampt.model.create import sphere
from klampt.vis import colorize
from klampt import vis
import sys
import time

if len(sys.argv) > 1:
    a = Geometry3D()
    if not a.loadFile(sys.argv[1]):
        print("Error loading",sys.argv[1])
        exit()
else:
    a = sphere(0.4,center=(0,0,0),type='TriangleMesh')
    a = Geometry3D(a)

w = WorldModel()
w.makeRigidObject("a")
w.rigidObject(0).geometry().set(a)

#a_pc = a.convert("PointCloud",0.05)
bb = a.getBBTight()
a_pc = a.convert("PointCloud")
a_pc.setCurrentTransform(so3.identity(),[1.25*(bb[1][0]-bb[0][0]),0,0])
value = None
cmap = None
feature = None
lighting = None

vis.add("A",a)
vis.add("B",a_pc)
Example #13
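A motion module test harness: starts the robot, loads the Klamp't model, and runs the test UI.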
            motion.robot.left_mq.setRamp(v,0.5)
            v[2] = -1.5
            v[3] = 2.0
            motion.robot.left_mq.appendCubic(2.0,v,[0.0]*7)
        elif c=='s':
            motion.robot.stopMotion()


if __name__ == '__main__':
    config.parse_args()
    print "motion_testing.py: various random tests of the motion module."""
    print "Press q to exit."
    print

    print "Loading Motion Module model",config.klampt_model
    motion.setup(mode=config.mode,klampt_model=config.klampt_model,libpath="./")
    res = motion.robot.startup()
    if not res:
        print "Error starting up Motion Module"
        exit(1)
    time.sleep(0.1)
    world = WorldModel()
    res = world.readFile(config.klampt_model)
    if not res:
        raise RuntimeError("Unable to load Klamp't model "+config.klampt_model)
    program = MotionTestUIViewer(world)
    program.run()
    motion.robot.shutdown()