Example #1
 def test_baxter_ik(self):
     env = ParamSetup.setup_env()
     baxter = ParamSetup.setup_baxter()
     can = ParamSetup.setup_green_can(geom=(0.02, 0.25))
     baxter_body = OpenRAVEBody(env, baxter.name, baxter.geom)
     can_body = OpenRAVEBody(env, can.name, can.geom)
     baxter_body.set_transparency(0.5)
     can_body.set_transparency(0.5)
     manip = baxter_body.env_body.GetManipulator('right_arm')
     robot = baxter_body.env_body
     can = can_body.env_body
     dof = robot.GetActiveDOFValues()
     #Open the Gripper so there won't be collisions between gripper and can
     dof[9], dof[-1] = 0.02, 0.02
     robot.SetActiveDOFValues(dof)
     iktype = IkParameterizationType.Transform6D
     thetas = np.linspace(0, np.pi * 2, 10)
     target_trans = OpenRAVEBody.transform_from_obj_pose([0.9, -0.23, 0.93],
                                                         [0, 0, 0])
     can_body.env_body.SetTransform(target_trans)
     target_trans[:3, :3] = target_trans[:3, :3].dot(
         matrixFromAxisAngle([0, np.pi / 2, 0])[:3, :3])
     can_trans = target_trans
     body_trans = np.eye(4)
     """
     To check whether the Baxter IK model works, uncomment the following line
     """
     # env.SetViewer('qtcoin')
     for theta in thetas:
         can_trans[:3, :3] = target_trans[:3, :3].dot(
             matrixFromAxisAngle([theta, 0, 0])[:3, :3])
         solution = manip.FindIKSolutions(
             IkParameterization(can_trans, iktype),
             IkFilterOptions.CheckEnvCollisions)
         if len(solution) > 0:
             print "Solution found with pose and rotation:"
             print OpenRAVEBody.obj_pose_from_transform(can_trans)
         else:
             print "Solution not found with pose and rotation:"
             print OpenRAVEBody.obj_pose_from_transform(can_trans)
         for sols in solution:
             dof[10:17] = sols
             robot.SetActiveDOFValues(dof)
             time.sleep(.2)
             body_trans[:3, :3] = can_trans[:3, :3].dot(
                 matrixFromAxisAngle([0, -np.pi / 2, 0])[:3, :3])
             can_body.env_body.SetTransform(body_trans)
             self.assertTrue(
                 np.allclose([0.9, -0.23, 0.93],
                             manip.GetTransform()[:3, 3]))
    def __init__(self):
        self.env  = openravepy.Environment()
        self.env.StopSimulation()
        self.env.Load(osp.join(cad_files_dir, 'table_sim.xml'))
        self.env.Load("robots/pr2-beta-static.zae")
        self.env.SetViewer('qtcoin')
        print self.env.GetViewer()

        self.robot = self.env.GetRobots()[0]
        
        self.cam_tfm = openravepy.matrixFromAxisAngle(np.pi*np.array([-0.7,0.7,0]))
        self.cam_tfm = openravepy.matrixFromAxisAngle([0,0,np.pi]).dot(self.cam_tfm)
        self.cam_tfm[:3,3] = np.array([0.5,0.0, 2])
        
        self.rope  = None
Example #3
    def PlaceObjectRandomOrientation(self, objectName, scale=1):
        '''Places the specified object in the center with a (semi-)random orientation.'''

        with self.env:

            # load object into environment
            self.env.Load(objectName, {'scalegeometry': str(scale)})

            idx1 = objectName.rfind("/")
            idx2 = objectName.rfind(".")
            shortObjectName = objectName[idx1 + 1:idx2]
            obj = self.env.GetKinBody(shortObjectName)

            # set physical properties
            l = obj.GetLinks()[0]
            l.SetMass(1)
            l.SetStatic(False)

            # choose a semi-random orientation
            orientOptions = [\
              [(1,0,0),  pi/2, (0, 1,0), 1],\
              [(1,0,0), -pi/2, (0,-1,0), 1],\
              [(0,1,0),  pi/2, (-1,0,0), 0],\
              [(0,1,0), -pi/2, ( 1,0,0), 0],\
              [(0,0,1),     0, (0,0, 1), 2],
              [(1,0,0),    pi, (0,0,-1), 2]]
            # [axis, angle, newAxis, newAxisIndex]

            optionIndex = randint(0, len(orientOptions))
            orientOption = orientOptions[optionIndex]
            randomAngle = 2 * pi * rand()

            R1 = openravepy.matrixFromAxisAngle(orientOption[0],
                                                orientOption[1])
            R2 = openravepy.matrixFromAxisAngle(orientOption[2], randomAngle)

            # adjust position to be above table
            boundingBox = obj.ComputeAABB().extents()

            T = eye(4)
            T[2, 3] = boundingBox[orientOption[3]] + self.tableExtents[2]

            # set object transform
            # print orientOption, randomAngle*(180/pi), boundingBox[orientOption[3]]
            objPose = dot(T, dot(R1, R2))
            obj.SetTransform(objPose)

        return obj, objPose
  def rotate_by(self, ang):
    #TODO Fill in, remove sleep
    #time.sleep(0)

    T = openravepy.matrixFromAxisAngle([0,0,ang]) # Get the transformation matrix
    with self.env:
        self.robot.SetTransform(np.dot(T, self.robot.GetTransform()))  # apply the new transformation
Example #5
def get_ik_transform(pos, rot):
    trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
    # OpenRAVE flips the rotation axis by 90 degrees, so we need to change it back
    rot_mat = matrixFromAxisAngle([0, PI / 2, 0])
    trans_mat = trans[:3, :3].dot(rot_mat[:3, :3])
    trans[:3, :3] = trans_mat
    return trans
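
A note on the API these examples revolve around: openravepy.matrixFromAxisAngle accepts either a single 3-vector whose direction is the rotation axis and whose norm is the angle, or an axis plus a separate angle, and returns a 4x4 homogeneous matrix with zero translation. A minimal sketch of the two equivalent call forms (illustration only, not taken from any of the projects above):

import numpy as np
import openravepy

# Both calls build the same 4x4 rotation of pi/2 about the z-axis;
# only the upper-left 3x3 block is non-trivial, the translation column stays zero.
T_a = openravepy.matrixFromAxisAngle([0, 0, np.pi / 2])      # axis and angle packed into one vector
T_b = openravepy.matrixFromAxisAngle([0, 0, 1], np.pi / 2)   # axis and angle passed separately
assert np.allclose(T_a, T_b)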
def generate_random_pos(robot, obj_to_grasp = None):
    """Generate a random position for the robot within the boundaries of the 
    world.
    """
    
    T = robot.GetTransform()
    if obj_to_grasp is None:
        max_x = 2.5
        max_y = 2.5
        max_th = np.pi
    else:
        obj_pos = obj_to_grasp.GetTransform()[:3,-1]
        max_x = obj_pos[0] - 1.0
        max_y = obj_pos[1] - 1.0
    
    
    x = np.random.uniform(-max_x, max_x)
    y = np.random.uniform(-max_y, max_y)
    z = T[2,3]
    
    if obj_to_grasp is None:
        th = np.random.uniform(-max_th, max_th)
    else:
        robot_pos = T[:3,-1]
        th = np.arctan2(obj_pos[1] -y , obj_pos[0] - x)
    
    #rotation
    T = openravepy.matrixFromAxisAngle([0,0,th])
    #translation
    T[:, -1] = [x, y, z, 1]    
    return T
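
A minimal usage sketch for the helper above (hypothetical names; assumes `robot` and `can` are OpenRAVE bodies already loaded in an environment). The returned 4x4 matrix combines a yaw about z with the sampled planar translation, so it can be applied directly:

# Hypothetical usage, assuming `robot` and `can` already exist in the environment.
T = generate_random_pos(robot, obj_to_grasp=can)
robot.SetTransform(T)  # place the robot base at the sampled pose, oriented toward the can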
 def rotate_by(self, ang):
     # TODO Fill in, remove sleep
     # Tz = np.matrix('0 1 0;-1 0 0; 0 0 1')
     Tz = openravepy.matrixFromAxisAngle([0, 0, ang])
     with self.env:
         self.robot.SetTransform(np.dot(self.robot.GetTransform(), Tz))
     time.sleep(0)
Example #8
def get_ee_transform_from_pose(pose, rotation):
    ee_trans = OpenRAVEBody.transform_from_obj_pose(pose, rotation)
    # the rotation transforms the tool frame into the end-effector frame
    rot_mat = matrixFromAxisAngle([0, PI / 2, 0])
    ee_rot_mat = ee_trans[:3, :3].dot(rot_mat[:3, :3])
    ee_trans[:3, :3] = ee_rot_mat
    return ee_trans
Example #9
    def SimulateObjectMovementOnClose(self, descriptor, obj):
        '''The object can move when the fingers close during a grasp.
        This sets the object to an approximation of the correct resultant pose.
        - Input descriptor: Grasp pose. Assumes this is a valid grasp.
        - Input obj: The object being grasped.
        - Returns None.
        '''

        # get object pose in hand frame
        bTo = obj.GetTransform()
        hTb = inv(descriptor.T)
        hTo = dot(hTb, bTo)

        if self.IsBottleUpright(obj):
            # Top grasp. Simply set the y-position to 0.
            hTo[1, 3] = 0
        else:
            # Side grasp.
            # Set y = 0 at the point of contact along the bottle axis.
            alpha = -hTo[0, 3] / hTo[0, 2]
            deltaY = hTo[1, 3] + alpha * hTo[1, 2]
            hTo[1, 3] -= deltaY
            # Set the orientation to be vertical in the hand.
            zAxis = hTo[0:2, 2] if hTo[0, 2] >= 0 else -hTo[0:2, 2]
            angle = arccos(dot(zAxis, array([1.0, 0.0])) / norm(zAxis))
            angle = angle if zAxis[1] <= 0 else 2 * pi - angle
            handDepthAxis = array([0.0, 0.0, 1.0])
            T = openravepy.matrixFromAxisAngle(handDepthAxis, angle)
            hTo = dot(T, hTo)

        # set the object's new pose in the base frame
        bToNew = dot(descriptor.T, hTo)
        obj.SetTransform(bToNew)
Example #10
def main():
    global m
    global e

    e = r.Environment()
    m = r.RaveCreateModule(e, 'orchomp')
    e.SetViewer('qtcoin')

    e.Load("intelkitchen_robotized_herb2_nosensors.env.xml")
    robot = e.GetRobot("Herb2")

    # set the manipulator to leftarm
    #ikmodel = databases.inversekinematics.InverseKinematicsModel(
    #            robot,iktype=IkParameterization.Type.Transform6D)
    #if not ikmodel.load():
    #    ikmodel.autogenerate()

    Tz = r.matrixFromAxisAngle([0, 0, numpy.pi / 2])
    Tz[0, 3] = 0.4
    Tz[1, 3] = 1.6
    Tz[2, 3] = -0.01

    print Tz
    with e:
        for body in e.GetBodies():
            body.SetTransform(numpy.dot(Tz, body.GetTransform()))

    readFileCommands()
    def observe_cloud2(self, radius, axis_upsample=0, radius_sample=0, angle_sample=0):
        axis_pts = self.rope.GetControlPoints()
        indices = range(len(axis_pts))
        if axis_upsample != 0:
            lengths = np.r_[0, self.rope.GetHalfHeights() * 2]
            summed_lengths = np.cumsum(lengths)
            assert len(lengths) == len(axis_pts)
            indices = np.interp(np.linspace(0, summed_lengths[-1], axis_upsample*len(lengths)), summed_lengths, indices)
            axis_pts = math_utils.interp2d(np.linspace(0, summed_lengths[-1], axis_upsample*len(lengths)), summed_lengths, axis_pts)
        
        half_heights = self.rope.GetHalfHeights()
        rotates = self.rope.GetRotations()
        translates = self.rope.GetTranslations()
        new_pts = []
        for r in range(1, radius_sample+1):
            d = r * radius / float(radius_sample) 
            for a in range(angle_sample):
                local_rotate = matrixFromAxisAngle([a/float(angle_sample)*2*np.pi,0,0])[:3,:3]
                for index in indices:
                    node_id = np.min([np.floor(index), len(self.rope.GetNodes()) - 1])
                    h = half_heights[node_id]
                    v = np.array([-h + 2*h*(index-node_id),d,0])
                    rotate = rotates[node_id]
                    translate = translates[node_id]
                    v = rotate.dot(local_rotate.dot(v)) + translate
                    new_pts.append(v)

        for pt in axis_pts:
            new_pts.append(pt)
                
        return np.array(new_pts)
Example #12
    def test_grid_to_trajectories(self):

        points = array([2831, 2832, 2833, 2777, 2778, 2779, 2780])
        borders = array([[-0.34395401, 0.78002898, -3.65800373], [0.04986939, -0.00596685, -3.75569145]])
        point_vert = array([-0.94672199,  0.76057971, -1.52572671, -0.82705471, -0.51077813,
       -0.23470453])
        T = matrixFromAxisAngle([pi / 4, 0, 0])

        gtt_00 = self.DB_00.load_grid_to_trajectories()
        self.assertTrue(isinstance(gtt_00,dict), msg='grid_to_trajectories 00 should be a dictionary')
        self.assertTrue(len(gtt_00[0])==2,
                        msg='grid_to_trajectories[0] should be a size 2 list (trajectories and borders)')
        self.assertTrue(len(gtt_00[0][0]) == 151,
                        msg='grid_to_trajectories[0][0] should have 151 trajectories')
        assert_allclose(gtt_00[0][0][0], points)
        assert_allclose(gtt_00[0][1][0], borders)
        borders_45 = [dot(T[0:3,0:3],gtt_00[0][1][0][0]),dot(T[0:3,0:3],gtt_00[0][1][0][1])]

        gtt_45 = self.DB_45.load_grid_to_trajectories()
        self.assertTrue(isinstance(gtt_45, dict), msg='grid_to_trajectories 45 should be a dictionary')
        self.assertTrue(len(gtt_45[0]) == 2,
                        msg='grid_to_trajectories[0] should be a size 2 list (trajectories and borders)')
        self.assertTrue(len(gtt_45[0][0]) == 151,
                        msg='grid_to_trajectories[0][0] should have 151 trajectories')
        assert_allclose(gtt_45[0][0][0], points)
        assert_allclose(gtt_45[0][1][0], borders_45)

        gtt_vert = self.DB_VERT.load_grid_to_trajectories()
        self.assertTrue(isinstance(gtt_vert, dict), msg='grid_to_trajectories vert should be a dictionary')
        self.assertTrue(len(gtt_vert[64]) == 26,
                        msg='grid_to_trajectories[0] should be a size 26 list (trajectories)')
        self.assertTrue(len(gtt_vert[64][0]) == 118,
                        msg='grid_to_trajectories[0][0] should have 118 points')
        assert_allclose(gtt_vert[64][0][0], point_vert)
Example #13
def observe_cloud(pts, radius, upsample=0, upsample_rad=1):
    """
    If upsample > 0, the points along the rope's backbone are resampled to upsample points.
    If upsample_rad > 1, upsample_rad points are added around the rope's cross-section, perpendicular to each backbone point.
    The total number of points is then (upsample if upsample > 0 else len(pts)) * upsample_rad.
    """
    if upsample > 0:
        lengths = np.r_[0, np.apply_along_axis(np.linalg.norm, 1, np.diff(pts, axis=0))]
        summed_lengths = np.cumsum(lengths)
        assert len(lengths) == len(pts)
        pts = math_utils.interp2d(np.linspace(0, summed_lengths[-1], upsample), summed_lengths, pts)
    if upsample_rad > 1:
        # add points perpendicular to the points in pts around the rope's cross-section
        vs = np.diff(pts, axis=0) # vectors between the current and next points
        vs /= np.apply_along_axis(np.linalg.norm, 1, vs)[:,None]
        perp_vs = np.c_[-vs[:,1], vs[:,0], np.zeros(vs.shape[0])] # perpendicular vectors between the current and next points in the xy-plane
        perp_vs /= np.apply_along_axis(np.linalg.norm, 1, perp_vs)[:,None]
        vs = np.r_[vs, vs[-1,:][None,:]] # define the vector of the last point to be the same as the second to last one
        perp_vs = np.r_[perp_vs, perp_vs[-1,:][None,:]] # define the perpendicular vector of the last point to be the same as the second to last one
        perp_pts = []
        from openravepy import matrixFromAxisAngle
        for theta in np.linspace(0, 2*np.pi, upsample_rad, endpoint=False): # uniformly around the cross-section circumference
            for (center, rot_axis, perp_v) in zip(pts, vs, perp_vs):
                rot = matrixFromAxisAngle(rot_axis, theta)[:3,:3]
                perp_pts.append(center + rot.T.dot(radius * perp_v))
        pts = np.array(perp_pts)
    return pts
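
A small sketch of how the resampling above might be invoked (assumed parameter values; `backbone_pts` stands for an (N, 3) array of rope control points and is not defined here). With both options active the result has upsample * upsample_rad rows:

# Hypothetical call; the radius and sample counts are made-up values.
cloud = observe_cloud(backbone_pts, radius=0.005, upsample=100, upsample_rad=8)
assert cloud.shape == (100 * 8, 3)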
def generate_random_pos(robot, obj_to_grasp=None):
    """Generate a random position for the robot within the boundaries of the 
    world.
    """

    T = robot.GetTransform()
    if obj_to_grasp is None:
        max_x = 2.5
        max_y = 2.5
        max_th = np.pi
    else:
        obj_pos = obj_to_grasp.GetTransform()[:3, -1]
        max_x = obj_pos[0] - 1.0
        max_y = obj_pos[1] - 1.0

    x = np.random.uniform(-max_x, max_x)
    y = np.random.uniform(-max_y, max_y)
    z = T[2, 3]

    if obj_to_grasp is None:
        th = np.random.uniform(-max_th, max_th)
    else:
        robot_pos = T[:3, -1]
        th = np.arctan2(obj_pos[1] - y, obj_pos[0] - x)

    #rotation
    T = openravepy.matrixFromAxisAngle([0, 0, th])
    #translation
    T[:, -1] = [x, y, z, 1]
    return T
Example #15
 def get_ik_transform(pos, rot):
     trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
     # OpenRAVE flips the rotation axis by 90 degrees, so we need to change it back
     rot_mat = matrixFromAxisAngle([0, np.pi/2, 0])
     trans_mat = trans[:3, :3].dot(rot_mat[:3, :3])
     trans[:3, :3] = trans_mat
     return trans
def set_camera(orviewer, orcamparams):
   root = xml.etree.ElementTree.fromstring('<root>{}</root>'.format(orcamparams))
   ra = map(float,root.find('camrotationaxis').text.split())
   axisangle = numpy.array(ra[0:3]) * ra[3] * numpy.pi / 180.0
   H = openravepy.matrixFromAxisAngle(axisangle)
   H[0:3,3] = map(float,root.find('camtrans').text.split())
   e.GetViewer().SetCamera(H, float(root.find('camfocal').text))
    def SampleActions(self):
        '''TODO'''

        if self.nPlaceOrientations != 3:
            raise Exception(
                "Currently, exactly 3 orientations only supported for place.")

        y90 = openravepy.matrixFromAxisAngle([0.0, 1.0, 0.0], -pi / 2.0)[0:3,
                                                                         0:3]
        x180 = openravepy.matrixFromAxisAngle([1.0, 0.0, 0.0], pi)[0:3, 0:3]

        R1 = y90  # approach aligned with world x
        R2 = eye(3)  # approach aligned with world -z
        R3 = dot(x180, y90)  # R1 flipped 180 about approach

        return [R1, R2, R3]
Example #18
 def ik_solution(self, manip, iktype, ee_pos, ee_rot):
     ee_trans = OpenRAVEBody.transform_from_obj_pose(ee_pos, ee_rot)
     # OpenRAVE flips the rotation axis by 90 degrees, so we need to change it back
     rot_mat = matrixFromAxisAngle([0, np.pi/2, 0])
     ee_trans = ee_trans.dot(rot_mat)
     solutions = manip.FindIKSolutions(IkParameterization(ee_trans,iktype),IkFilterOptions.CheckEnvCollisions)
     return solutions
Example #19
 def ik_solution(self, manip, iktype, ee_pos, ee_rot):
     ee_trans = OpenRAVEBody.transform_from_obj_pose(ee_pos, ee_rot)
     # OpenRAVE flips the rotation axis by 90 degrees, so we need to change it back
     rot_mat = matrixFromAxisAngle([0, np.pi / 2, 0])
     ee_trans = ee_trans.dot(rot_mat)
     solutions = manip.FindIKSolutions(IkParameterization(ee_trans, iktype),
                                       IkFilterOptions.CheckEnvCollisions)
     return solutions
def load_segment(demofile, segment, fake_data_transform=[0, 0, 0, 0, 0, 0]):
    fake_seg = demofile[segment]
    new_xyz = np.squeeze(fake_seg["cloud_xyz"])
    hmat = openravepy.matrixFromAxisAngle(fake_data_transform[3:6])  # @UndefinedVariable
    hmat[:3, 3] = fake_data_transform[0:3]
    new_xyz = new_xyz.dot(hmat[:3, :3].T) + hmat[:3, 3][None, :]
    r2r = ros2rave.RosToRave(Globals.robot, asarray(fake_seg["joint_states"]["name"]))
    return new_xyz, r2r
Example #21
def load_fake_data_segment(demofile, fake_data_segment, fake_data_transform):
    fake_seg = demofile[fake_data_segment]
    new_xyz = np.squeeze(fake_seg["cloud_xyz"])[:, :3]
    hmat = openravepy.matrixFromAxisAngle(fake_data_transform[3:6])
    hmat[:3, 3] = fake_data_transform[0:3]
    new_xyz = new_xyz.dot(hmat[:3, :3].T) + hmat[:3, 3][None, :]
    joint_names = asarray(fake_seg["joint_states"]["name"])
    joint_values = asarray(fake_seg["joint_states"]["position"][0])
    return new_xyz, joint_names, joint_values
Example #22
    def sampling(self):
        """ The sampling method is an algorithm for uniformly sampling objects.
        It computes the bounding box of the object (object inscribed by cube), uniformly
        samples the box's faces, and checks rays collision from cube's samples to the object.
        Rays with collision and its normal vectors are stored as object's samples.
        Finally, a kdtree is computed from object's samples and neighbooring points are removed  
        to generate an uniformly sampling on the object (filtering data by distance).

        This is a data-intensive computing and might freeze your computer.
        """

        delta = self.samples_delta
        min_distance_between_points = self.min_distance_between_points

        cc = RaveCreateCollisionChecker(self.turbine.env, 'ode')
        if cc is not None:
            ccold = self.turbine.env.GetCollisionChecker()
            self.turbine.env.SetCollisionChecker(cc)
            cc = ccold

        self._blade.SetTransform(eye(4))
        self._blade.SetTransform(dot(self._blade.GetTransform(),
                                     matrixFromAxisAngle([0, -self.turbine.config.environment.blade_angle, 0]))
                                 )

        ab = self._blade.ComputeAABB()
        p = ab.pos()
        e = ab.extents() + 0.01  # increase since origin of ray should be outside of object
        sides = array((
            (e[0], 0, 0, -1, 0, 0, 0, e[1], 0, 0, 0, e[2]),  # x
            (-e[0], 0, 0, 1, 0, 0, 0, e[1], 0, 0, 0, e[2]),  # -x
            (0, 0, -e[2], 0, 0, 1, e[0], 0, 0, 0, e[1], 0),  # -z
            (0, 0, e[2], 0, 0, -1, e[0], 0, 0, 0, e[1], 0),  # z
            (0, e[1], 0, 0, -1, 0, e[0], 0, 0, 0, 0, e[2]),  # y
            (0, -e[1], 0, 0, 1, 0, e[0], 0, 0, 0, 0, e[2])  # -y
        ))
        maxlen = 2 * sqrt(sum(e ** 2)) + 0.03
        self.points = zeros((0, 6))
        for side in sides:
            ex = sqrt(sum(side[6:9] ** 2))
            ey = sqrt(sum(side[9:12] ** 2))
            XX, YY = meshgrid(r_[arange(-ex, -0.25 * delta, delta), 0, arange(delta, ex, delta)],
                              r_[arange(-ey, -0.25 * delta, delta), 0, arange(delta, ey, delta)])
            localpos = outer(XX.flatten(), side[6:9] / ex) + outer(YY.flatten(), side[9:12] / ey)
            N = localpos.shape[0]
            rays = c_[tile(p + side[0:3], (N, 1)) + localpos,
                      maxlen * tile(side[3:6], (N, 1))]
            collision, info = self.turbine.env.CheckCollisionRays(rays, self._blade)
            # make sure all normals have the correct sign: pointing outward from the object
            newinfo = info[collision, :]
            if len(newinfo) > 0:
                newinfo[sum(rays[collision, 3:6] * newinfo[:, 3:6], 1) > 0, 3:6] *= -1
                self.points = r_[self.points, newinfo]
        self.points = mathtools.filter_by_distance(self.points, min_distance_between_points)
        self.samples_delta = delta
        self.min_distance_between_points = min_distance_between_points
        return
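
The ray bundle built above follows OpenRAVE's CheckCollisionRays convention: each row of `rays` holds six numbers, the ray origin in the first three entries and the direction scaled by the maximum ray length in the last three; the returned info array carries hit positions in columns 0:3 and surface normals in columns 3:6. A stripped-down sketch of that call (illustrative only, assuming `env` and `body` already exist):

import numpy as np
# Hypothetical single ray: starts 1 m above the origin and points straight down for up to 2 m.
rays = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, -2.0]])
collision, info = env.CheckCollisionRays(rays, body)
hit_points, hit_normals = info[collision, 0:3], info[collision, 3:6]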
Example #23
    def test_get_sorted_points(self):
        point_0 = (-0.152195798, 0.538141088, -3.765726656)
        T = matrixFromAxisAngle([pi / 4, 0, 0])
        ntp_00 = self.DB_00.get_sorted_points()
        assert_allclose(ntp_00[0], point_0)
        point_45 = dot(T[0:3,0:3], point_0)

        ntp_45 = self.DB_45.get_sorted_points()
        assert_allclose(ntp_45[0], point_45)
Example #24
def load_fake_data_segment(demofile, fake_data_segment, fake_data_transform):
    fake_seg = demofile[fake_data_segment]
    new_xyz = np.squeeze(fake_seg["cloud_xyz"])[:,:3]
    hmat = openravepy.matrixFromAxisAngle(fake_data_transform[3:6])
    hmat[:3,3] = fake_data_transform[0:3]
    new_xyz = new_xyz.dot(hmat[:3,:3].T) + hmat[:3,3][None,:]
    joint_names = asarray(fake_seg["joint_states"]["name"])
    joint_values = asarray(fake_seg["joint_states"]["position"][0])
    return new_xyz, joint_names, joint_values
def load_fake_data_segment(demofile, fake_data_segment, fake_data_transform, set_robot_state=True):
    fake_seg = demofile[fake_data_segment]
    new_xyz = np.squeeze(fake_seg["cloud_xyz"])
    hmat = openravepy.matrixFromAxisAngle(fake_data_transform[3:6])
    hmat[:3,3] = fake_data_transform[0:3]
    new_xyz = new_xyz.dot(hmat[:3,:3].T) + hmat[:3,3][None,:]
    r2r = ros2rave.RosToRave(Globals.robot, asarray(fake_seg["joint_states"]["name"]))
    if set_robot_state:
        r2r.set_values(Globals.robot, asarray(fake_seg["joint_states"]["position"][0]))
    return new_xyz, r2r
Example #26
 def getTransform(self, cfg=None):
     Placement = eye(4)
     Placement[0:3, 3] = self.getXYZ(cfg)
     if cfg is not None:
         Placement[0:3, 3] += [
             0, 0, 2.0 * cfg.environment.robot_level_difference
         ]
     R = matrixFromAxisAngle(
         [0, 0, self.alpha + (sign(self.p) + 1) * pi / 2.0])
     return dot(Placement, R)
Example #27
def get_ee_transform_from_pose(pose, rotation):
    """
        Helper function that returns the correct end-effector rotation axis (perpendicular to the gripper side).
    """
    ee_trans = OpenRAVEBody.transform_from_obj_pose(pose, rotation)
    # the rotation transforms the tool frame into the end-effector frame
    rot_mat = matrixFromAxisAngle([0, PI / 2, 0])
    ee_rot_mat = ee_trans[:3, :3].dot(rot_mat[:3, :3])
    ee_trans[:3, :3] = ee_rot_mat
    return ee_trans
Example #28
 def test_draw_plane(self):
     np.random.seed(123)
     axis = np.random.randn(3)
     axis /= np.linalg.norm(axis)
     angle = np.deg2rad(45)
     transform = orpy.matrixFromAxisAngle(angle * axis)
     transform[:3, 3] = np.random.randn(3) * 0.5
     h = ru.visual.draw_plane(self.env, transform)
     if self.display_available:
         self.assertEqual(type(h), orpy.GraphHandle)
Example #29
    def test_compute_robot_joints(self):
        """
        The test generates a trajectory and computes robot joints.
        """
        turbconf = TurbineConfig.load(self.path, self.test_dir)
        turb = Turbine(turbconf)
        robot_pos = turb.robot.GetTransform()[0:3,3]

        theta = arange(-pi/10,pi/10,0.001)
        # Generating positions
        x = -2.7 + (2 - cos(3*theta))*cos(theta)
        y = 1 + (2 - cos(3*theta))*sin(theta)
        z = zeros(len(theta))
        positions = [[x[i],y[i],z[i]] for i in range(0,len(x))]

        # Generating normal vectors
        ny = 2*cos(theta)+cos(2*theta)-2*cos(4*theta)
        nx = -2*sin(theta)+sin(2*theta)+2*sin(4*theta)
        nz = zeros(len(theta))
        normal_vectors = array([[nx[i],ny[i],nz[i]] for i in range(0,len(nx))])
        normal_vectors = normal_vectors*(1.0/sqrt(sum(normal_vectors*normal_vectors,1))).reshape(len(normal_vectors),1)
        for i in range(0,len(normal_vectors)):
            normal_vectors[i] = cross(normal_vectors[i],[0,0,1])

        trajectory = [[positions[i][0], positions[i][1], positions[i][2],
                       normal_vectors[i][0], normal_vectors[i][1],
                       normal_vectors[i][2]] for i in range(0,len(positions))]

        X90 = matrixFromAxisAngle([pi/2, 0, 0])
        Z180 = matrixFromAxisAngle([0, 0, pi])
        trajectory = array(mathtools.rotate_trajectories(turb, [trajectory], dot(Z180,X90))[0])
        trajectory[:,2] = trajectory[:,2] + robot_pos[2]

##        from .. import visualizer
##        vis = visualizer.Visualizer(turb.env)
##        vis.plot(array(trajectory)[:,0:3])
##        vis.plot_normal(trajectory)
##        x = raw_input('wait')

        joint_solutions = robot_utils.compute_robot_joints(turb, trajectory, 0)
        self.assertTrue(len(joint_solutions)==len(trajectory),
                        msg = 'The trajectory is not feasible')
Example #30
    def SampleActions(self):
        '''TODO'''

        theta = linspace(-pi / 2.0, pi / 2.0, self.nGraspOrientations)
        axis = array([0.0, 0.0, 1.0])

        rotations = []
        for t in theta:
            R = openravepy.matrixFromAxisAngle(axis, t)[0:3, 0:3]
            rotations.append(R)

        return rotations
def main():
    
    for i in range(1):
        """
        Tz = r.matrixFromAxisAngle([0,0,numpy.pi/2])
        Tz[0,3] = 0.4  
        Tz[1,3] = 1.6
        Tz[2,3] = -0.01
        """
        Tz = r.matrixFromAxisAngle([0,0,0])
        Tz[2,3] = 0.01
        runtrial( Tz , False, True, False )
Example #32
def find_ik_solutions(robot, target, iktype, collision_free=True, freeinc=0.1):
    """
  Find all the possible IK solutions

  Parameters
  ----------
  robot: orpy.Robot
    The OpenRAVE robot
  target: array_like or orpy.Ray
    The target in the task space (Cartesian). A target can be defined as a Ray
    (position and direction), which is 5D, or as a homogeneous transformation,
    which is 6D.
  iktype: orpy.IkParameterizationType
    Inverse kinematics type to use. Supported values:
    `orpy.IkParameterizationType.Transform6D` and
    `orpy.IkParameterizationType.TranslationDirection5D`
  collision_free: bool, optional
    If true, find only collision-free solutions
  freeinc: float, optional
    The free increment (discretization) to be used for the free DOF when the
    target is 5D.

  Returns
  -------
  solutions: list
    The list of IK solutions found
  """
    # Populate the target list
    target_list = []
    if iktype == orpy.IkParameterizationType.TranslationDirection5D:
        if type(target) is not orpy.Ray:
            ray = ru.conversions.to_ray(target)
            target_list.append(ray)
        else:
            target_list.append(target)
    elif iktype == orpy.IkParameterizationType.Transform6D:
        if type(target) is orpy.Ray:
            Tray = ru.conversions.from_ray(target)
            for angle in np.arange(0, 2 * np.pi, freeinc):
                Toffset = orpy.matrixFromAxisAngle(angle * br.Z_AXIS)
                target_list.append(np.dot(Tray, Toffset))
        else:
            target_list.append(target)
    # Concatenate all the solutions
    solutions = []
    for goal in target_list:
        ikparam = orpy.IkParameterization(goal, iktype)
        manipulator = robot.GetActiveManipulator()
        opt = 0
        if collision_free:
            opt = orpy.IkFilterOptions.CheckEnvCollisions
        solutions += list(manipulator.FindIKSolutions(ikparam, opt))
    return solutions
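
A usage sketch for the helper above (hypothetical setup; assumes `robot` already has a Transform6D IK solver loaded and that the slightly perturbed goal below is reachable):

import numpy as np
import openravepy as orpy

# Hypothetical goal pose: the current end-effector frame rotated slightly about its z-axis.
manip = robot.GetActiveManipulator()
T_goal = np.dot(manip.GetTransform(), orpy.matrixFromAxisAngle([0.0, 0.0, 0.1]))
solutions = find_ik_solutions(robot, T_goal, orpy.IkParameterizationType.Transform6D,
                              collision_free=True)
if len(solutions) > 0:
    robot.SetDOFValues(solutions[0], manip.GetArmIndices())  # jump to the first solution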
Example #33
def main():

    for i in range(1):
        """
        Tz = r.matrixFromAxisAngle([0,0,numpy.pi/2])
        Tz[0,3] = 0.4  
        Tz[1,3] = 1.6
        Tz[2,3] = -0.01
        """
        Tz = r.matrixFromAxisAngle([0, 0, 0])
        Tz[2, 3] = 0.01
        runtrial(Tz, False, True, False)
    def control_robot_with_the_keyboard(self):
        OFFSET = 0.07

        print ""
        print "w: Move arm forward."
        print "s: Move arm backwards."
        print "a: Move arm left."
        print "d: Move arm right."
        print "q: Rotate end-effector anticlockwise."
        print "e: Rotate end-effector clockwise."
        print "o: Open end-effector."
        print "c: Close end-effector."
        print ""
        print "Type 'exit' to stop the demo and enter in IPython environment."

        while True:
            action = raw_input("Menu Action: ")

            if action == "exit":  # EXIT Demo
                break
            if action == "q":  # Rotate Anti-clockwise
                anticlockwise = matrixFromAxisAngle([0, 0, numpy.pi / 6])
                self.rotate_hand(anticlockwise)
            elif action == "e":  # Rotate Clockwise
                clockwise = matrixFromAxisAngle([0, 0, -numpy.pi / 6])
                self.rotate_hand(clockwise)
            elif action == "o":  # Open Gripper
                self.robot.open_gripper()
            elif action == "c":  # Close Gripper
                self.robot.close_gripper()
            elif action in ["w", "s", "a", "d"]:
                x_offset = 0
                y_offset = 0

                if action == "w": x_offset = OFFSET  # Move Forward
                if action == "s": x_offset = -OFFSET  # Move Backwards
                if action == "a": y_offset = -OFFSET  # Move Left
                if action == "d": y_offset = OFFSET  # Move Right

                self.move_hand(x_offset, y_offset)
Example #35
    def test_load_blade(self):
        point_00 = array([-0.1521958 ,  0.53814109, -3.76572666,  0.08927295,  0.16974092,
       -0.98143689])
        T = matrixFromAxisAngle([pi / 4, 0, 0])

        blade_00 = self.DB.load_blade()
        self.assertTrue(isinstance(blade_00.trajectories,list))
        assert_allclose(blade_00.trajectories[0][0],point_00)
        point_45 = concatenate((dot(T[0:3, 0:3], blade_00.trajectories[0][0][0:3]),
                                dot(T[0:3, 0:3], blade_00.trajectories[0][0][3:6])))

        blade_45 = self.DB_45.load_blade()
        self.assertTrue(isinstance(blade_45.trajectories, list))
        assert_allclose(blade_45.trajectories[0][0], point_45)
Example #36
def rotate_point_cloud_tfm(cloud):
    A = np.cov(cloud.T)
    U, _, _ = np.linalg.svd(A)
    
    if U[2, -1] < 0:
        dir = -U[:, -1]
    else:
        dir = U[:, -1]
        
    center_xyz = np.median(cloud, axis=0)
    axis = np.cross(dir, [0,0,1])
    angle = np.arccos(np.dot(dir, [0,0,1]))
    tfm = openravepy.matrixFromAxisAngle(axis, angle)
    return tfm
Example #37
def rotate_point_cloud_tfm(cloud):
    A = np.cov(cloud.T)
    U, _, _ = np.linalg.svd(A)

    if U[2, -1] < 0:
        dir = -U[:, -1]
    else:
        dir = U[:, -1]

    center_xyz = np.median(cloud, axis=0)
    axis = np.cross(dir, [0, 0, 1])
    angle = np.arccos(np.dot(dir, [0, 0, 1]))
    tfm = openravepy.matrixFromAxisAngle(axis, angle)
    return tfm
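
Both variants above derive the axis/angle pair from two direction vectors: the axis is the cross product of dir with [0, 0, 1] and the angle is the arccos of their dot product. One caveat when reusing this pattern: if dir is already (anti)parallel to [0, 0, 1], the cross product is the zero vector and the rotation is undefined. A defensive sketch (my own illustration, not taken from either example):

import numpy as np
import openravepy

def align_z_tfm(direction):
    """Return a 4x4 transform that rotates `direction` onto the world z-axis (sketch)."""
    direction = np.asarray(direction, dtype=float)
    direction = direction / np.linalg.norm(direction)
    axis = np.cross(direction, [0, 0, 1])
    if np.linalg.norm(axis) < 1e-9:  # already aligned or anti-aligned with z
        return np.eye(4) if direction[2] > 0 else openravepy.matrixFromAxisAngle([np.pi, 0, 0])
    angle = np.arccos(np.clip(np.dot(direction, [0, 0, 1]), -1.0, 1.0))
    return openravepy.matrixFromAxisAngle(axis, angle)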
Example #38
 def Move(self, vel, execute=False, timeout=None, **kwargs):
     direction = numpy.array([1., 0., 0.])
     with self.robot.GetEnv():
         start_pose = self.robot.GetTransform()
         offset_pose = numpy.eye(4)
         offset_pose[0:3, 3] = vel[0] * direction
         transl_pose = numpy.dot(start_pose, offset_pose)
         relative_pose = openravepy.matrixFromAxisAngle([0., 0., vel[1]])
         goal_pose = numpy.dot(transl_pose, relative_pose)
     traj = util.create_affine_trajectory(self.robot,
                                          [start_pose, goal_pose])
     if (execute):
         self.robot.ExecutePath(traj, **kwargs)
     return traj
Example #39
  def ComposeAction(self, prevDesc, theta):
    '''Creates a new action and hand descriptor objects.'''

    action = [prevDesc.image, copy(theta)]

    R = openravepy.matrixFromAxisAngle(prevDesc.approach, theta[0])[0:3,0:3]

    approach = prevDesc.approach
    axis = dot(R, prevDesc.axis)
    center = prevDesc.center

    T = hand_descriptor.PoseFromApproachAxisCenter(approach, axis, center)
    desc = HandDescriptor(T)

    return action, desc
def extract_hitch(rgb, depth, T_w_k, dir=None, radius=0.016, length =0.215, height_range=[0.70,0.80]):
    """
    template match to find the hitch in the picture, 
    get the xyz at those points using the depth image,
    extend the points down to aid in tps.
    """
    template_fname = osp.join(hd_data_dir, 'hitch_template.jpg')
    template = cv2.imread(template_fname)
    h,w,_    = template.shape
    res = cv2.matchTemplate(rgb, template, cv2.TM_CCORR_NORMED)
    _, _, _, max_loc = cv2.minMaxLoc(res)

    top_left     = max_loc
    bottom_right = (top_left[0]+w, top_left[1]+h)

    if DEBUG_PLOTS:
        cv2.rectangle(rgb,top_left, bottom_right, (255,0,0), thickness=2)
        cv2.imshow("hitch detect", rgb)
        cv2.waitKey()

    # now get all points in a window around the hitch loc:
    xyz_k = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]
    
    hitch_pts = xyz_w[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0],:]
    height_mask = (hitch_pts[:,:,2] > height_range[0]) & (hitch_pts[:,:,2] < height_range[1])
    hitch_pts = hitch_pts[height_mask]

    ## add pts along the rod:
    center_xyz = np.median(hitch_pts, axis=0)
    ang = np.linspace(0, 2*np.pi, 30)
    circ_pts = radius*np.c_[np.cos(ang), np.sin(ang)] + center_xyz[None,:2]
    circ_zs  = np.linspace(center_xyz[2], length+center_xyz[2], 30)

    rod_pts  = np.empty((0,3))
    for z in circ_zs:
        rod_pts = np.r_[rod_pts, np.c_[circ_pts, z*np.ones((len(circ_pts),1))] ]
    
    all_pts = np.r_[hitch_pts, rod_pts]
    if dir is None:
        return all_pts, center_xyz
    else:
        axis = np.cross([0,0,1], dir)
        angle = np.arccos(np.dot([0,0,1], dir))
        tfm = matrixFromAxisAngle(axis, angle)
        tfm[:3,3] = - tfm[:3,:3].dot(center_xyz) + center_xyz
        return np.asarray([tfm[:3,:3].dot(point) + tfm[:3,3] for point in all_pts]), center_xyz
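
The two lines before the return use a compact trick worth spelling out: after building the rotation, setting the translation to center - R.dot(center) makes the homogeneous transform rotate points about the point center rather than about the origin. A standalone sketch of that pattern (hypothetical helper, not part of the hitch code):

import numpy as np
from openravepy import matrixFromAxisAngle

def rotate_about_point(points, axis_angle, center):
    """Rotate an (N, 3) array of points by `axis_angle` about `center` (sketch)."""
    center = np.asarray(center, dtype=float)
    tfm = matrixFromAxisAngle(axis_angle)
    tfm[:3, 3] = center - tfm[:3, :3].dot(center)  # chosen so that `center` maps to itself
    return np.asarray(points).dot(tfm[:3, :3].T) + tfm[:3, 3]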
    def Collides(self, config):
        x = config[0]
        y = config[1]
        theta = config[2]

        transform = matrixFromAxisAngle([0,0,theta])
        transform[0][3] = x
        transform[1][3] = y

        # Assigns Transform to Robot and Checks Collision
        with self.herb.env:
            self.robot.SetTransform(transform)
            # time.sleep(0.01)
            if self.herb.env.CheckCollision(self.robot,self.table) == True:
                return True
            else:
                return False
Example #42
    def Rotate(self, angle_rad, execute=True, **kw_args):
        """
        Rotates the robot the desired distance
        @param angle_rad the number of radians to rotate
        @param timeout duration to wait for execution
        """
        with self.robot.GetEnv():
            start_pose = self.robot.GetTransform()
            
            relative_pose = openravepy.matrixFromAxisAngle([ 0., 0., angle_rad ])
            goal_pose = numpy.dot(start_pose, relative_pose)

        path = create_affine_trajectory(self.robot, [ start_pose, goal_pose ])
        if execute:
            return self.robot.ExecutePath(path, **kw_args)
        else:
            return path
Example #43
    def generate_trajectories(self, iter_surface, vis = None):
        """ Method generate the coating trajectories. The trajectories are
        the intersection between two surfaces: the blade model, and the surface
        to be iterated, e.g. spheres. The algorithm
        follows the marching method, documentation available in:
        http://www.mathematik.tu-darmstadt.de/~ehartmann/cdgen0104.pdf
        page 94 intersection surface - surface.

        This is a data-intensive computing and might freeze your computer.

        Args:
            iter_surface: (@ref IterSurface) surface to be iterated, as mathtools.sphere.

        Returns:
            (float[n][6]) list of points and normals, intersection between two surfaces.
        """

        step = self.trajectory_step

        trajectories = deepcopy(self.trajectories)

        if not issubclass(iter_surface.__class__, mathtools.IterSurface):
            raise TypeError("Object is not a valid surface.")

        if self.model_iter_surface is not None:
            if iter_surface.name() != self.model_iter_surface.name():
                raise TypeError("Object iter_surface must have same type of model_iter_surface.")

        if not self.models:
            raise IndexError("Object models is empty. Load or create a model before generate trajectories.")

        self._blade.SetTransform(eye(4))
        self._blade.SetTransform(dot(self._blade.GetTransform(),
                                     matrixFromAxisAngle([0, -self.turbine.config.environment.blade_angle, 0]))
                                 )

        point_on_surfaces = self._compute_initial_point(iter_surface, trajectories)
        self.trajectory_iter_surface = iter_surface
        while iter_surface.criteria():
            model = self.select_model(point_on_surfaces)
            trajectories.append(self.draw_parallel(point_on_surfaces, model, iter_surface, step, vis))
            p0 = trajectories[-1][-1]
            iter_surface.update()
            point_on_surfaces = mathtools.curvepoint(model, iter_surface, p0[0:3])
            self.trajectories = trajectories
        return trajectories
    def observe_cloud2(self,
                       radius,
                       axis_upsample=0,
                       radius_sample=0,
                       angle_sample=0):
        axis_pts = self.rope.GetControlPoints()
        indices = range(len(axis_pts))
        if axis_upsample != 0:
            lengths = np.r_[0, self.rope.GetHalfHeights() * 2]
            summed_lengths = np.cumsum(lengths)
            assert len(lengths) == len(axis_pts)
            indices = np.interp(
                np.linspace(0, summed_lengths[-1],
                            axis_upsample * len(lengths)), summed_lengths,
                indices)
            axis_pts = math_utils.interp2d(
                np.linspace(0, summed_lengths[-1],
                            axis_upsample * len(lengths)), summed_lengths,
                axis_pts)

        half_heights = self.rope.GetHalfHeights()
        rotates = self.rope.GetRotations()
        translates = self.rope.GetTranslations()
        new_pts = []
        for r in range(1, radius_sample + 1):
            d = r * radius / float(radius_sample)
            for a in range(angle_sample):
                local_rotate = matrixFromAxisAngle(
                    [a / float(angle_sample) * 2 * np.pi, 0, 0])[:3, :3]
                for index in indices:
                    node_id = np.min(
                        [np.floor(index),
                         len(self.rope.GetNodes()) - 1])
                    h = half_heights[node_id]
                    v = np.array([-h + 2 * h * (index - node_id), d, 0])
                    rotate = rotates[node_id]
                    translate = translates[node_id]
                    v = rotate.dot(local_rotate.dot(v)) + translate
                    new_pts.append(v)

        for pt in axis_pts:
            new_pts.append(pt)

        return np.array(new_pts)
def generate_random_pos(robot, obj_to_grasp = None):
    """Generate a random position for the robot within the boundaries of the 
    world.
    """
    
    T = robot.GetTransform()
    return T
    envmin, envmax = utils.get_environment_limits(robot.GetEnv(), robot)
    if obj_to_grasp is None:        
        max_x = envmax[0]
        max_y = envmax[1]
        max_th = np.pi
        min_x = envmin[0]
        min_y = envmin[1]
        min_th = np.pi        
    else:
        if type(obj_to_grasp) is openravepy.KinBody:
            obj_pos = obj_to_grasp.GetTransform()[:3,-1]
        else:
            obj_pos = obj_to_grasp
        max_x = min(obj_pos[0] + 1.2, envmax[0])
        max_y = min(obj_pos[1] + 1.2, envmax[1])
        min_x = max(obj_pos[0] - 1.2, envmin[0])
        min_y = max(obj_pos[1] - 1.2, envmin[1])
        
    
    #print "X ", (min_x, max_x), " Y: ", (min_y, max_y)
    x = np.random.uniform(min_x, max_x)
    y = np.random.uniform(min_y, max_y)
    z = T[2,3]
    
    if obj_to_grasp is None:
        th = np.random.uniform(min_th, max_th)
    else:
        robot_pos = T[:3,-1]
        facing_angle = np.arctan2(obj_pos[1] -y , obj_pos[0] - x)
        th = np.random.uniform(facing_angle - np.pi/4., 
                               facing_angle + np.pi/4.)
    
    #rotation
    T = openravepy.matrixFromAxisAngle([0,0,th])
    #translation
    T[:, -1] = [x, y, z, 1]    
    return T
Example #46
def load_fake_data_segment(sim_env,
                           demofile,
                           fake_data_segment,
                           fake_data_transform,
                           set_robot_state=True,
                           floating=False):
    fake_seg = demofile[fake_data_segment]
    new_xyz = np.squeeze(fake_seg["cloud_xyz"])
    hmat = openravepy.matrixFromAxisAngle(fake_data_transform[3:6])
    hmat[:3, 3] = fake_data_transform[0:3]
    new_xyz = new_xyz.dot(hmat[:3, :3].T) + hmat[:3, 3][None, :]
    r2r = None
    if not floating:
        r2r = ros2rave.RosToRave(sim_env.robot,
                                 asarray(fake_seg["joint_states"]["name"]))
        if set_robot_state:
            r2r.set_values(sim_env.robot,
                           asarray(fake_seg["joint_states"]["position"][0]))
    return new_xyz, r2r
Example #47
 def test_closest_arm_pose(self):
     env = ParamSetup.setup_env()
     # env.SetViewer('qtcoin')
     can = ParamSetup.setup_blue_can()
     robot = ParamSetup.setup_pr2()
     can.pose = np.array([[0,-.2,.8]]).T
     can_body = OpenRAVEBody(env, can.name, can.geom)
     can_body.set_pose(can.pose.flatten(), can.rotation.flatten())
     can_body.set_transparency(.7)
     robot.pose = np.array([[-.5,0,0]]).T
     robot_body = OpenRAVEBody(env, robot.name, robot.geom)
     robot_body.set_transparency(.7)
     robot_body.set_pose(robot.pose.flatten())
     robot_body.set_dof(robot.backHeight, robot.lArmPose.flatten(), robot.lGripper, robot.rArmPose.flatten(), robot.rGripper)
     can_trans = OpenRAVEBody.transform_from_obj_pose(can.pose, can.rotation)
     rot_mat = matrixFromAxisAngle([0, np.pi/2, 0])
     rot_mat = can_trans[:3, :3].dot(rot_mat[:3, :3])
     can_trans[:3, :3] = rot_mat
     torso_pose, arm_pose = sampling.get_torso_arm_ik(robot_body, can_trans, robot.rArmPose)
     robot_body.set_dof(robot.backHeight, robot.lArmPose.flatten(), robot.lGripper, arm_pose, robot.rGripper)
Example #48
    def sample_ee_from_target(self, target):
        """
            Sample all possible EE poses with which the PR2 can grasp the target.

            target: parameter of type Target
            return: list of tuples in the format (ee_pos, ee_rot)
        """
        possible_ee_poses = []
        targ_pos, targ_rot = target.value.flatten(), target.rotation.flatten()
        ee_pos = targ_pos.copy()
        target_trans = OpenRAVEBody.transform_from_obj_pose(targ_pos, targ_rot)
        # rotate about the can's local z-axis by angles linearly spaced between 0 and 2*pi
        angle_range = np.linspace(0, np.pi * 2, num=SAMPLE_SIZE)
        for rot in angle_range:
            target_trans = OpenRAVEBody.transform_from_obj_pose(targ_pos, targ_rot)
            # rotate new ee_pose around can's rotation axis
            rot_mat = matrixFromAxisAngle([0, 0, rot])
            ee_trans = target_trans.dot(rot_mat)
            ee_rot = OpenRAVEBody.obj_pose_from_transform(ee_trans)[3:]
            possible_ee_poses.append((ee_pos, ee_rot))
        return possible_ee_poses
Example #49
def SetCameraFromXML(viewer, xml):
    if isinstance(viewer, openravepy.Environment):
        viewer = viewer.GetViewer()

    import lxml.etree
    from StringIO import StringIO
    padded_xml = '<bogus>{0:s}</bogus>'.format(xml)
    camera_xml = lxml.etree.parse(StringIO(padded_xml))

    translation_raw = camera_xml.find('//camtrans').text
    axis_raw = camera_xml.find('//camrotationaxis').text
    focal_raw = camera_xml.find('//camfocal').text

    translation = numpy.loadtxt(StringIO(translation_raw))
    axis_angle = numpy.loadtxt(StringIO(axis_raw))
    axis_angle = axis_angle[3] * axis_angle[0:3] * (numpy.pi / 180)
    focal = float(focal_raw)

    transform = openravepy.matrixFromAxisAngle(axis_angle)
    transform[0:3, 3] = translation
    viewer.SetCamera(transform, focal)
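
For context, the XML fragment this helper parses uses OpenRAVE's viewer camera elements as read directly from the code above: camtrans (translation), camrotationaxis (axis plus an angle in degrees), and camfocal (focal distance). A hedged usage sketch with placeholder values (`env` is assumed to be an openravepy.Environment with a viewer attached):

# Hypothetical camera snippet; the numbers are placeholders, not from a real viewer dump.
camera_xml = """
<camtrans>0.5 0.0 2.0</camtrans>
<camrotationaxis>0 0 1 90</camrotationaxis>
<camfocal>1.0</camfocal>
"""
SetCameraFromXML(env, camera_xml)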
Example #50
    def setUp(self):
        table_height = 0.77
        helix_ang0 = 0
        helix_ang1 = 4*np.pi
        helix_radius = .2
        helix_center = np.r_[.6, 0]
        helix_height0 = table_height + .15
        helix_height1 = table_height + .15 + .3
        helix_length = np.linalg.norm(np.r_[(helix_ang1 - helix_ang0) * helix_radius, helix_height1 - helix_height0])
        num = np.round(helix_length/.02)
        helix_angs = np.linspace(helix_ang0, helix_ang1, num)
        helix_heights = np.linspace(helix_height0, helix_height1, num)
        init_rope_nodes = np.c_[helix_center + helix_radius * np.c_[np.cos(helix_angs), np.sin(helix_angs)], helix_heights]
        rope_params = sim_util.RopeParams()
        
        cyl_radius = 0.025
        cyl_height = 0.3
        cyl_pos0 = np.r_[.6, helix_radius, table_height + .25]
        cyl_pos1 = np.r_[.6, -helix_radius, table_height + .35]
        
        sim_objs = []
        sim_objs.append(BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False))
        sim_objs.append(RopeSimulationObject("rope", init_rope_nodes, rope_params))
        sim_objs.append(CylinderSimulationObject("cyl0", cyl_pos0, cyl_radius, cyl_height, dynamic=True))
        sim_objs.append(CylinderSimulationObject("cyl1", cyl_pos1, cyl_radius, cyl_height, dynamic=True))
        
        self.sim = DynamicSimulation()
        self.sim.add_objects(sim_objs)

        # rotate cylinders by 90 deg
        for i in range(2):
            bt_cyl = self.sim.bt_env.GetObjectByName('cyl%d' % i)
            T = openravepy.matrixFromAxisAngle(np.array([np.pi/2, 0, 0]))
            T[:3,3] = bt_cyl.GetTransform()[:3, 3]
            bt_cyl.SetTransform(T)  # SetTransform needs to be used in the Bullet object, not the openrave body
        self.sim.update()

        self.sim.add_objects([XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False)])
        self.sim.robot.SetDOFValues([0.25], [self.sim.robot.GetJoint('torso_lift_joint').GetJointIndex()])
        sim_util.reset_arms_to_side(self.sim)
def calc_Thk(xyz, rod):
    T_h_k = openravepy.matrixFromAxisAngle(rod)
    T_h_k[:3,3] += xyz
    return T_h_k
def main():
    import IPython
    demofile = h5py.File(args.h5file, 'r')
    trajoptpy.SetInteractive(args.interactive)


    if args.log:
        LOG_DIR = osp.join(osp.expanduser("~/Data/do_task_logs"), datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        os.mkdir(LOG_DIR)
        LOG_COUNT = 0
                        

    if args.execution:
        rospy.init_node("exec_task",disable_signals=True)
        Globals.pr2 = PR2.PR2()
        Globals.env = Globals.pr2.env
        Globals.robot = Globals.pr2.robot
        
    else:
        Globals.env = openravepy.Environment()
        Globals.env.StopSimulation()
        Globals.env.Load("robots/pr2-beta-static.zae")    
        Globals.robot = Globals.env.GetRobots()[0]

    if not args.fake_data_segment:
        grabber = cloudprocpy.CloudGrabber()
        grabber.startRGBD()

    Globals.viewer = trajoptpy.GetViewer(Globals.env)

    #####################

    while True:
        
    
        redprint("Acquire point cloud")
        if args.fake_data_segment:
            fake_seg = demofile[args.fake_data_segment]
            new_xyz = np.squeeze(fake_seg["cloud_xyz"])
            hmat = openravepy.matrixFromAxisAngle(args.fake_data_transform[3:6])
            hmat[:3,3] = args.fake_data_transform[0:3]
            new_xyz = new_xyz.dot(hmat[:3,:3].T) + hmat[:3,3][None,:]
            r2r = ros2rave.RosToRave(Globals.robot, asarray(fake_seg["joint_states"]["name"]))
            r2r.set_values(Globals.robot, asarray(fake_seg["joint_states"]["position"][0]))
            #Globals.pr2.head.set_pan_tilt(0,1.2)
            #Globals.pr2.rarm.goto_posture('side')
            #Globals.pr2.larm.goto_posture('side')            
            #Globals.pr2.join_all()
            #time.sleep(2)
        else:
            #Globals.pr2.head.set_pan_tilt(0,1.2)
            #Globals.pr2.rarm.goto_posture('side')
            #Globals.pr2.larm.goto_posture('side')            
            #Globals.pr2.join_all()
            #time.sleep(2)

            
            Globals.pr2.update_rave()
            
            rgb, depth = grabber.getRGBD()
            T_w_k = berkeley_pr2.get_kinect_transform(Globals.robot)
            new_xyz = cloud_proc_func(rgb, depth, T_w_k)
            print "got new xyz"
            redprint(new_xyz)
            #grab_end(new_xyz)

    
        if args.log:
            LOG_COUNT += 1
            import cv2
            cv2.imwrite(osp.join(LOG_DIR,"rgb%i.png"%LOG_COUNT), rgb)
            cv2.imwrite(osp.join(LOG_DIR,"depth%i.png"%LOG_COUNT), depth)
            np.save(osp.join(LOG_DIR,"xyz%i.npy"%LOG_COUNT), new_xyz)
        

        


        ################################    
        redprint("Finding closest demonstration")
        if args.select_manual:
            seg_name = find_closest_manual(demofile, new_xyz)
        else:
            seg_name = find_closest_auto(demofile, new_xyz)
        
        seg_info = demofile[seg_name]
        redprint("closest demo: %s"%(seg_name))
        if "done" in seg_name:
            redprint("DONE!")
            break
    
    
        if args.log:
            with open(osp.join(LOG_DIR,"neighbor%i.txt"%LOG_COUNT),"w") as fh: fh.write(seg_name) 
        ################################



        ### Old end effector forces at r_gripper_tool_frame (including torques)
        old_forces = seg_info['end_effector_forces'][:,0:3,:]
        old_torques = seg_info['end_effector_forces'][:,3:6,:]


        redprint("Generating end-effector trajectory")    

        handles = []
        old_xyz = np.squeeze(seg_info["cloud_xyz"])
       


        scaled_old_xyz, src_params = registration.unit_boxify(old_xyz)
        scaled_new_xyz, targ_params = registration.unit_boxify(new_xyz)        
        f,_ = registration.tps_rpm_bij(scaled_old_xyz, scaled_new_xyz, plot_cb = tpsrpm_plot_cb,
                                       plotting=5 if args.animation else 0,rot_reg=np.r_[1e-4,1e-4,1e-1], n_iter=50, reg_init=10, reg_final=.1)
        f = registration.unscale_tps(f, src_params, targ_params)
        
        old_xyz_transformed = f.transform_points(old_xyz)

        #handles.append(Globals.env.plot3(old_xyz_transformed,5, np.array([(0,0,1,1) for i in old_xyz_transformed])))

        handles.extend(plotting_openrave.draw_grid(Globals.env, f.transform_points, old_xyz.min(axis=0)-np.r_[0,0,.1], old_xyz.max(axis=0)+np.r_[0,0,.1], xres = .1, yres = .1, zres = .04))        

        link2eetraj = {}
        for lr in 'r':
            link_name = "%s_gripper_tool_frame"%lr
            old_ee_traj = asarray(seg_info[link_name]["hmat"])
            new_ee_traj = f.transform_hmats(old_ee_traj)
            link2eetraj[link_name] = new_ee_traj
            #print old_ee_traj
            old_ee_pos = old_ee_traj[:,0:3,3]
            #print old_ee_pos

            # End effector forces as oppossed to end effector trajectories
            dfdxs = f.compute_jacobian(old_ee_pos)
            old_eefs = []
            new_eefs = []
            for i in xrange(len(old_forces)):
                dfdx = np.matrix(dfdxs[i])
                old_force = np.matrix(old_forces[i])
                old_torque = np.matrix(old_torques[i])

                new_force = (dfdx * old_force)
                new_torque = (dfdx * old_torque)
                old_eefs.append(np.vstack((old_force, old_torque)))
                new_eefs.append(np.vstack((new_force, new_torque)))

            old_eefs = np.asarray(old_eefs)[:,:,0]
            new_eefs = np.asarray(new_eefs)[:,:,0]

            force_data = {}
            force_data['old_eefs'] = old_eefs
            force_data['new_eefs'] = new_eefs
            force_file = open("trial.pickle", 'wa')
            pickle.dump(force_data, force_file)
            force_file.close()
            new_ee_traj_x = new_ee_traj
            
        
        miniseg_starts, miniseg_ends = split_trajectory_by_gripper(seg_info)    
        success = True
        
        
        print colorize.colorize("mini segments:", "red"), miniseg_starts, miniseg_ends
        for (i_miniseg, (i_start, i_end)) in enumerate(zip(miniseg_starts, miniseg_ends)):
            
            if args.execution=="real": Globals.pr2.update_rave()


            ################################    
            redprint("Generating joint trajectory for segment %s, part %i"%(seg_name, i_miniseg))
            
            
            
            # figure out how we're gonna resample stuff
            lr2oldtraj = {}
            for lr in 'r':
                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
                old_joint_traj = asarray(seg_info[manip_name][i_start:i_end+1])
                #print (old_joint_traj[1:] - old_joint_traj[:-1]).ptp(axis=0), i_start, i_end
                #if arm_moved(old_joint_traj): NOT SURE WHY BUT THIS IS RETURNING FALSE
                lr2oldtraj[lr] = old_joint_traj

            if args.visualize:
                    r2r = ros2rave.RosToRave(Globals.robot, asarray(seg_info["joint_states"]["name"]))
                    r2r.set_values(Globals.robot, asarray(seg_info["joint_states"]["position"][0]))
                    for i in range(0, lr2oldtraj['r'].shape[0], 10):
                        handles = []
                        handles.append(Globals.env.plot3(new_xyz,5,np.array([(0,1,0,1) for x in new_xyz])))
                        handles.append(Globals.env.plot3(old_xyz,5,np.array([(1,0,0,1) for x in old_xyz])))
                        handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1)))
                        # Setting robot arm to joint trajectory
                        r_vals = lr2oldtraj['r'][i,:]
                        Globals.robot.SetDOFValues(r_vals, Globals.robot.GetManipulator('rightarm').GetArmIndices())
                        # Plotting forces from r_gripper_tool_frame
                        hmats = Globals.robot.GetLinkTransformations()
                        trans, rot = conv.hmat_to_trans_rot(hmats[-3])
                        f_start = np.array([0,0,0]) + trans
                        #IPython.embed()
                        f_end = np.array(old_forces[i].T)[0]/100 + trans
                        handles.append(Globals.env.drawlinestrip(np.array([f_start, f_end]), 10, (1,0,0,1)))
                        
                        redprint(i)
                        Globals.viewer.Step()
            if len(lr2oldtraj) > 0:
                old_total_traj = np.concatenate(lr2oldtraj.values(), 1)
                JOINT_LENGTH_PER_STEP = .1
                # FIRST RESAMPLING HAPPENS HERE: JOINT_LENGTH
                _, timesteps_rs = unif_resample(old_total_traj, JOINT_LENGTH_PER_STEP) # Timesteps only, can use to interpolate eefs for the first time
            ####
            new_eefs_segment = asarray(new_eefs[i_start:i_end+1,:]) # Extract miniseg, and re-sample

            if args.no_ds:
                new_eefs_segment_rs = new_eefs_segment
            else:
                new_eefs_segment_rs = mu.interp2d(timesteps_rs, np.arange(len(new_eefs_segment)), new_eefs_segment)
        
            ### Generate fullbody traj
            bodypart2traj = {}
            for (lr,old_joint_traj) in lr2oldtraj.items():

                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
                ee_link_name = "%s_gripper_tool_frame"%lr
                new_ee_traj = link2eetraj[ee_link_name][i_start:i_end+1]
                if args.no_ds:
                    old_joint_traj_rs = old_joint_traj
                    new_ee_traj_rs = new_ee_traj
                    ds_inds = np.arange(0, len(new_ee_traj_rs), args.trajopt_ds)
                    new_ee_traj_rs = new_ee_traj_rs[ds_inds]
                    
                    old_joint_traj_rs = old_joint_traj_rs[ds_inds]
                    new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name,
                        Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs)
                    new_joint_traj = mu.interp2d(np.arange(len(old_joint_traj)), np.arange(0, len(new_joint_traj) * args.trajopt_ds, args.trajopt_ds), new_joint_traj)
                else:
                    old_joint_traj_rs = mu.interp2d(timesteps_rs, np.arange(len(old_joint_traj)), old_joint_traj)
                    new_ee_traj_rs = resampling.interp_hmats(timesteps_rs, np.arange(len(new_ee_traj)), new_ee_traj)
                    new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name,
                        Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs)
                if args.execution: Globals.pr2.update_rave()
                part_name = {"l":"larm", "r":"rarm"}[lr]
                bodypart2traj[part_name] = new_joint_traj
                redprint("Joint trajectory has length: " + str(len(bodypart2traj[part_name])))
                
            redprint("Executing joint trajectory for segment %s, part %i using arms '%s'"%(seg_name, i_miniseg, bodypart2traj.keys()))


            redprint("Press enter to set gripper")
            raw_input()

            #for lr in 'r':
            #    set_gripper_maybesim(lr, binarize_gripper(seg_info["%s_gripper_joint"%lr][i_start]))

            if args.pid:

                if not args.fake_data_segment: # If running on PR2, add initial state as waypoint and rough interpolate
                    # Add initial position
                    (arm_position, arm_velocity, arm_effort) = robot_states.call_return_joint_states(robot_states.arm_joint_names)
                    traj_length = bodypart2traj['rarm'].shape[0]
                    complete_joint_traj = np.zeros((traj_length+1, 7)) # To include initial state as a way point
                    complete_joint_traj[1:,:] = bodypart2traj['rarm']
                    complete_joint_traj[0,:] = arm_position
                    bodypart2traj['rarm'] = complete_joint_traj

                    # Add initial eff
                    J = np.matrix(np.resize(np.array(robot_states.call_return_jacobian()), (6, 7))) # Jacobian
                    eff = robot_states.compute_end_effector_force(J, arm_effort).T
                    eff =  np.array(eff)[0]
                    init_force = eff # keep the full 6-dof wrench (force + torque) so it matches new_eefs_segment_rs
                    complete_force_traj = np.zeros((traj_length+1, 6))
                    complete_force_traj[1:,:] = new_eefs_segment_rs
                    complete_force_traj[0,:] = init_force

                else:
                    complete_force_traj = new_eefs_segment_rs
                # SECOND RESAMPLING HAPPENS HERE: JOINT VELOCITIES
                if args.no_ds:
                    traj = bodypart2traj['rarm']
                    stamps = asarray(seg_info['stamps'])
                    times = np.array([stamps[i_end] - stamps[i_start]])
                    F = complete_force_traj
                else:
                    times, times_up, traj = pr2_trajectories.return_timed_traj(Globals.pr2, bodypart2traj) # use times and times_up to interpolate the second time
                    F = mu.interp2d(times_up, times, complete_force_traj)
                
                #Globals.robot.SetDOFValues(asarray(fake_seg["joint_states"]["position"][0]))
                if args.visualize:
                    r2r = ros2rave.RosToRave(Globals.robot, asarray(seg_info["joint_states"]["name"]))
                    r2r.set_values(Globals.robot, asarray(seg_info["joint_states"]["position"][0]))
                    for i in range(0, traj.shape[0], 10):
                        handles = []
                        handles.append(Globals.env.plot3(new_xyz,5,np.array([(0,1,0,1) for x in new_xyz])))
                        handles.append(Globals.env.plot3(old_xyz,5,np.array([(1,0,0,1) for x in old_xyz])))
                        handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1)))
                        handles.append(Globals.env.drawlinestrip(new_ee_traj[:,:3,3], 2, (0,1,0,1)))
                        handles.append(Globals.env.drawlinestrip(new_ee_traj_rs[:,:3,3], 2, (0,0,1,1)))
                        # Setting robot arm to joint trajectory
                        r_vals = traj[i,:]
                        Globals.robot.SetDOFValues(r_vals, Globals.robot.GetManipulator('rightarm').GetArmIndices())
                        
                        # Plotting forces from r_gripper_tool_frame
                        hmats = Globals.robot.GetLinkTransformations()
                        trans, rot = conv.hmat_to_trans_rot(hmats[-3])
                        f_start = np.array([0,0,0]) + trans
                        f_end = np.array(old_forces[i].T)[0]/100 + trans
                        handles.append(Globals.env.drawlinestrip(np.array([f_start, f_end]), 10, (1,0,0,1)))
                        
                        redprint(i)
                        Globals.viewer.Step()






                qs = np.array(np.matrix(traj).T) # 7 x n
                qs = np.resize(qs, (1, qs.shape[1]*7))[0] #resize the data to 1x7n (n being the number of steps)
                F = np.array(np.matrix(F).T) # 6 x n
                F = np.resize(F, (1, F.shape[1]*6))[0] #resize the data to 1x6n


                # Controller code
                allCs = []
                x = np.ones(6) * 1 
                v = np.ones(6) * 1e-3
                a = np.ones(6) * 1e-6
                Ct = np.diag(np.hstack((x, v, a)))
                num_steps = i_end - i_start + 1
                #need to figure out the stamps correctly
                stamps = asarray(seg_info['stamps'][i_start:i_end+1])
                for t in range(num_steps-1, -1, -1):
                    allCs.append(Ct)
                m = np.ones(6)
                Kps = []
                Kvs = []
                Ks = []
                Qt = None
                Vs = []
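                # NOTE: the loop below is a backward (Riccati / LQR-style) pass over an augmented
                # state [pose(6); velocity(6); force(6)].  Ft encodes double-integrator dynamics
                # (pose += dt*vel + dt^2/m*force, vel += dt/m*force), and each step computes
                #     Kt = -Quu^{-1} Qux,   Vt = Qxx - Qxu Quu^{-1} Qux,
                # so Kt[:, 0:6] acts as a position gain (Kp) and Kt[:, 6:12] as a velocity gain (Kv).
                # A condensed, standalone numpy sketch of this pass appears after this function.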
                for t in range(num_steps-1, -1, -1):
                    if Qt is None:
                        Qt = allCs[t]
                    else:
                        Ft = np.zeros((12, 18))
                        for j in range(12):
                            Ft[j][j] = 1.0
                        deltat = abs(stamps[t+1] - stamps[t])
                        #print(deltat)
                        for j in range(6):
                            Ft[j][j+6] = deltat
                        for j in range(6):
                            Ft[j+6][j+12] = deltat/m[j]  
                        for j in range(6):
                            Ft[j][j+12] = ((deltat)**2)/m[j]
                        Qt = allCs[t] + (np.transpose(Ft).dot(Vs[num_steps-2-t]).dot(Ft))
                    Qxx = Qt[0:12, 0:12]
                    Quu = Qt[12:18, 12:18]
                    Qxu = Qt[0:12, 12:18]
                    Qux = Qt[12:18, 0:12]
                    Quuinv = np.linalg.inv(Quu)
                    Vt = Qxx - Qxu.dot(Quuinv).dot(Qux)
                    Kt = -1*(Quuinv.dot(Qux))
                    Ks.append(Kt)
                    Kps.append(Kt[:, 0:6])
                    Kvs.append(Kt[:, 6:12])
                    Vs.append(Vt)

                Ks = Ks[::-1]
                Kps = Kps[::-1]
                Kvs = Kvs[::-1]



                JKpJ = []
                JKvJ = []
                toAddJkpJ = np.diag(np.asarray([-2400.0, -1200.0, -1000.0, -700.0, -300.0, -300.0, -300.0]))
                toAddJkvJ = np.diag(np.asarray([-18.0, -10.0, -6.0, -4.0, -6.0, -4.0, -4.0]))
                JacobianOlds = asarray(seg_info["jacobians"][i_start:i_end+1])
                for i in range(i_end- i_start + 1):
                    J = JacobianOlds[i]
                    psuedoJ = np.linalg.inv(np.transpose(J).dot(J)).dot(np.transpose(J))
                    #JKpJ.append(np.transpose(J).dot(Kps[i]).dot(J) + toAddJkpJ*0.001)
                    #JKvJ.append(np.transpose(J).dot(Kvs[i]).dot(J) + toAddJkvJ*0.001)
                    JKpJ.append(psuedoJ.dot(Kps[i]).dot(J) + toAddJkpJ*0.001)
                    JKvJ.append(psuedoJ.dot(Kvs[i]).dot(J) + toAddJkvJ*0.001)
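                # NOTE: the loop above maps the 6x6 task-space gains into 7x7 joint-space gains,
                # roughly JKpJ = J+ Kp J and JKvJ = J+ Kv J (plus a small hand-tuned diagonal
                # term).  If the stored Jacobians are 6x7, J^T J is rank-deficient, so the right
                # pseudo-inverse J^T (J J^T)^{-1} or a damped variant may be the better-posed choice.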
                if args.useJK:
                    Kps = []
                    Kvs = []
                    for i in range(i_end- i_start + 1):
                        Kps.append(np.zeros((6,6)))
                        Kvs.append(np.zeros((6,6)))
                else:
                    JKpJ = []
                    JKvJ = []
                    for i in range(i_end- i_start + 1):
                        JKpJ.append(np.zeros((7,7)))
                        JKvJ.append(np.zeros((7,7)))



                Kps = np.asarray(Kps)
                Kvs = np.asarray(Kvs)
                JKpJ = np.asarray(JKpJ)
                JKvJ = np.asarray(JKvJ)

                IPython.embed()
                
                # # Temp Kps and Kvs for testing
                """
                toAddJkpJ = np.diag(np.asarray([-2400.0, -1200.0, -1000.0, -700.0, -300.0, -300.0, -300.0]))
                toAddJkvJ = np.diag(np.asarray([-18.0, -10.0, -6.0, -4.0, -6.0, -4.0, -4.0]))
                length = complete_force_traj.shape[0]
                JKpJ = np.asarray([toAddJkpJ for i in range(length)])
                JKpJ = np.resize(JKpJ, (1, 49*length))[0]

                JKvJ = np.asarray([toAddJkvJ for i in range(length)])
                JKvJ = np.resize(JKvJ, (1, 49*length))[0]
                """

                # [traj, Kp, Kv, F, use_force, seconds]
                
                Kps = np.resize(Kps, (1, 36 * num_steps))[0]
                Kvs = np.resize(Kvs, (1, 36 * num_steps))[0]

                
                JKpJ = np.resize(JKpJ, (1, 49 * num_steps))[0]
                JKvJ = np.resize(JKvJ, (1, 49 * num_steps))[0]
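                # The flattened message below concatenates qs, JKpJ, JKvJ, F, Kps and Kvs
                # (7, 49, 49, 6, 36 and 36 values per step, respectively), followed by the
                # use_force flag and the segment duration; the node listening on /controller_data
                # is assumed to slice these blocks apart using the same sizes.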
                
                data = np.zeros((1, len(qs) + len(JKpJ) + len(JKvJ) + len(F) + len(Kps) + len(Kvs) + 2))
                data[0][0:len(qs)] = qs
                data[0][len(qs):len(qs)+len(JKpJ)] = JKpJ
                data[0][len(qs)+len(JKpJ):len(qs)+len(JKpJ)+len(JKvJ)] = JKvJ
                data[0][len(qs)+len(JKpJ)+len(JKvJ):len(qs)+len(JKpJ)+len(JKvJ)+len(F)] = F
                data[0][len(qs)+len(JKpJ)+len(JKvJ)+len(F):len(qs)+len(JKpJ)+len(JKvJ)+len(F)+len(Kps)] = Kps
                data[0][len(qs)+len(JKpJ)+len(JKvJ)+len(F)+len(Kps):len(qs)+len(JKpJ)+len(JKvJ)+len(F)+len(Kps)+len(Kvs)] = Kvs
                data[0][-2] = args.use_force
                data[0][-1] = stamps[-1] - stamps[0]
                msg = Float64MultiArray()
                msg.data = data[0].tolist()
                pub = rospy.Publisher("/controller_data", Float64MultiArray)
                redprint("Press enter to start trajectory")
                raw_input()
                time.sleep(1)
                pub.publish(msg)
                time.sleep(1)
            else:
                #if not success: break
                
                if len(bodypart2traj) > 0:
                    exec_traj_maybesim(bodypart2traj)
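
# A condensed, standalone numpy sketch (illustrative names, not the author's API) of the backward
# LQR pass used in the controller-gain block above: the augmented state is [pose(6); vel(6);
# force(6)], C is the per-step quadratic cost, and the returned gains split into position (Kp)
# and velocity (Kv) blocks, one pair per timestep.
import numpy as np

def lqr_gain_schedule_sketch(stamps, m=None, x_w=1.0, v_w=1e-3, a_w=1e-6):
    m = np.ones(6) if m is None else np.asarray(m, dtype=float)
    C = np.diag(np.hstack((np.ones(6)*x_w, np.ones(6)*v_w, np.ones(6)*a_w)))  # 18x18 cost
    V = None
    Kps, Kvs = [], []
    for t in range(len(stamps)-1, -1, -1):
        if V is None:
            Q = C.copy()
        else:
            dt = abs(stamps[t+1] - stamps[t])
            F = np.zeros((12, 18))
            F[:12, :12] = np.eye(12)               # pose and velocity carry over
            F[:6, 6:12] = dt*np.eye(6)             # pose += dt * vel
            F[6:12, 12:18] = np.diag(dt/m)         # vel  += dt/m * force
            F[:6, 12:18] = np.diag(dt**2/m)        # pose += dt^2/m * force
            Q = C + F.T.dot(V).dot(F)
        Quu_inv = np.linalg.inv(Q[12:18, 12:18])
        K = -Quu_inv.dot(Q[12:18, 0:12])           # Kt = -Quu^{-1} Qux
        V = Q[0:12, 0:12] - Q[0:12, 12:18].dot(Quu_inv).dot(Q[12:18, 0:12])
        Kps.append(K[:, 0:6])
        Kvs.append(K[:, 6:12])
    return Kps[::-1], Kvs[::-1]
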
def track_video_frames_online(video_dir, T_w_k, init_tfm, table_plane, ref_image = None, rope_params = None):    
    video_stamps = np.loadtxt(osp.join(video_dir,demo_names.stamps_name))
    
    env = openravepy.Environment()
    
    stdev = np.array([])
    
    
    
    rgb = cv2.imread(osp.join(video_dir,demo_names.rgb_name%0))
    rgb = color_match.match(ref_image, rgb)
    assert rgb is not None
    depth = cv2.imread(osp.join(video_dir,demo_names.depth_name%0), 2)
    assert depth is not None
    rope_xyz = extract_red(rgb, depth, T_w_k)

    table_dir = np.array([table_plane[0], table_plane[1], table_plane[2]])
    table_dir = table_dir / np.linalg.norm(table_dir)
    
    table_dir = init_tfm[:3,:3].dot(table_dir)    
    if np.dot([0,0,1], table_dir) < 0:
        table_dir = -table_dir
    table_axis = np.cross(table_dir, [0,0,1])
    table_angle = np.arccos(np.dot([0,0,1], table_dir))
    table_tfm = openravepy.matrixFromAxisAngle(table_axis, table_angle)
    table_center_xyz = np.mean(rope_xyz, axis=0)
        
    table_tfm[:3,3] = - table_tfm[:3,:3].dot(table_center_xyz) + table_center_xyz
    init_tfm = table_tfm.dot(init_tfm)
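    # NOTE: table_tfm above rotates the estimated table normal onto +z, pivoting about the rope
    # centroid, so the composed init_tfm should map points onto a level table in the global frame.
    # A numpy-only sketch of the same construction appears after this function.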
    
    tracked_nodes = None
    
    for (index, stamp) in zip(range(len(video_stamps)), video_stamps):
        print index, stamp
        rgb = cv2.imread(osp.join(video_dir,demo_names.rgb_name%index))
        rgb = color_match.match(ref_image, rgb)
        assert rgb is not None
        depth = cv2.imread(osp.join(video_dir,demo_names.depth_name%index), 2)
        assert depth is not None
        color_cloud = extract_rope_tracking(rgb, depth, T_w_k) # color_cloud is at first in camera frame
        
        
        #print "cloud in camera frame"
        #print "==================================="
        #print color_cloud[:, :3]

        
    
        color_cloud = clouds.downsample_colored(color_cloud, .01)
        color_cloud[:,:3] = color_cloud[:,:3].dot(init_tfm[:3,:3].T) + init_tfm[:3,3][None,:] # color_cloud now is in global frame
        
        raw_color_cloud = np.array(color_cloud)
        ##########################################
        ### remove the shadow points on the desk
        ##########################################
        color_cloud = remove_shadow(color_cloud)
        
        if tracked_nodes is not None:
            color_cloud = rope_shape_filter(tracked_nodes, color_cloud)
             
        if index == 0:
            rope_xyz = extract_red(rgb, depth, T_w_k)
            rope_xyz = clouds.downsample(rope_xyz, .01)
            rope_xyz = rope_xyz.dot(init_tfm[:3, :3].T) + init_tfm[:3,3][None,:]
            rope_nodes = rope_initialization.find_path_through_point_cloud(rope_xyz) # rope_nodes and rope_xyz are in global frame
                        
            # print rope_nodes
            
            table_height = rope_xyz[:,2].min() - .02
            table_xml = make_table_xml(translation=[1, 0, table_height], extents=[.85, .55, .01])
            env.LoadData(table_xml)            
            
            bulletsimpy.sim_params.scale = 10
            bulletsimpy.sim_params.maxSubSteps = 200
            if rope_params is None:
                rope_params = bulletsimpy.CapsuleRopeParams()
                rope_params.radius = 0.005
                #angStiffness: a rope with a higher angular stiffness seems to have more resistance to bending.
                #orig self.rope_params.angStiffness = .1
                rope_params.angStiffness = .1
                #A higher angular damping causes the rope's joints to change angle more slowly.
                #This can cause the rope to be dragged at an angle by the arm in the air, instead of falling straight.
                #orig self.rope_params.angDamping = 1
                rope_params.angDamping = 1
                #orig self.rope_params.linDamping = .75
                #Not sure what linear damping is, but it seems to limit the linear acceleration of the centers of mass.
                rope_params.linDamping = .75
                #Angular limit seems to be the minimum angle at which the rope joints can bend.
                #A higher angular limit increases the minimum radius of curvature of the rope.
                rope_params.angLimit = .4
                #TODO--Find out what the linStopErp is
                #This could be the tolerance for error when the joint is at or near the joint limit
                rope_params.linStopErp = .2  
            
            bt_env = bulletsimpy.BulletEnvironment(env, [])
            bt_env.SetGravity([0,0,-0.1])
            rope = bulletsimpy.CapsuleRope(bt_env, 'rope', rope_nodes, rope_params)
            
            continue
        
        
        
 #==============================================================================
 #       rope_nodes = rope.GetNodes()
 #       #print "rope nodes in camera frame"
 #       R = init_tfm[:3,:3].T
 #       t = - R.dot(init_tfm[:3,3])
 #       rope_in_camera_frame = rope_nodes.dot(R.T) + t[None,:]
 #       #print rope_in_camera_frame
 #       uvs = XYZ_to_xy(rope_in_camera_frame[:,0], rope_in_camera_frame[:,1], rope_in_camera_frame[:,2], asus_xtion_pro_f)
 #       uvs = np.vstack(uvs)
 #       #print uvs
 #       #print "uvs"
 #       uvs = uvs.astype(int)
 #       
 #       n_rope_nodes = len(rope_nodes)
 #       
 #       DEPTH_OCCLUSION_DIST = .03
 #       occ_dist = DEPTH_OCCLUSION_DIST
 #       
 #       vis = np.ones(n_rope_nodes)
 #       
 #       rope_depth_in_camera = np.array(rope_in_camera_frame)
 #       depth_xyz = depth_to_xyz(depth, asus_xtion_pro_f)
 # 
 #       for i in range(n_rope_nodes):
 #           u = uvs[0, i]
 #           v = uvs[1, i]
 #           
 #           neighbor_radius = 10;
 #           v_range = [max(0, v-neighbor_radius), v+neighbor_radius+1]
 #           u_range = [max(0, u-neighbor_radius), u+neighbor_radius+1]
 #           
 #           xyzs = depth_xyz[v_range[0]:v_range[1], u_range[0]:u_range[1]]
 #                       
 #           xyzs = np.reshape(xyzs, (xyzs.shape[0]*xyzs.shape[1], xyzs.shape[2]))
 #           dists_to_origin = np.linalg.norm(xyzs, axis=1)
 #           
 #           dists_to_origin = dists_to_origin[np.isfinite(dists_to_origin)]
 #           
 #           #print dists_to_origin
 #           min_dist_to_origin = np.min(dists_to_origin)
 #                       
 #           print v, u, min_dist_to_origin, np.linalg.norm(rope_in_camera_frame[i])
 #                                   
 #           if min_dist_to_origin + occ_dist > np.linalg.norm(rope_in_camera_frame[i]):
 #               vis[i] = 1
 #           else:
 #               vis[i] = 0
 #               
 #       #print "vis result"
 #       #print vis
 #       
 #       rope_depth_in_global = rope_depth_in_camera.dot(init_tfm[:3,:3].T) + init_tfm[:3,3][None,:]
 #==============================================================================
        
        
        depth_cloud = extract_cloud(depth, T_w_k)
        depth_cloud[:,:3] = depth_cloud[:,:3].dot(init_tfm[:3,:3].T) + init_tfm[:3,3][None,:] # depth_cloud now is in global frame


        [tracked_nodes, new_stdev] = bulletsimpy.py_tracking(rope, bt_env, init_tfm, color_cloud, rgb, depth, 5, stdev)
        stdev = new_stdev
        
        #print tracked_nodes

        #if index % 10 != 0:
        #    continue
        
        print index
        
        
        xx, yy = np.mgrid[-1:3, -1:3]
        zz = np.ones(xx.shape) * table_height
        table_cloud = [xx, yy, zz]
        
        fig.clf()
        ax = fig.gca(projection='3d')
        ax.set_autoscale_on(False)     
        
        print init_tfm[:,3]
        ax.plot(depth_cloud[:,0], depth_cloud[:,1], depth_cloud[:,2], 'go', alpha=0.1)
        ax.plot(color_cloud[:,0], color_cloud[:,1], color_cloud[:,2]-0.1, 'go')
        ax.plot(raw_color_cloud[:,0], raw_color_cloud[:,1], raw_color_cloud[:,2] -0.2, 'ro')
        #ax.plot(rope_depth_in_global[:,0], rope_depth_in_global[:,1], rope_depth_in_global[:,2], 'ro')
        ax.plot_surface(table_cloud[0], table_cloud[1], table_cloud[2], color = (0,1,0,0.5))
        ax.plot(tracked_nodes[:,0], tracked_nodes[:,1], tracked_nodes[:,2], 'bo')
        ax.plot([init_tfm[0,3]], [init_tfm[1,3]], [init_tfm[2,3]], 'ro')   


        fig.show()
        raw_input()
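
# A minimal numpy-only sketch (illustrative, not part of the original pipeline) of the
# table-alignment transform built in track_video_frames_online above: rotate an estimated plane
# normal onto +z about a chosen pivot point using the Rodrigues formula.
import numpy as np

def align_normal_to_z_sketch(normal, pivot):
    n = np.asarray(normal, dtype=float)
    n = n / np.linalg.norm(n)
    if n[2] < 0:
        n = -n                                            # make the normal point "up"
    axis = np.cross(n, [0.0, 0.0, 1.0])
    s = np.linalg.norm(axis)                              # sin(angle between n and +z)
    c = np.dot(n, [0.0, 0.0, 1.0])                        # cos(angle between n and +z)
    T = np.eye(4)
    if s > 1e-9:
        k = axis / s
        K = np.array([[0, -k[2], k[1]], [k[2], 0, -k[0]], [-k[1], k[0], 0]])
        T[:3, :3] = np.eye(3) + s*K + (1 - c)*K.dot(K)    # Rodrigues rotation
    T[:3, 3] = np.asarray(pivot, dtype=float) - T[:3, :3].dot(pivot)  # rotate about the pivot
    return T
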
def main():
    import IPython
    demofile = h5py.File(args.h5file, 'r')
    trajoptpy.SetInteractive(args.interactive)


    if args.log:
        LOG_DIR = osp.join(osp.expanduser("~/Data/do_task_logs"), datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        os.mkdir(LOG_DIR)
        LOG_COUNT = 0
                        

    if args.execution:
        rospy.init_node("exec_task",disable_signals=True)
        Globals.pr2 = PR2.PR2()
        Globals.env = Globals.pr2.env
        Globals.robot = Globals.pr2.robot
        
    else:
        Globals.env = openravepy.Environment()
        Globals.env.StopSimulation()
        Globals.env.Load("robots/pr2-beta-static.zae")    
        Globals.robot = Globals.env.GetRobots()[0]

    if not args.fake_data_segment:
        grabber = cloudprocpy.CloudGrabber()
        grabber.startRGBD()

    Globals.viewer = trajoptpy.GetViewer(Globals.env)

    #####################

    while True:
        
    
        redprint("Acquire point cloud")
        if args.fake_data_segment:
            fake_seg = demofile[args.fake_data_segment]
            new_xyz = np.squeeze(fake_seg["cloud_xyz"])
            hmat = openravepy.matrixFromAxisAngle(args.fake_data_transform[3:6])
            hmat[:3,3] = args.fake_data_transform[0:3]
            new_xyz = new_xyz.dot(hmat[:3,:3].T) + hmat[:3,3][None,:]
            r2r = ros2rave.RosToRave(Globals.robot, asarray(fake_seg["joint_states"]["name"]))
            r2r.set_values(Globals.robot, asarray(fake_seg["joint_states"]["position"][0]))
            #Globals.pr2.head.set_pan_tilt(0,1.2)
            #Globals.pr2.rarm.goto_posture('side')
            #Globals.pr2.larm.goto_posture('side')            
            #Globals.pr2.join_all()
            #time.sleep(2)
        else:
            #Globals.pr2.head.set_pan_tilt(0,1.2)
            #Globals.pr2.rarm.goto_posture('side')
            #Globals.pr2.larm.goto_posture('side')            
            #Globals.pr2.join_all()
            #time.sleep(2)

            
            Globals.pr2.update_rave()
            
            rgb, depth = grabber.getRGBD()
            T_w_k = berkeley_pr2.get_kinect_transform(Globals.robot)
            new_xyz = cloud_proc_func(rgb, depth, T_w_k)
            print "got new xyz"
            redprint(new_xyz)
            #grab_end(new_xyz)

    
        if args.log:
            LOG_COUNT += 1
            import cv2
            cv2.imwrite(osp.join(LOG_DIR,"rgb%i.png"%LOG_COUNT), rgb)
            cv2.imwrite(osp.join(LOG_DIR,"depth%i.png"%LOG_COUNT), depth)
            np.save(osp.join(LOG_DIR,"xyz%i.npy"%LOG_COUNT), new_xyz)
        

        


        ################################    
        redprint("Finding closest demonstration")
        if args.select_manual:
            seg_name = find_closest_manual(demofile, new_xyz)
        else:
            seg_name = find_closest_auto(demofile, new_xyz)
        
        seg_info = demofile[seg_name]
        redprint("closest demo: %s"%(seg_name))
        if "done" in seg_name:
            redprint("DONE!")
            break
    
    
        if args.log:
            with open(osp.join(LOG_DIR,"neighbor%i.txt"%LOG_COUNT),"w") as fh: fh.write(seg_name) 
        ################################



        ### Old end effector forces at r_gripper_tool_frame (eliminating the torques for now)
        old_eefs = seg_info['end_effector_forces'][:,0:3,:]


        redprint("Generating end-effector trajectory")    

        handles = []
        old_xyz = np.squeeze(seg_info["cloud_xyz"])
       


        scaled_old_xyz, src_params = registration.unit_boxify(old_xyz)
        scaled_new_xyz, targ_params = registration.unit_boxify(new_xyz)        
        f,_ = registration.tps_rpm_bij(scaled_old_xyz, scaled_new_xyz, plot_cb = tpsrpm_plot_cb,
                                       plotting=5 if args.animation else 0,rot_reg=np.r_[1e-4,1e-4,1e-1], n_iter=50, reg_init=10, reg_final=.1)
        f = registration.unscale_tps(f, src_params, targ_params)
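        # NOTE: tps_rpm_bij fits a thin-plate-spline warp f from the demo cloud to the current
        # cloud (with RPM-style soft correspondences) on the box-normalized points; unscale_tps
        # maps the warp back to the original metric frame before it is applied to the gripper
        # poses and wrenches below.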
        
        old_xyz_transformed = f.transform_points(old_xyz)

        #handles.append(Globals.env.plot3(old_xyz_transformed,5, np.array([(0,0,1,1) for i in old_xyz_transformed])))

        handles.extend(plotting_openrave.draw_grid(Globals.env, f.transform_points, old_xyz.min(axis=0)-np.r_[0,0,.1], old_xyz.max(axis=0)+np.r_[0,0,.1], xres = .1, yres = .1, zres = .04))        

        link2eetraj = {}
        for lr in 'r':
            link_name = "%s_gripper_tool_frame"%lr
            old_ee_traj = asarray(seg_info[link_name]["hmat"])
            new_ee_traj = f.transform_hmats(old_ee_traj)
            link2eetraj[link_name] = new_ee_traj
            #print old_ee_traj
            old_ee_pos = old_ee_traj[:,0:3,3]
            #print old_ee_pos

            # End effector forces as opposed to end effector trajectories
            dfdxs = f.compute_jacobian(old_ee_pos)
            new_eefs = []
            for i in xrange(len(old_eefs)):
                dfdx = np.matrix(dfdxs[i])
                old_eef = np.matrix(old_eefs[i])
                new_eefs.append(dfdx * old_eef)
            old_eefs = asarray(old_eefs)[:,:,0]
            new_eefs = asarray(new_eefs)[:,:,0]

            force_data = {}
            force_data['old_eefs'] = old_eefs
            force_data['new_eefs'] = new_eefs
            force_file = open("trial.pickle", 'wb')
            pickle.dump(force_data, force_file)
            force_file.close()
            new_ee_traj_x = new_ee_traj
            
        
        miniseg_starts, miniseg_ends = split_trajectory_by_gripper(seg_info)    
        success = True
        
        
        print colorize.colorize("mini segments:", "red"), miniseg_starts, miniseg_ends
        for (i_miniseg, (i_start, i_end)) in enumerate(zip(miniseg_starts, miniseg_ends)):
            
            if args.execution=="real": Globals.pr2.update_rave()


            ################################    
            redprint("Generating joint trajectory for segment %s, part %i"%(seg_name, i_miniseg))
            
            
            
            # figure out how we're gonna resample stuff
            lr2oldtraj = {}
            for lr in 'r':
                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
                old_joint_traj = asarray(seg_info[manip_name][i_start:i_end+1])
                #print (old_joint_traj[1:] - old_joint_traj[:-1]).ptp(axis=0), i_start, i_end
                #if arm_moved(old_joint_traj): NOT SURE WHY BUT THIS IS RETURNING FALSE
                lr2oldtraj[lr] = old_joint_traj

            if args.visualize:
                    r2r = ros2rave.RosToRave(Globals.robot, asarray(seg_info["joint_states"]["name"]))
                    r2r.set_values(Globals.robot, asarray(seg_info["joint_states"]["position"][0]))
                    for i in range(0, lr2oldtraj['r'].shape[0], 10):
                        handles = []
                        handles.append(Globals.env.plot3(new_xyz,5,np.array([(0,1,0,1) for x in new_xyz])))
                        handles.append(Globals.env.plot3(old_xyz,5,np.array([(1,0,0,1) for x in old_xyz])))
                        handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1)))
                        # Setting robot arm to joint trajectory
                        r_vals = lr2oldtraj['r'][i,:]
                        Globals.robot.SetDOFValues(r_vals, Globals.robot.GetManipulator('rightarm').GetArmIndices())
                        # Plotting forces from r_gripper_tool_frame
                        hmats = Globals.robot.GetLinkTransformations()
                        trans, rot = conv.hmat_to_trans_rot(hmats[-3])
                        f_start = np.array([0,0,0]) + trans
                        f_end = np.array(old_eefs[i])/100 + trans
                        handles.append(Globals.env.drawlinestrip(np.array([f_start, f_end]), 10, (1,0,0,1)))
                        
                        redprint(i)
                        Globals.viewer.Step()
            if len(lr2oldtraj) > 0:
                old_total_traj = np.concatenate(lr2oldtraj.values(), 1)
                JOINT_LENGTH_PER_STEP = .1
                # FIRST RESAMPLING HAPPENS HERE: JOINT_LENGTH
                _, timesteps_rs = unif_resample(old_total_traj, JOINT_LENGTH_PER_STEP) # Timesteps only, can use to interpolate eefs for the first time
            ####
            new_eefs_segment = asarray(new_eefs[i_start:i_end+1,:]) # Extract miniseg, and re-sample

            if args.no_ds:
                new_eefs_segment_rs = new_eefs_segment
            else:
                new_eefs_segment_rs = mu.interp2d(timesteps_rs, np.arange(len(new_eefs_segment)), new_eefs_segment)
        
            ### Generate fullbody traj
            bodypart2traj = {}
            for (lr,old_joint_traj) in lr2oldtraj.items():

                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
                ee_link_name = "%s_gripper_tool_frame"%lr
                new_ee_traj = link2eetraj[ee_link_name][i_start:i_end+1]
                if args.no_ds:
                    old_joint_traj_rs = old_joint_traj
                    new_ee_traj_rs = new_ee_traj
                    ds_inds = np.arange(0, len(new_ee_traj_rs), args.trajopt_ds)
                    new_ee_traj_rs = new_ee_traj_rs[ds_inds]
                    
                    old_joint_traj_rs = old_joint_traj_rs[ds_inds]
                    new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name,
                        Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs)
                    new_joint_traj = mu.interp2d(np.arange(len(old_joint_traj)), np.arange(0, len(new_joint_traj) * args.trajopt_ds, args.trajopt_ds), new_joint_traj)
                else:
                    old_joint_traj_rs = mu.interp2d(timesteps_rs, np.arange(len(old_joint_traj)), old_joint_traj)
                    new_ee_traj_rs = resampling.interp_hmats(timesteps_rs, np.arange(len(new_ee_traj)), new_ee_traj)
                    new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name,
                        Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs)
                if args.execution: Globals.pr2.update_rave()
                part_name = {"l":"larm", "r":"rarm"}[lr]
                bodypart2traj[part_name] = new_joint_traj
                redprint("Joint trajectory has length: " + str(len(bodypart2traj[part_name])))
                
            redprint("Executing joint trajectory for segment %s, part %i using arms '%s'"%(seg_name, i_miniseg, bodypart2traj.keys()))

            if args.pid:

                if not args.fake_data_segment: # If running on PR2, add initial state as waypoint and rough interpolate
                    # Add initial position
                    (arm_position, arm_velocity, arm_effort) = robot_states.call_return_joint_states(robot_states.arm_joint_names)
                    traj_length = bodypart2traj['rarm'].shape[0]
                    complete_joint_traj = np.zeros((traj_length+1, 7)) # To include initial state as a way point
                    complete_joint_traj[1:,:] = bodypart2traj['rarm']
                    complete_joint_traj[0,:] = arm_position
                    bodypart2traj['rarm'] = complete_joint_traj

                    # Add initial eff
                    J = np.matrix(np.resize(np.array(robot_states.call_return_jacobian()), (6, 7))) # Jacobian
                    eff = robot_states.compute_end_effector_force(J, arm_effort).T
                    eff =  np.array(eff)[0]
                    init_force = eff[:3]
                    complete_force_traj = np.zeros((traj_length+1, 3))
                    complete_force_traj[1:,:] = new_eefs_segment_rs
                    complete_force_traj[0,:] = init_force

                else:
                    complete_force_traj = new_eefs_segment_rs
                # SECOND RESAMPLING HAPPENS HERE: JOINT VELOCITIES
                if args.no_ds:
                    traj = bodypart2traj['rarm']
                    stamps = asarray(seg_info['stamps'])
                    times = np.array([stamps[i_end] - stamps[i_start]])
                    force = complete_force_traj
                else:
                    times, times_up, traj = pr2_trajectories.return_timed_traj(Globals.pr2, bodypart2traj) # use times and times_up to interpolate the second time
                    force = mu.interp2d(times_up, times, complete_force_traj)
                
                #Globals.robot.SetDOFValues(asarray(fake_seg["joint_states"]["position"][0]))
                if args.visualize:
                    r2r = ros2rave.RosToRave(Globals.robot, asarray(seg_info["joint_states"]["name"]))
                    r2r.set_values(Globals.robot, asarray(seg_info["joint_states"]["position"][0]))
                    for i in range(0, traj.shape[0], 10):
                        handles = []
                        handles.append(Globals.env.plot3(new_xyz,5,np.array([(0,1,0,1) for x in new_xyz])))
                        handles.append(Globals.env.plot3(old_xyz,5,np.array([(1,0,0,1) for x in old_xyz])))
                        handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1)))
                        handles.append(Globals.env.drawlinestrip(new_ee_traj[:,:3,3], 2, (0,1,0,1)))
                        handles.append(Globals.env.drawlinestrip(new_ee_traj_rs[:,:3,3], 2, (0,0,1,1)))
                        # Setting robot arm to joint trajectory
                        r_vals = traj[i,:]
                        Globals.robot.SetDOFValues(r_vals, Globals.robot.GetManipulator('rightarm').GetArmIndices())
                        
                        # Plotting forces from r_gripper_tool_frame
                        hmats = Globals.robot.GetLinkTransformations()
                        trans, rot = conv.hmat_to_trans_rot(hmats[-3])
                        f_start = np.array([0,0,0]) + trans
                        f_end = np.array(force[i])/100 + trans
                        handles.append(Globals.env.drawlinestrip(np.array([f_start, f_end]), 10, (1,0,0,1)))
                        
                        redprint(i)
                        Globals.viewer.Step()


                traj = np.array(np.matrix(traj).T) # 7 x n
                redprint(traj)

                traj = np.resize(traj, (1, traj.shape[1]*7)) #resize the data to 1x7n (n being the number of steps)
                force = np.array(np.matrix(force).T) # 3 x n
                force = np.resize(force, (1, force.shape[1]*3)) #resize the data to 1x3n
                #[traj, force, secs]
                traj_force_secs = np.zeros((1, traj.shape[1] + force.shape[1] + 2))
                traj_force_secs[0,0:traj.shape[1]] = traj
                traj_force_secs[0,traj.shape[1]:traj.shape[1]+force.shape[1]] = force
                traj_force_secs[0,traj.shape[1]+force.shape[1]] = times[len(times)-1]

                traj_force_secs[0,traj.shape[1]+force.shape[1]+1] = args.use_force
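                # Layout of the flattened message: [ joint positions (7 per step) | forces
                # (3 per step) | total duration | use_force flag ]; the node listening on
                # /joint_positions_forces_secs is assumed to unpack it with these block sizes.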
                IPython.embed()
                msg = Float64MultiArray()
                msg.data = traj_force_secs[0].tolist()
                pub = rospy.Publisher("/joint_positions_forces_secs", Float64MultiArray)
                redprint("Press enter to start trajectory")
                raw_input()
                time.sleep(1)
                pub.publish(msg)
                time.sleep(1)
            else:
                #if not success: break
                
                if len(bodypart2traj) > 0:
                    exec_traj_maybesim(bodypart2traj)
Beispiel #55
0
def main():

    demofile = h5py.File(args.h5file, 'r')
    
    trajoptpy.SetInteractive(args.interactive)


    if args.log:
        LOG_DIR = osp.join(osp.expanduser("~/Data/do_task_logs"), datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        os.mkdir(LOG_DIR)
        LOG_COUNT = 0
                        

    if args.execution:
        rospy.init_node("exec_task",disable_signals=True)
        Globals.pr2 = PR2.PR2()
        Globals.env = Globals.pr2.env
        Globals.robot = Globals.pr2.robot
        
    else:
        Globals.env = openravepy.Environment()
        Globals.env.StopSimulation()
        Globals.env.Load("robots/pr2-beta-static.zae")    
        Globals.robot = Globals.env.GetRobots()[0]

    if not args.fake_data_segment:
        grabber = cloudprocpy.CloudGrabber()
        grabber.startRGBD()

    Globals.viewer = trajoptpy.GetViewer(Globals.env)

    #####################

    while True:
        
    
        redprint("Acquire point cloud")
        if args.fake_data_segment:
            fake_seg = demofile[args.fake_data_segment]
            new_xyz = np.squeeze(fake_seg["cloud_xyz"])
            hmat = openravepy.matrixFromAxisAngle(args.fake_data_transform[3:6])
            hmat[:3,3] = args.fake_data_transform[0:3]
            new_xyz = new_xyz.dot(hmat[:3,:3].T) + hmat[:3,3][None,:]
            r2r = ros2rave.RosToRave(Globals.robot, asarray(fake_seg["joint_states"]["name"]))
            r2r.set_values(Globals.robot, asarray(fake_seg["joint_states"]["position"][0]))
        else:
            Globals.pr2.head.set_pan_tilt(0,1.2)
            Globals.pr2.rarm.goto_posture('side')
            Globals.pr2.larm.goto_posture('side')            
            Globals.pr2.join_all()
            time.sleep(.5)

            
            Globals.pr2.update_rave()
            
            rgb, depth = grabber.getRGBD()
            T_w_k = berkeley_pr2.get_kinect_transform(Globals.robot)
            new_xyz = cloud_proc_func(rgb, depth, T_w_k)
    
            #grab_end(new_xyz)

    
        if args.log:
            LOG_COUNT += 1
            import cv2
            cv2.imwrite(osp.join(LOG_DIR,"rgb%i.png"%LOG_COUNT), rgb)
            cv2.imwrite(osp.join(LOG_DIR,"depth%i.png"%LOG_COUNT), depth)
            np.save(osp.join(LOG_DIR,"xyz%i.npy"%LOG_COUNT), new_xyz)
        
        ################################    
        redprint("Finding closest demonstration")
        if args.select_manual:
            seg_name = find_closest_manual(demofile, new_xyz)
        else:
            seg_name = find_closest_auto(demofile, new_xyz)
        
        seg_info = demofile[seg_name]
        redprint("closest demo: %s"%(seg_name))
        if "done" in seg_name:
            redprint("DONE!")
            break
    
    
        if args.log:
            with open(osp.join(LOG_DIR,"neighbor%i.txt"%LOG_COUNT),"w") as fh: fh.write(seg_name) 
        ################################

        redprint("Generating end-effector trajectory")    

        handles = []
        old_xyz = np.squeeze(seg_info["cloud_xyz"])
        handles.append(Globals.env.plot3(old_xyz,5, (1,0,0)))
        handles.append(Globals.env.plot3(new_xyz,5, (0,0,1)))


        scaled_old_xyz, src_params = registration.unit_boxify(old_xyz)
        scaled_new_xyz, targ_params = registration.unit_boxify(new_xyz)        
        f,_ = registration.tps_rpm_bij(scaled_old_xyz, scaled_new_xyz, plot_cb = tpsrpm_plot_cb,
                                       plotting=5 if args.animation else 0,rot_reg=np.r_[1e-4,1e-4,1e-1], n_iter=50, reg_init=10, reg_final=.1)
        f = registration.unscale_tps(f, src_params, targ_params)
        
        handles.extend(plotting_openrave.draw_grid(Globals.env, f.transform_points, old_xyz.min(axis=0)-np.r_[0,0,.1], old_xyz.max(axis=0)+np.r_[0,0,.1], xres = .1, yres = .1, zres = .04))        

        link2eetraj = {}
        for lr in 'lr':
            link_name = "%s_gripper_tool_frame"%lr
            old_ee_traj = asarray(seg_info[link_name]["hmat"])
            new_ee_traj = f.transform_hmats(old_ee_traj)
            link2eetraj[link_name] = new_ee_traj
            
            handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1)))
            handles.append(Globals.env.drawlinestrip(new_ee_traj[:,:3,3], 2, (0,1,0,1)))
    
        miniseg_starts, miniseg_ends = split_trajectory_by_gripper(seg_info)    
        success = True
        print colorize.colorize("mini segments:", "red"), miniseg_starts, miniseg_ends
        for (i_miniseg, (i_start, i_end)) in enumerate(zip(miniseg_starts, miniseg_ends)):
            
            if args.execution=="real": Globals.pr2.update_rave()


            ################################    
            redprint("Generating joint trajectory for segment %s, part %i"%(seg_name, i_miniseg))
            
            
            
            # figure out how we're gonna resample stuff
            lr2oldtraj = {}
            for lr in 'lr':
                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]                 
                old_joint_traj = asarray(seg_info[manip_name][i_start:i_end+1])
                #print (old_joint_traj[1:] - old_joint_traj[:-1]).ptp(axis=0), i_start, i_end
                if arm_moved(old_joint_traj):       
                    lr2oldtraj[lr] = old_joint_traj   
            if len(lr2oldtraj) > 0:
                old_total_traj = np.concatenate(lr2oldtraj.values(), 1)
                JOINT_LENGTH_PER_STEP = .1
                _, timesteps_rs = unif_resample(old_total_traj, JOINT_LENGTH_PER_STEP)
            ####

        
            ### Generate fullbody traj
            bodypart2traj = {}            
            for (lr,old_joint_traj) in lr2oldtraj.items():

                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
                 
                old_joint_traj_rs = mu.interp2d(timesteps_rs, np.arange(len(old_joint_traj)), old_joint_traj)

                ee_link_name = "%s_gripper_tool_frame"%lr
                new_ee_traj = link2eetraj[ee_link_name][i_start:i_end+1]          
                new_ee_traj_rs = resampling.interp_hmats(timesteps_rs, np.arange(len(new_ee_traj)), new_ee_traj)
                if args.execution: Globals.pr2.update_rave()
                new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name,
                 Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs)
                part_name = {"l":"larm", "r":"rarm"}[lr]
                bodypart2traj[part_name] = new_joint_traj

        

            ################################    
            redprint("Executing joint trajectory for segment %s, part %i using arms '%s'"%(seg_name, i_miniseg, bodypart2traj.keys()))

            for lr in 'lr':
                success &= set_gripper_maybesim(lr, binarize_gripper(seg_info["%s_gripper_joint"%lr][i_start]))
                # Doesn't actually check if grab occurred, unfortunately

        
            if not success: break
        
            if len(bodypart2traj) > 0:
                success &= exec_traj_maybesim(bodypart2traj)

            if not success: break


        
        redprint("Segment %s result: %s"%(seg_name, success))
    
    
        if args.fake_data_segment: break
Beispiel #56
0
cyl_radius = 0.025
cyl_height = 0.3
cyl_pos0 = np.r_[.6, helix_radius, table_height + .25]
cyl_pos1 = np.r_[.6, -helix_radius, table_height + .35]

sim_objs = []
sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False))
sim_objs.append(BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False))
sim_objs.append(RopeSimulationObject("rope", init_rope_nodes, rope_params))
sim_objs.append(CylinderSimulationObject("cyl0", cyl_pos0, cyl_radius, cyl_height, dynamic=True))
sim_objs.append(CylinderSimulationObject("cyl1", cyl_pos1, cyl_radius, cyl_height, dynamic=True))

sim = DynamicSimulation()
sim.add_objects(sim_objs)
sim.create_viewer()

sim.robot.SetDOFValues([0.25], [sim.robot.GetJoint('torso_lift_joint').GetJointIndex()])
sim_util.reset_arms_to_side(sim)

# rotate cylinders by 90 deg
for i in range(2):
    bt_cyl = sim.bt_env.GetObjectByName('cyl%d'%i)
    T = openravepy.matrixFromAxisAngle(np.array([np.pi/2,0,0]))
    T[:3,3] = bt_cyl.GetTransform()[:3,3]
    bt_cyl.SetTransform(T) # SetTransform needs to be used in the Bullet object, not the openrave body
sim.update()

sim.settle(max_steps=1000)
sim.viewer.Idle()
Beispiel #57
0
def calc_T(xyz, rod):
    Tt = rave.matrixFromAxisAngle(rod)
    Tt[:3, 3] = xyz
    return Tt
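
# calc_T composes a 4x4 homogeneous transform from a Rodrigues (axis-angle) vector and a
# translation; e.g. (illustrative values) calc_T([0.5, 0.0, 0.2], [0, 0, np.pi/2]) would give a
# 90-degree rotation about z placed at (0.5, 0, 0.2).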
def main():
    import IPython
    demofile = h5py.File(args.h5file, 'r')
    trajoptpy.SetInteractive(args.interactive)


    if args.log:
        LOG_DIR = osp.join(osp.expanduser("~/Data/do_task_logs"), datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        os.mkdir(LOG_DIR)
        LOG_COUNT = 0
                        

    if args.execution:
        rospy.init_node("exec_task",disable_signals=True)
        Globals.pr2 = PR2.PR2()
        Globals.env = Globals.pr2.env
        Globals.robot = Globals.pr2.robot
        
    else:
        Globals.env = openravepy.Environment()
        Globals.env.StopSimulation()
        Globals.env.Load("robots/pr2-beta-static.zae")    
        Globals.robot = Globals.env.GetRobots()[0]

    if not args.eval.fake_data_segment:
        grabber = cloudprocpy.CloudGrabber()
        grabber.startRGBD()

    Globals.viewer = trajoptpy.GetViewer(Globals.env)

    #####################

    while True:
    
        redprint("Acquire point cloud")
        if args.eval.fake_data_segment:
            fake_seg = demofile[args.eval.fake_data_segment]
            new_xyz = np.squeeze(fake_seg["cloud_xyz"])
            hmat = openravepy.matrixFromAxisAngle(args.eval.fake_data_transform[3:6])
            hmat[:3,3] = args.eval.fake_data_transform[0:3]
            new_xyz = new_xyz.dot(hmat[:3,:3].T) + hmat[:3,3][None,:]
            r2r = ros2rave.RosToRave(Globals.robot, asarray(fake_seg["joint_states"]["name"]))
            r2r.set_values(Globals.robot, asarray(fake_seg["joint_states"]["position"][0]))
            #Globals.pr2.head.set_pan_tilt(0,1.2)
            #Globals.pr2.rarm.goto_posture('side')
            #Globals.pr2.larm.goto_posture('side')            
            #Globals.pr2.join_all()
            #time.sleep(2)
        else:
            #Globals.pr2.head.set_pan_tilt(0,1.2)
            #Globals.pr2.rarm.goto_posture('side')
            #Globals.pr2.larm.goto_posture('side')            
            #Globals.pr2.join_all()
            #time.sleep(2)

            
            Globals.pr2.update_rave()
            
            rgb, depth = grabber.getRGBD()
            T_w_k = berkeley_pr2.get_kinect_transform(Globals.robot)
            new_xyz = cloud_proc_func(rgb, depth, T_w_k)
            print "got new xyz"
            redprint(new_xyz)
            #grab_end(new_xyz)

    
        if args.log:
            LOG_COUNT += 1
            import cv2
            cv2.imwrite(osp.join(LOG_DIR,"rgb%i.png"%LOG_COUNT), rgb)
            cv2.imwrite(osp.join(LOG_DIR,"depth%i.png"%LOG_COUNT), depth)
            np.save(osp.join(LOG_DIR,"xyz%i.npy"%LOG_COUNT), new_xyz)
        

        


        ################################    
        redprint("Finding closest demonstration")
        if args.select_manual:
            seg_name = find_closest_manual(demofile, new_xyz)
        else:
            seg_name = find_closest_auto(demofile, new_xyz)
        
        seg_info = demofile[seg_name]
        redprint("closest demo: %s"%(seg_name))
        if "done" in seg_name:
            redprint("DONE!")
            break
    
    
        if args.log:
            with open(osp.join(LOG_DIR,"neighbor%i.txt"%LOG_COUNT),"w") as fh: fh.write(seg_name) 
        ################################



        ### Old end effector forces at r_gripper_tool_frame (including torques)
        old_forces = seg_info['end_effector_forces'][:,0:3,:]
        old_torques = seg_info['end_effector_forces'][:,3:6,:]


        redprint("Generating end-effector trajectory")    

        handles = []
        old_xyz = np.squeeze(seg_info["cloud_xyz"])
       


        scaled_old_xyz, src_params = registration.unit_boxify(old_xyz)
        scaled_new_xyz, targ_params = registration.unit_boxify(new_xyz)        
        f,_ = registration.tps_rpm_bij(scaled_old_xyz, scaled_new_xyz, plot_cb = tpsrpm_plot_cb,
                                       plotting=5 if args.animation else 0,rot_reg=np.r_[1e-4,1e-4,1e-1], n_iter=50, reg_init=10, reg_final=.1)
        f = registration.unscale_tps(f, src_params, targ_params)
        
        old_xyz_transformed = f.transform_points(old_xyz)

        #handles.append(Globals.env.plot3(old_xyz_transformed,5, np.array([(0,0,1,1) for i in old_xyz_transformed])))

        handles.extend(plotting_openrave.draw_grid(Globals.env, f.transform_points, old_xyz.min(axis=0)-np.r_[0,0,.1], old_xyz.max(axis=0)+np.r_[0,0,.1], xres = .1, yres = .1, zres = .04))        

        link2eetraj = {}
        for lr in 'r':
            link_name = "%s_gripper_tool_frame"%lr
            old_ee_traj = asarray(seg_info[link_name]["hmat"])
            new_ee_traj = f.transform_hmats(old_ee_traj)
            link2eetraj[link_name] = new_ee_traj
            #print old_ee_traj
            old_ee_pos = old_ee_traj[:,0:3,3]
            #print old_ee_pos

            # End effector forces as opposed to end effector trajectories
            dfdxs = f.compute_jacobian(old_ee_pos)
            old_eefs = []
            new_eefs = []
            for i in xrange(len(old_forces)):
                dfdx = np.matrix(dfdxs[i])
                old_force = np.matrix(old_forces[i])
                old_torque = np.matrix(old_torques[i])

                new_force = (dfdx * old_force)
                new_torque = (dfdx * old_torque)
                old_eefs.append(np.vstack((old_force, old_torque)))
                new_eefs.append(np.vstack((new_force, new_torque)))

            old_eefs = np.asarray(old_eefs)[:,:,0]
            new_eefs = np.asarray(new_eefs)[:,:,0]

            force_data = {}
            force_data['old_eefs'] = old_eefs
            force_data['new_eefs'] = new_eefs
            force_file = open("trial.pickle", 'wb')
            pickle.dump(force_data, force_file)
            force_file.close()
            new_ee_traj_x = new_ee_traj
            
        
        miniseg_starts, miniseg_ends = split_trajectory_by_gripper(seg_info)    
        success = True
        
        
        print colorize.colorize("mini segments:", "red"), miniseg_starts, miniseg_ends
        for (i_miniseg, (i_start, i_end)) in enumerate(zip(miniseg_starts, miniseg_ends)):
            
            if args.execution=="real": Globals.pr2.update_rave()


            ################################    
            redprint("Generating joint trajectory for segment %s, part %i"%(seg_name, i_miniseg))
            
            
            
            # figure out how we're gonna resample stuff
            lr2oldtraj = {}
            for lr in 'r':
                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
                old_joint_traj = asarray(seg_info[manip_name][i_start:i_end+1])
                #print (old_joint_traj[1:] - old_joint_traj[:-1]).ptp(axis=0), i_start, i_end
                #if arm_moved(old_joint_traj): NOT SURE WHY BUT THIS IS RETURNING FALSE
                lr2oldtraj[lr] = old_joint_traj

            if args.visualize:
                    r2r = ros2rave.RosToRave(Globals.robot, asarray(seg_info["joint_states"]["name"]))
                    r2r.set_values(Globals.robot, asarray(seg_info["joint_states"]["position"][0]))
                    for i in range(0, lr2oldtraj['r'].shape[0], 10):
                        handles = []
                        handles.append(Globals.env.plot3(new_xyz,5,np.array([(0,1,0,1) for x in new_xyz])))
                        handles.append(Globals.env.plot3(old_xyz,5,np.array([(1,0,0,1) for x in old_xyz])))
                        handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1)))
                        # Setting robot arm to joint trajectory
                        r_vals = lr2oldtraj['r'][i,:]
                        Globals.robot.SetDOFValues(r_vals, Globals.robot.GetManipulator('rightarm').GetArmIndices())
                        # Plotting forces from r_gripper_tool_frame
                        hmats = Globals.robot.GetLinkTransformations()
                        trans, rot = conv.hmat_to_trans_rot(hmats[-3])
                        f_start = np.array([0,0,0]) + trans
                        #IPython.embed()
                        f_end = np.array(old_forces[i].T)[0]/100 + trans
                        handles.append(Globals.env.drawlinestrip(np.array([f_start, f_end]), 10, (1,0,0,1)))
                        
                        redprint(i)
                        Globals.viewer.Step()
            if len(lr2oldtraj) > 0:
                old_total_traj = np.concatenate(lr2oldtraj.values(), 1)
                JOINT_LENGTH_PER_STEP = .1
                # FIRST RESAMPLING HAPPENS HERE: JOINT_LENGTH
                _, timesteps_rs = unif_resample(old_total_traj, JOINT_LENGTH_PER_STEP) # Timesteps only, can use to interpolate eefs for the first time
            ####
            new_eefs_segment = asarray(new_eefs[i_start:i_end+1,:]) # Extract miniseg, and re-sample

            if args.no_ds:
                new_eefs_segment_rs = new_eefs_segment
            else:
                new_eefs_segment_rs = mu.interp2d(timesteps_rs, np.arange(len(new_eefs_segment)), new_eefs_segment)
        
            ### Generate fullbody traj
            bodypart2traj = {}
            for (lr,old_joint_traj) in lr2oldtraj.items():

                manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
                ee_link_name = "%s_gripper_tool_frame"%lr
                new_ee_traj = link2eetraj[ee_link_name][i_start:i_end+1]
                if args.no_ds:
                    old_joint_traj_rs = old_joint_traj
                    new_ee_traj_rs = new_ee_traj
                    ds_inds = np.arange(0, len(new_ee_traj_rs), args.trajopt_ds)
                    new_ee_traj_rs = new_ee_traj_rs[ds_inds]
                    
                    old_joint_traj_rs = old_joint_traj_rs[ds_inds]
                    new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name,
                        Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs)
                    new_joint_traj = mu.interp2d(np.arange(len(old_joint_traj)), np.arange(0, len(new_joint_traj) * args.trajopt_ds, args.trajopt_ds), new_joint_traj)
                else:
                    old_joint_traj_rs = mu.interp2d(timesteps_rs, np.arange(len(old_joint_traj)), old_joint_traj)
                    new_ee_traj_rs = resampling.interp_hmats(timesteps_rs, np.arange(len(new_ee_traj)), new_ee_traj)
                    new_joint_traj = planning.plan_follow_traj(Globals.robot, manip_name,
                        Globals.robot.GetLink(ee_link_name), new_ee_traj_rs,old_joint_traj_rs)
                if args.execution: Globals.pr2.update_rave()
                part_name = {"l":"larm", "r":"rarm"}[lr]
                bodypart2traj[part_name] = new_joint_traj
                redprint("Joint trajectory has length: " + str(len(bodypart2traj[part_name])))
                
            redprint("Executing joint trajectory for segment %s, part %i using arms '%s'"%(seg_name, i_miniseg, bodypart2traj.keys()))


            #for lr in 'r':
            #    set_gripper_maybesim(lr, binarize_gripper(seg_info["%s_gripper_joint"%lr][i_start]))

            if args.pid:
                # Add trajectory to arrive at the initial state
                redprint("Press enter to use current position")
                raw_input()
                (arm_position, arm_velocity, arm_effort) = robot_states.call_return_joint_states(robot_states.r_arm_joint_names)
                diff = np.array(arm_position - bodypart2traj['rarm'][0,:])
                maximum = max(abs(diff))
                speed = (300.0/360.0*2*(np.pi))
                time_needed = maximum / speed 
                initial_pos_traj = mu.interp2d(np.arange(0, time_needed, 0.01), np.array([0,time_needed]), np.array([arm_position, bodypart2traj['rarm'][0,:]]))
                # length of the initial trajectory, use this length for padding PD gains
                initial_traj_length = initial_pos_traj.shape[0]
                initial_force_traj = np.array([np.zeros(6) for i in range(initial_traj_length)])
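                # NOTE: the ramp above linearly interpolates (at 100 Hz) from the measured arm
                # position to the first waypoint of the planned trajectory, with its duration
                # chosen so the largest joint displacement moves at roughly 300 deg/s; the
                # matching force rows are zero so no wrench is commanded during the approach.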

                temptraj = bodypart2traj['rarm']
                bodypart2traj['rarm'] = np.concatenate((initial_pos_traj, temptraj), axis=0)

                complete_force_traj = np.concatenate((initial_force_traj, new_eefs), axis=0)

                # SECOND RESAMPLING HAPPENS HERE: JOINT VELOCITIES
                if args.no_ds:
                    traj = bodypart2traj['rarm']
                    stamps = asarray(seg_info['stamps'])
                    times = np.array([stamps[i_end] - stamps[i_start]])
                    F = complete_force_traj
                else:
                    times, times_up, traj = pr2_trajectories.return_timed_traj(Globals.pr2, bodypart2traj) # times/times_up are used to resample the force trajectory onto the retimed joint trajectory
                    F = mu.interp2d(times_up, times, complete_force_traj)
                
                #Globals.robot.SetDOFValues(asarray(fake_seg["joint_states"]["position"][0]))
                if args.visualize:
                    r2r = ros2rave.RosToRave(Globals.robot, asarray(seg_info["joint_states"]["name"]))
                    r2r.set_values(Globals.robot, asarray(seg_info["joint_states"]["position"][0]))
                    for i in range(0, traj.shape[0], 10):
                        handles = []
                        handles.append(Globals.env.plot3(new_xyz,5,np.array([(0,1,0,1) for x in new_xyz])))
                        handles.append(Globals.env.plot3(old_xyz,5,np.array([(1,0,0,1) for x in old_xyz])))
                        handles.append(Globals.env.drawlinestrip(old_ee_traj[:,:3,3], 2, (1,0,0,1)))
                        handles.append(Globals.env.drawlinestrip(new_ee_traj[:,:3,3], 2, (0,1,0,1)))
                        handles.append(Globals.env.drawlinestrip(new_ee_traj_rs[:,:3,3], 2, (0,0,1,1)))
                        # Setting robot arm to joint trajectory
                        r_vals = traj[i,:]
                        Globals.robot.SetDOFValues(r_vals, Globals.robot.GetManipulator('rightarm').GetArmIndices())
                        
                        # Plotting forces from r_gripper_tool_frame
                        hmats = Globals.robot.GetLinkTransformations()
                        trans, rot = conv.hmat_to_trans_rot(hmats[-3])
                        f_start = np.array([0,0,0]) + trans
                        f_end = np.array(old_forces[i].T)[0]/100 + trans
                        handles.append(Globals.env.drawlinestrip(np.array([f_start, f_end]), 10, (1,0,0,1)))
                        
                        redprint(str(i))
                        Globals.viewer.Idle()

                # Controller code in joint space
                traj_length = traj.shape[0]
                pgains = np.asarray([2400.0, 1200.0, 1000.0, 700.0, 300.0, 300.0, 300.0])
                dgains = np.asarray([18.0, 10.0, 6.0, 4.0, 6.0, 4.0, 4.0])
                m = np.array([3.33, 1.16, 0.1, 0.25, 0.133, 0.0727, 0.0727]) # masses in joint space (feed forward)
                vel_factor = 1e-3
                # New costs: build one 18x18 cost matrix per timestep from the learned covariances
                covariances, means = analyze_data.run_analyze_data(args)
                CostsNew = []
                counterCovs = 0
                endTraj = False
                covThiss = []
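                # Layout of each 18x18 block (assuming the 12x12 covariance is over
                # [EE pose (6), EE wrench (6)]): indices 0:6 = pose, 6:12 = EE velocity
                # (assigned the small fixed covariance vel_factor, i.e. a large cost weight
                # after inversion), 12:18 = wrench; the pose/wrench cross terms are copied
                # from covMat but dropped when the matrix is reduced to its diagonal below.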
                for covMat in covariances:
                    covThis = np.zeros((18,18))
                    covThis[0:6,0:6] = covMat[0:6,0:6]
                    covThis[12:18,12:18] = covMat[6:12, 6:12]
                    covThis[0:6, 12:18] = covMat[0:6, 6:12]
                    covThis[12:18, 0:6] = covMat[6:12, 0:6]
                    covThis[6:12, 6:12] = np.eye(6)*vel_factor
                    covThis = np.diag(np.diag(covThis))
                    covThiss.append(covThis)
                    #if len(covThiss) <= 69 and len(covThiss) >= 47:
                    #    covThis[12:18,12:18] = np.diag([0.13, 0.06, 0.07, 0.005, 0.01, 0.004])
                    invCov = np.linalg.inv(covThis)  # constant over this block, so invert once
                    for j in range(args.eval.downsample_traj):
                        CostsNew.append(invCov)
                        counterCovs = counterCovs + 1
                        if counterCovs >= traj_length:
                            endTraj = True
                            break
                    if endTraj:
                        break
                allCs = []
                x = np.ones(6)          # position weights
                v = np.ones(6) * 1e-3   # velocity weights
                a = np.ones(6) * 1e-6   # force weights
                Ct = np.diag(np.hstack((x, v, a))) # hand-tuned fallback cost in end-effector space
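                # Ct appears to be used only by the commented-out allCs line further down;
                # the per-timestep CostsNew matrices built above are what enter the LQR pass.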
                Ms = []
                num_steps = i_end - i_start + 1
                
                
                stamps = asarray(seg_info['stamps'][i_start:i_end+1])  # timestamps for this mini-segment only


                arm = Globals.robot.GetManipulator("rightarm")
                jacobians = []
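                # Stack the 3x7 linear and 3x7 angular-velocity Jacobians into one 6x7
                # Jacobian per timestep, evaluated at the planned joint configuration.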
                for i in range(traj.shape[0]):
                    r_vals = traj[i,:]
                    Globals.robot.SetDOFValues(r_vals, Globals.robot.GetManipulator('rightarm').GetArmIndices())
                    jacobians.append(np.vstack((arm.CalculateJacobian(), arm.CalculateAngularVelocityJacobian())))
                jacobians = asarray(jacobians)
                #jacobiansx = asarray(seg_info["jacobians"][i_start:i_end+1])
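                # M maps the joint-space tuple [q (7), qdot (7), tau (7)] into the
                # end-effector tuple [x (6), xdot (6), F (6)]: positions and velocities
                # through the Jacobian, and torques through the operational-space relation
                # F = (J M^-1 J^T)^-1 J M^-1 tau, with M the diagonal feed-forward mass matrix.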
                for t in range(num_steps):
                    M = np.zeros((18, 21))
                    J = jacobians[t]
                    M[0:6,0:7] = J
                    M[6:12,7:14] = J
                    mdiag = np.diag(m) # Convert joint feedforward values into diagonal array
                    mdiag_inv = np.linalg.inv(mdiag)
                    M[12:18,14:21] = np.linalg.inv((J.dot(mdiag_inv).dot(np.transpose(J)))).dot(J).dot(mdiag_inv)
                    Ms.append(M)
                for t in range(num_steps):
                    topad = np.zeros((21,21))
                    topad[0:7,0:7] = np.diag(pgains) * 0.1
                    topad[7:14,7:14] = np.diag(dgains) * 0.1
                    topad[14:21,14:21] = np.eye(7) * 0.1
                    #allCs.append(np.transpose(Ms[t]).dot(Ct).dot(Ms[t]) + topad)
                    allCs.append(np.transpose(Ms[t]).dot(CostsNew[t]).dot(Ms[t]) + topad)
                Kps = []
                Kvs = []
                Ks = []
                Qt = None
                Vs = []
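                # Finite-horizon LQR backward pass over the state [q (7), qdot (7)] with
                # action tau (7): Q_t = C_t + F_t^T V_{t+1} F_t, feedback gain
                # K_t = -Quu^-1 Qux, and value V_t = Qxx - Qxu Quu^-1 Qux (symmetrized).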
                for t in range(num_steps-1, -1, -1):
                    if Qt is None:
                        Qt = allCs[t]
                    else:
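                        # F_t is the linearized discrete-time dynamics of a per-joint
                        # double integrator with mass m[j]:
                        #   q_{t+1}    = q_t + dt*qdot_t + (dt^2/m)*tau_t
                        #   qdot_{t+1} = qdot_t + (dt/m)*tau_t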
                        Ft = np.zeros((14, 21))
                        for j in range(14):
                            Ft[j][j] = 1.0
                        deltat = abs(stamps[t+1] - stamps[t])
                        #print(deltat)
                        for j in range(7):
                            Ft[j][j+7] = deltat
                        for j in range(7):
                            Ft[j+7][j+14] = deltat/m[j]  
                        for j in range(7):
                            Ft[j][j+14] = ((deltat)**2)/m[j]
                        Qt = allCs[t] + (np.transpose(Ft).dot(Vs[num_steps-2-t]).dot(Ft))
                    Qxx = Qt[0:14, 0:14]
                    Quu = Qt[14:21, 14:21]
                    Qxu = Qt[0:14, 14:21]
                    Qux = Qt[14:21, 0:14]
                    Quuinv = np.linalg.inv(Quu)
                    Vt = Qxx - Qxu.dot(Quuinv).dot(Qux)
                    Vt = 0.5*(Vt + np.transpose(Vt))
                    Kt = -1*(Quuinv.dot(Qux))
                    Ks.append(Kt)
                    Kps.append(Kt[:, 0:7])
                    Kvs.append(Kt[:, 7:14])
                    Vs.append(Vt)

                Ks = Ks[::-1]
                Kps = Kps[::-1]
                Kvs = Kvs[::-1]


                JKpJ = np.asarray(Kps)
                JKvJ = np.asarray(Kvs)




                total_length = num_steps + initial_traj_length

                # Pad the initial approach segment with fixed PD gains
                # (negated, presumably to match the controller's error-sign convention)
                pgainsdiag = np.diag(np.asarray([-2400.0, -1200.0, -1000.0, -700.0, -300.0, -300.0, -300.0]))
                dgainsdiag = np.diag(np.asarray([-18.0, -10.0, -6.0, -4.0, -6.0, -4.0, -4.0]))
                addkp = np.asarray([pgainsdiag for i in range(initial_traj_length)])
                addkv = np.asarray([dgainsdiag for i in range(initial_traj_length)])
                JKpJ = np.concatenate((addkp, JKpJ), axis=0)
                JKvJ = np.concatenate((addkv, JKvJ), axis=0)
                
                Kps = []
                Kvs = []

                for i in range(num_steps + initial_traj_length):
                    Kps.append(np.zeros((6,6)))
                    Kvs.append(np.zeros((6,6)))

                Kps = np.asarray(Kps)
                Kvs = np.asarray(Kvs)
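                # Kps/Kvs are 6x6 zero matrices (task-space gains); they are still packed
                # into the message below but contribute no feedback, since control here is
                # done entirely with the joint-space gains JKpJ/JKvJ.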

                # Optionally override the LQR gains with the fixed PD gains (useful for testing)
                if args.pdgains:
                    JKpJ = np.asarray([pgainsdiag for i in range(total_length)])
                    JKvJ = np.asarray([dgainsdiag for i in range(total_length)])
                


                qs = traj
                IPython.embed()  # debug breakpoint: drops into an interactive shell before publishing


                Kps = np.resize(Kps, (1, 36 * total_length))[0]
                Kvs = np.resize(Kvs, (1, 36 * total_length))[0]
                
                JKvJ = np.resize(JKvJ, (1, 49*total_length))[0]
                JKpJ = np.resize(JKpJ, (1, 49*total_length))[0]
                
                # For use with new controller                

                qs = np.resize(qs, (1, qs.shape[0]*7))[0] #resize the data to 1x7n (n being the number of steps)
                
                F = np.resize(F, (1, F.shape[0]*6))[0] #resize the data to 1x6n
                

                # Flattened message layout: [qs, JKpJ, JKvJ, F, Kps, Kvs, use_force, duration]
                data = np.zeros((1, len(qs) + len(JKpJ) + len(JKvJ) + len(F) + len(Kps) + len(Kvs) + 2))
                data[0][0:len(qs)] = qs
                data[0][len(qs):len(qs)+len(JKpJ)] = JKpJ
                data[0][len(qs)+len(JKpJ):len(qs)+len(JKpJ)+len(JKvJ)] = JKvJ
                data[0][len(qs)+len(JKpJ)+len(JKvJ):len(qs)+len(JKpJ)+len(JKvJ)+len(F)] = F
                data[0][len(qs)+len(JKpJ)+len(JKvJ)+len(F):len(qs)+len(JKpJ)+len(JKvJ)+len(F)+len(Kps)] = Kps
                data[0][len(qs)+len(JKpJ)+len(JKvJ)+len(F)+len(Kps):len(qs)+len(JKpJ)+len(JKvJ)+len(F)+len(Kps)+len(Kvs)] = Kvs
                data[0][-2] = args.use_force
                data[0][-1] = stamps[-1] - stamps[0] + (initial_traj_length * 0.01)  # mini-segment duration plus the 10 ms-per-step approach (stamps was re-sliced above, so index it locally)
                
                # For use with old controller
                """
                qs = np.array(np.matrix(traj).T) # 7 x n
                qs = np.resize(qs, (1, qs.shape[1]*7))[0] #resize the data to 1x7n (n being the number of steps)
                F = np.array(np.matrix(F).T) # 6 x n
                F = F[0:3,:]
                F = np.resize(F, (1, F.shape[1]*3))[0] #resize the data to 1x3n
                data = np.zeros((1, len(qs) + len(F) + 2))
                data[0][0:len(qs)] = qs
                data[0][len(qs):len(qs)+len(F)] = F
                data[0][-1] = args.use_force
                data[0][-2] = stamps[i_end] - stamps[i_start]
                """


                msg = Float64MultiArray()
                msg.data = data[0].tolist()
                pub = rospy.Publisher("/controller_data", Float64MultiArray)
                redprint("Press enter to start trajectory")
                raw_input()
                time.sleep(1)
                pub.publish(msg)
                time.sleep(1)
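                # The publisher is created just before publishing, so the surrounding
                # time.sleep(1) calls give subscribers a chance to connect before and
                # after the single message is sent (a common rospy pattern).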
            else:
                #if not success: break
                
                if len(bodypart2traj) > 0:
                    exec_traj_maybesim(bodypart2traj)