Example #1

def create_data():
    # detect grasps on the whole cloud, then keep only those whose bottom
    # point lies inside the grasp_space bounding box
    grasps = grasp_proxy.detect_grasps_whole_cloud(cloud, grasp_offsets)
    grasps = filter(
        lambda g: (grasp_space[0][0] < g.bottom[0] < grasp_space[0][1] and
                   grasp_space[1][0] < g.bottom[1] < grasp_space[1][1] and
                   grasp_space[2][0] < g.bottom[2] < grasp_space[2][1]),
        grasps)

    print 'grasps after filter: ' + str(len(grasps))
    rviz_node.cloud_pub.publish(cloud_proxy.convert_to_point_cloud2(cloud))
    rviz_node.all_grasps_pub.publish(
        plot.createGraspsMarkerArray(grasps, rgba=[1, 0, 0, 0.5]))
    rviz_node.grasp_space_pub.publish(plot.createGraspsPosCube(grasp_space))

    # build a 3D binary image and a 2D depth image for every grasp
    image_3d = []
    image_2d = []
    for grasp in grasps:
        hand_des_3d = HandDescriptor(
            HandDescriptor.T_from_approach_axis_center(
                grasp.approach, grasp.axis, (grasp.bottom + grasp.top) / 2),
            32)
        hand_des_3d.generate_3d_binary_image(cloud)
        image_3d.append(hand_des_3d.image)

        hand_des_2d = HandDescriptor(
            HandDescriptor.T_from_approach_axis_center(
                grasp.approach, grasp.axis, (grasp.bottom + grasp.top) / 2))
        hand_des_2d.generate_depth_image(cloud)
        image_2d.append(hand_des_2d.image)

    image_3d = np.stack(image_3d, 0)
    image_2d = np.stack(image_2d, 0)

    # append to any previously saved data, then write both files back out
    f_3d = os.path.dirname(__file__) + '/../data/normal_shelf1_3d.npy'
    f_2d = os.path.dirname(__file__) + '/../data/normal_shelf1_2d.npy'
    try:
        old_3d = np.load(f_3d)
        if old_3d.shape[0] != 0:
            image_3d = np.vstack([image_3d, old_3d])
    except IOError:
        pass  # no existing 3D data file; start fresh
    try:
        old_2d = np.load(f_2d)
        if old_2d.shape[0] != 0:
            image_2d = np.vstack([image_2d, old_2d])
    except IOError:
        pass  # no existing 2D data file; start fresh
    finally:
        raw_input('write?')
        np.save(f_3d, image_3d)
        print 'current 3d data: ' + str(image_3d.shape[0])
        np.save(f_2d, image_2d)
        print 'current 2d data: ' + str(image_2d.shape[0])
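The try/except/finally block above accumulates newly generated images on top of any previously saved ones. A defensive variant of the same accumulate-and-save pattern, checking for the file explicitly rather than catching the load error (a sketch with a hypothetical helper name):

import os
import numpy as np

def append_and_save(path, new_data):
    # stack new samples on top of any previously saved ones, then overwrite
    if os.path.exists(path):
        old = np.load(path)
        if old.shape[0] != 0:
            new_data = np.vstack([new_data, old])
    np.save(path, new_data)
    return new_data.shape[0]
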
Example #2

  def SenseAndActMultiple(self, hand, prevDescs, prevProbs, cloud, tableHeight, t):
    '''Evaluates action-values from the initial descriptor and returns the top
    len(prevDescs) actions as descriptors with associated probabilities.'''

    # this variant always expands from the initial descriptor
    prevDesc = self.initDesc
    
    # generate input image
    targImage = prevDesc.GenerateHeightmap(cloud, tableHeight)
    handImage = zeros((self.imP, self.imP, 0)) if hand is None else hand.image
    timeImage = zeros((self.imP, self.imP, 0)) if not self.includeTime else \
      float(self.tMax - t) / self.tMax * ones((self.imP, self.imP, 1))
    o = concatenate((targImage, handImage, timeImage), axis = 2)

    # evaluate action-values and sample n of them
    values = self.EvaluateActions(o).flatten()
    flatActionIdxs = argsort(-values)[:len(prevDescs)]
    values = values[flatActionIdxs]
    
    # convert values into probabilities (of separate Bernoulli random variables)
    values[values > self.qMax] = self.qMax
    values[values < self.qMin] = self.qMin
    probabilities = (values - self.qMin) / (self.qMax - self.qMin)
    
    # create a descriptor for each sampled action
    descriptors = []
    for flatIdx in flatActionIdxs:
      idx = unravel_index(flatIdx, self.outputShape)
      T = copy(prevDesc.T)
      T[0:3, 3] = self.actionsInHandFrame[idx] + prevDesc.center
      descriptors.append(HandDescriptor(T, self.imP, self.imDNext, self.imWNext))

    return descriptors, probabilities
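The clamping step above maps raw action-values into [qMin, qMax] and rescales them to [0, 1], so each value can be read as the success probability of an independent Bernoulli variable. A minimal numpy sketch of the same mapping, with illustrative bounds:

from numpy import array, clip

qMin, qMax = 0.0, 1.0  # illustrative bounds; the class supplies its own
values = array([-0.2, 0.3, 0.7, 1.4])
probabilities = (clip(values, qMin, qMax) - qMin) / (qMax - qMin)
# probabilities -> [0.0, 0.3, 0.7, 1.0]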
Example #3

    def SenseAndAct(self, hand, prevDesc, t, rlEnv, unbias):
        '''Generates candidate descriptors over the action grid and selects one
        epsilon-greedily, returning the observation and chosen descriptor.'''

        handImage = zeros((self.imP, self.imP, 0),
                          dtype='float32') if hand is None else hand.image

        # generate candidate descriptors
        descs = []
        for i in xrange(self.actionSpaceSize[0]):
            for j in xrange(self.actionSpaceSize[1]):
                for k in xrange(self.actionSpaceSize[2]):
                    T = copy(prevDesc.T)
                    T[0:3, 3] = self.actionsInHandFrame[(i, j, k)] + prevDesc.center
                    descs.append(
                        HandDescriptor(T, self.imP, self.imD, self.imW))

        # decide which location in the image to zoom into
        bestIdx, bestValue, epsilon = self.SelectIndexEpsilonGreedy(
            descs, handImage, unbias, rlEnv)

        # compose result
        desc = descs[bestIdx]
        o = concatenate((desc.image, handImage), axis=2)

        return o, desc, bestValue, epsilon
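The three nested loops above visit every (i, j, k) cell of the action grid exactly once. An equivalent enumeration with itertools.product (a sketch, assuming a 3-element actionSpaceSize):

from itertools import product

actionSpaceSize = (2, 2, 2)  # illustrative grid shape
cells = list(product(*[range(n) for n in actionSpaceSize]))
# cells == [(0, 0, 0), (0, 0, 1), ..., (1, 1, 1)], same order as the nested loops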
Example #4

  def SetInitialDescriptor(self, offset):
    '''Sets the center of the initial descriptor to the provided offset,
    raised by half the image depth.'''

    T = eye(4)
    T[0:3, 3] = array([offset[0], offset[1], offset[2] + self.imD / 2.0])
    self.initDesc = HandDescriptor(T, self.imP, self.imD, self.imW)
Example #5

  def SetInitialDescriptor(self, center):
    '''Sets the center of the initial descriptor to the provided center.'''

    approach = array([0, 0, -1])
    axis = array([1, 0, 0])
    bTh = hand_descriptor.PoseFromApproachAxisCenter(approach, axis, center)
    self.initDesc = HandDescriptor(bTh)
    self.initDesc.imD = 4*self.initDesc.imD
    self.initDesc.imH = 4*self.initDesc.imH
    self.initDesc.imW = 4*self.initDesc.imW
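PoseFromApproachAxisCenter packs the approach and axis vectors and the center point into a 4x4 homogeneous transform. One plausible construction, assuming orthonormal inputs (the library's actual column convention may differ):

from numpy import column_stack, cross, eye

def pose_from_approach_axis_center(approach, axis, center):
    T = eye(4)
    binormal = cross(approach, axis)  # completes a right-handed frame
    T[0:3, 0:3] = column_stack((axis, binormal, approach))
    T[0:3, 3] = center
    return T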
Example #6

    def ComposeAction(self, prevDesc, baseCoord, imageCoord):
        '''Creates new action and hand descriptor objects.'''

        action = [prevDesc.image, copy(imageCoord)]

        T = copy(prevDesc.T)
        T[0:3, 3] = baseCoord
        desc = HandDescriptor(T)

        return action, desc
Example #7

    def UnpackGrasps(self, mGrasps):
        '''Converts a list of grasps in Matlab format into a list of HandDescriptor objects.'''

        grasps = []
        for mGrasp in mGrasps:

            top = array(mGrasp["top"]).flatten()
            bottom = array(mGrasp["bottom"]).flatten()
            axis = array(mGrasp["axis"]).flatten()
            approach = array(mGrasp["approach"]).flatten()
            score = mGrasp["score"]

            # create grasp object
            T = hand_descriptor.PoseFromApproachAxisCenter(
                approach, axis, 0.5 * bottom + 0.5 * top)
            grasp = HandDescriptor(T)
            grasp.score = score
            grasps.append(grasp)

        return grasps
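UnpackGrasps expects each Matlab grasp to behave like a mapping with 'top', 'bottom', 'axis', 'approach', and 'score' entries whose values flatten to 3-vectors (or a scalar for the score). A hypothetical input record, for illustration only:

mGrasps = [{
    'top': [[0.0], [0.0], [0.10]],       # hypothetical Matlab-style column vectors
    'bottom': [[0.0], [0.0], [0.02]],
    'axis': [[1.0], [0.0], [0.0]],
    'approach': [[0.0], [0.0], [-1.0]],
    'score': 0.87,
}]
grasps = proxy.UnpackGrasps(mGrasps)  # proxy: an instance of the class above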
Example #8

  def ComposeAction(self, prevDesc, theta):
    '''Creates new action and hand descriptor objects.'''

    action = [prevDesc.image, copy(theta)]

    R = openravepy.matrixFromAxisAngle(prevDesc.approach, theta[0])[0:3,0:3]

    approach = prevDesc.approach
    axis = dot(R, prevDesc.axis)
    center = prevDesc.center

    T = hand_descriptor.PoseFromApproachAxisCenter(approach, axis, center)
    desc = HandDescriptor(T)

    return action, desc
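matrixFromAxisAngle builds the rotation of theta[0] radians about the approach vector, so the axis is spun around the approach while the approach and center stay fixed. A numpy-only sketch of the same rotation via Rodrigues' formula, assuming a unit-length axis:

from numpy import array, cos, dot, eye, sin

def rotation_about(axis, angle):
    # R = I + sin(a) * K + (1 - cos(a)) * K^2, where K is the skew matrix of axis
    x, y, z = axis
    K = array([[0, -z, y], [z, 0, -x], [-y, x, 0]])
    return eye(3) + sin(angle) * K + (1 - cos(angle)) * dot(K, K)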
Example #9

    def SenseAndAct(self, hand, prevDesc, t, rlEnv, unbias):
        '''Builds the observation image and selects the next zoom location epsilon-greedily.'''

        # generate input image
        targImage = prevDesc.GenerateHeightmap(rlEnv)
        handImage = zeros((self.imP, self.imP, 0),
                          dtype='float32') if hand is None else hand.image
        o = concatenate((targImage, handImage), axis=2)

        # decide which location in the image to zoom into
        bestIdx, bestValue, epsilon = self.SelectIndexEpsilonGreedy(o, unbias)

        # compose result
        T = copy(prevDesc.T)
        T[0:3, 3] = self.actionsInHandFrame[bestIdx] + prevDesc.center
        desc = HandDescriptor(T, self.imP, self.imDNext, self.imWNext)
        a = bestIdx

        return o, a, desc, bestValue, epsilon
Example #10

    def InitializeHandRegions(self):
        '''Determines hand geometry, in the descriptor reference frame, for
        collision checking. Should be called once at initialization.'''

        # find default descriptor geometry
        desc = HandDescriptor(eye(4), self.params["imP"], self.params["imD"],
                              self.params["imW"])

        # important reference points
        topUp = desc.top + (desc.height / 2) * desc.axis
        topDn = desc.top - (desc.height / 2) * desc.axis
        btmUp = desc.bottom + (desc.height / 2) * desc.axis
        btmDn = desc.bottom - (desc.height / 2) * desc.axis

        # cuboids representing hand regions, in workspace format
        self.handClosingRegion = [(-desc.height / 2, desc.height / 2),
                                  (-desc.width / 2, desc.width / 2),
                                  (-desc.depth / 2, desc.depth / 2)]

        self.handFingerRegionL = [(-desc.height / 2, desc.height / 2),
                                  (-desc.width / 2 - 0.01, -desc.width / 2),
                                  (-desc.depth / 2, desc.depth / 2)]

        self.handFingerRegionR = [(-desc.height / 2, desc.height / 2),
                                  (desc.width / 2, desc.width / 2 + 0.01),
                                  (-desc.depth / 2, desc.depth / 2)]

        self.handTopRegion = [(-desc.height / 2, desc.height / 2),
                              (-desc.width / 2 - 0.01, desc.width / 2 + 0.01),
                              (desc.depth / 2, desc.depth / 2 + 0.01)]

        # find corners of hand collision geometry
        self.externalHandPoints = array([
          topUp + ((desc.width / 2) + 0.01) * desc.binormal,
          topUp - ((desc.width / 2) + 0.01) * desc.binormal,
          topDn + ((desc.width / 2) + 0.01) * desc.binormal,
          topDn - ((desc.width / 2) + 0.01) * desc.binormal,
          btmUp + ((desc.width / 2) + 0.01) * desc.binormal,
          btmUp - ((desc.width / 2) + 0.01) * desc.binormal,
          btmDn + ((desc.width / 2) + 0.01) * desc.binormal,
          btmDn - ((desc.width / 2) + 0.01) * desc.binormal])
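Each region above is a list of (min, max) intervals along the descriptor's height, width, and depth directions, so testing whether a point already expressed in the descriptor frame lies inside a region is three interval checks. A minimal sketch, assuming points ordered (height, width, depth):

def point_in_region(point, region):
    # region: [(minH, maxH), (minW, maxW), (minD, maxD)] in the descriptor frame
    return all(lo <= c <= hi for c, (lo, hi) in zip(point, region))

# e.g. a collision if any cloud point falls inside a finger region:
# collides = any(point_in_region(p, handFingerRegionL) for p in cloudInHandFrame)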
Example #11

  def SenseAndAct(self, hand, prevDesc, cloud, tableHeight, t, unbias):
    '''Builds the observation image and selects the next zoom location epsilon-greedily.'''
    
    # generate input image
    targImage = prevDesc.GenerateHeightmap(cloud, tableHeight)
    handImage = zeros((self.imP, self.imP, 0), dtype="float32") if hand is None else hand.image
    timeImage = zeros((self.imP, self.imP, 0), dtype="float32") if not self.includeTime else \
      float(self.tMax - t) / self.tMax * ones((self.imP, self.imP, 1), dtype="float32")
    o = concatenate((targImage, handImage, timeImage), axis = 2)
    
    # decide which location in the image to zoom into
    bestIdx, bestValue, epsilon = self.SelectIndexEpsilonGreedy(o, unbias)

    # compose result
    T = copy(prevDesc.T)
    T[0:3, 3] = self.actionsInHandFrame[bestIdx] + prevDesc.center
    desc = HandDescriptor(T, self.imP, self.imDNext, self.imWNext)
    a = bestIdx

    return o, a, desc, bestValue, epsilon
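SelectIndexEpsilonGreedy is not shown in these examples, but the standard rule it names picks a uniformly random action with probability epsilon and the highest-valued action otherwise. A generic sketch over a flat array of action-values:

from numpy import argmax
from numpy.random import rand, randint

def select_index_epsilon_greedy(values, epsilon):
    # explore with probability epsilon; otherwise exploit the best value
    idx = randint(len(values)) if rand() < epsilon else argmax(values)
    return idx, values[idx]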
Example #12

    def SenseAndActMultiple(self, hand, prevDescs, prevProbs, cloud,
                            tableHeight, t):
        '''Evaluates action-values for every previous descriptor and returns the
        top len(prevDescs) actions as descriptors with chained probabilities.'''

        # generate input image
        observations = []
        for prevDesc in prevDescs:
            targImage = prevDesc.GenerateHeightmap(cloud, tableHeight)
            handImage = zeros(
                (self.imP, self.imP, 0)) if hand is None else hand.image
            timeImage = zeros((self.imP, self.imP, 0)) if not self.includeTime else \
              float(self.tMax - t) / self.tMax * ones((self.imP, self.imP, 1))
            o = concatenate((targImage, handImage, timeImage), axis=2)
            observations.append(o)

        # evaluate action-values and sample n of them
        valuesBatch = self.EvaluateActionsMultiple(observations).flatten()
        flatActionIdxs = argsort(-valuesBatch)[:len(prevDescs)]
        values = valuesBatch[flatActionIdxs]

        # convert values into probabilities (of separate Bernoulli random variables)
        values[values > self.qMax] = self.qMax
        values[values < self.qMin] = self.qMin
        probabilities = (values - self.qMin) / (self.qMax - self.qMin)

        # create a descriptor for each sampled action
        descriptors = []
        for i, flatIdx in enumerate(flatActionIdxs):
            idx = unravel_index(flatIdx, (len(prevDescs), ) + self.outputShape)
            T = copy(prevDescs[idx[0]].T)
            T[0:3, 3] += self.actionsInHandFrame[idx[1:]]
            descriptors.append(
                HandDescriptor(T, self.imP, self.imDNext, self.imWNext))
            probabilities[i] *= prevProbs[idx[0]]
            if self.plotImages and i == 0:
                print("Probability: {}.".format(probabilities[i]))
                self.PlotImages(observations[idx[0]], idx[1:], descriptors[-1])

        return descriptors, probabilities
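Because the Q-values for all previous descriptors are flattened into one array before sorting, unravel_index is what recovers which descriptor and which grid cell a flat index refers to. A small demonstration with an assumed shape:

from numpy import unravel_index

outputShape = (2, 2, 2)  # illustrative per-descriptor action grid
nDescs = 3
idx = unravel_index(13, (nDescs,) + outputShape)
# idx == (1, 1, 0, 1): descriptor 1, grid cell (1, 0, 1)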
Example #13

    def SenseAndAct(self, hand, prevDesc, cloudTree, epsilon):
        '''Senses the current state, s, and determines the next action, a.'''

        # generate image for base frame descriptor
        newDesc = HandDescriptor(copy(prevDesc.T))
        newDesc.imH = 0.105
        newDesc.imW = 0.105
        newDesc.imD = 0.105
        newDesc.GenerateDepthImage(cloudTree.data, cloudTree)
        s = self.emptyState if hand is None else [
            hand.image, self.emptyStateVector
        ]

        # decide which location in the image to zoom into
        bX, iX = self.SampleActions(newDesc, cloudTree)
        bestIdx, bestValue = self.SelectIndexEpsilonGreedy(
            s, newDesc.image, iX, epsilon)

        # compose action
        a, desc = self.ComposeAction(newDesc, bX[bestIdx], iX[bestIdx])

        return s, a, desc, bestValue
Example #14

    def SetInitialDescriptor(self):
        '''Sets the initial descriptor to a fixed pose, centered at half the image depth above the origin.'''

        T = eye(4)
        T[0:3, 3] = array([0, 0, self.imD / 2.0])
        self.initDesc = HandDescriptor(T, self.imP, self.imD, self.imW)
Example #15

cloud_proxy = CloudProxy()
grasp_proxy = GraspProxy()
rviz_node = RvizNode()
plot_rviz = PlotRviz()
plot_rviz.initRos()

cloud = point_cloud.LoadPcd(
    os.path.dirname(__file__) + '/../data/pcd/' + cloud_name)
cloud = point_cloud_util.filter_workspace(workspace, cloud)

grasps = grasp_proxy.detect_grasps_whole_cloud(cloud, grasp_offsets)
image_3d = []

for grasp in grasps:
    hand_des_3d = HandDescriptor(
        HandDescriptor.T_from_approach_axis_center(
            grasp.approach, grasp.axis, (grasp.bottom + grasp.top) / 2), 32)
    hand_des_3d.generate_3d_binary_image(cloud)
    image_3d.append(hand_des_3d.image)

cnn.eval()
image_3d = np.stack(image_3d, 0)
inputs = torch.FloatTensor(image_3d).unsqueeze(1).cuda()
inputs = Variable(inputs, volatile=True)
outputs = cnn(inputs)
prediction = outputs.data.max(1, keepdim=True)[1]
prediction = prediction.squeeze().tolist()
edge_grasp = []
non_edge_grasp = []
for i in range(len(grasps)):
    if prediction[i] == 1:
        # class 1 assumed to mark an edge grasp (completion inferred from the variable names)
        edge_grasp.append(grasps[i])
    else:
        non_edge_grasp.append(grasps[i])
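Variable(inputs, volatile=True) is the pre-0.4 PyTorch idiom for running inference without tracking gradients; in current PyTorch the same forward pass would use a no_grad context instead (a sketch assuming the same cnn and inputs as above):

import torch

with torch.no_grad():
    outputs = cnn(inputs)  # inputs is a plain tensor; no Variable wrapper needed
prediction = outputs.argmax(1).squeeze().tolist()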
Example #16

cloud_proxy = CloudProxy()
grasp_proxy = GraspProxy()
rviz_node = RvizNode()
plot_rviz = PlotRviz()
plot_rviz.initRos()

cloud = point_cloud.LoadPcd(
    os.path.dirname(__file__) + '/../data/pcd/' + cloud_name)
cloud = point_cloud_util.filter_workspace(workspace, cloud)

grasps = grasp_proxy.detect_grasps_whole_cloud(cloud, grasp_offsets)
image_2d = []

for grasp in grasps:
    hand_des_2d = HandDescriptor(
        HandDescriptor.T_from_approach_axis_center(
            grasp.approach, grasp.axis, (grasp.bottom + grasp.top) / 2))
    hand_des_2d.generate_depth_image(cloud)
    image_2d.append(hand_des_2d.image)

cnn.eval()
image_2d = np.stack(image_2d, 0)
inputs = torch.FloatTensor(image_2d).cuda()
inputs = Variable(inputs, volatile=True)
outputs = cnn(inputs)
prediction = outputs.data.max(1, keepdim=True)[1]
prediction = prediction.squeeze().tolist()
edge_grasp = []
non_edge_grasp = []
for i in range(len(grasps)):
    if prediction[i] == 1:
        # class 1 assumed to mark an edge grasp (completion inferred from the variable names)
        edge_grasp.append(grasps[i])
    else:
        non_edge_grasp.append(grasps[i])