Beispiel #1
0
    def Test10(self):  # Description: Add noise and interpolate (take image at end)
        """Perturb a recorded grasp with noise, then ZSLERP the noisy hand and save an image per step."""
        # do STLs with noise after this.
        viewer = Vis()
        hand_base = HandVis(viewer)
        hand_base.loadHand()
        hand_noisy = HandVis(viewer)
        hand_noisy.loadHand()
        scene_obj = ObjectVis(viewer)
        scene_obj.loadObject(4)

        data = ParseGraspData()
        matches = data.findGrasp(objnum=4,
                                 subnum=4,
                                 graspnum=11,
                                 list2check=data.all_transforms)
        grasp = matches[1]
        HandT, ObjT, Arm_JA, Hand_JA = data.matricesFromGrasp(grasp)
        hand_base.orientHandtoObj(HandT, ObjT, scene_obj)
        hand_base.setJointAngles(Hand_JA)
        hand_base.getPalmPoint()
        contact_points1, contact_links1 = hand_base.retractFingers(scene_obj)
        Contact_JA = hand_base.obj.GetDOFValues()

        filename_base = "obj%s_sub%s_graspnum%s_grasptype%s" % (4, 4, 11,
                                                                'extreme0')
        if False:  # flip this branch to regenerate the noise files instead of loading them
            T_noise, JA_noise = hand_noisy.addNoiseToGrasp(
                scene_obj,
                T_zero=hand_base.obj.GetTransform(),
                Contact_JA=Contact_JA,
                TL_n=0.01,
                R_n=0.1,
                JA_n=0.1)
            np.savetxt(filename_base + "_%s.txt" % 'T_noise', T_noise)
            np.savetxt(filename_base + "_%s.txt" % 'JA_noise', JA_noise)
        else:
            JA_noise = np.genfromtxt(filename_base + "_%s.txt" % 'JA_noise')
            T_noise = np.genfromtxt(filename_base + "_%s.txt" % 'T_noise')
        hand_noisy.setJointAngles(JA_noise)
        hand_noisy.obj.SetTransform(T_noise)
        contact_points2, contact_links2 = hand_noisy.retractFingers(scene_obj)

        alpha = np.linspace(0, 1, 6)[1:-1]  # interior interpolation fractions 0.2..0.8
        start_axis_angle = np.array([0, 0, 0])
        end_axis_angle = np.array([0, np.pi, 0])
        hand_base.hide()
        for a in alpha:
            filename = filename_base + "_alpha%s.png" % (int(10 * a))
            hand_noisy.ZSLERP(start_axis_angle, end_axis_angle, a, T_zero=T_noise)
            viewer.clearPoints()
            viewer.takeImage(filename)
            time.sleep(1)

        pdb.set_trace()
Beispiel #2
0
    def Test9(self):  # Description: Record image of scene
        """Load two hands and an object into the viewer, then capture a test image."""
        viewer = Vis()
        first_hand = HandVis(viewer)
        first_hand.loadHand()
        second_hand = HandVis(viewer)
        second_hand.loadHand()
        scene_obj = ObjectVis(viewer)
        scene_obj.loadObject(4)
        pdb.set_trace()
        fname_save = 'test.png'
        viewer.takeImage(fname_save)  # test this!

        pdb.set_trace()
class ShapeImageGenerator(object):
	"""Renders images of a hand posed around generated STL shapes.

	Owns the visualizer, a hand model, a generic STL object, and a ground
	plane, and drives them from rows of a CSV parameter file (see
	readParameterFile for the expected columns).
	"""

	def __init__(self):
		self.model_path = ''
		self.vis = Vis()
		self.Hand = HandVis(self.vis)
		self.Hand.loadHand()
		self.Hand.localTranslation(np.array([0, 0, -0.075]))  # offset so palm is at 0,0,0
		self.Obj = ObjectGenericVis(self.vis)
		self.groundPlane = AddGroundPlane(self.vis)
		self.loadSTLFileList()

	def loadSTLFileList(self):
		"""Collect every .stl file under ../ShapeGenerator/Shapes into self.STLFileList."""
		self.STLFileList = list()
		directory = curdir + '/../ShapeGenerator/Shapes'
		for root, dirs, filenames in os.walk(directory):
			for fname in filenames:
				if os.path.splitext(fname)[1] == '.stl':  # only want stl files
					self.STLFileList.append(root + '/' + fname)

	def valuesFromFileName(self, fn_abs):
		"""Parse object features (shape, h, w, e, a) out of an STL file name.

		Expects names like 'cube_h1_w2_e3.stl' or 'handle_h1_w2_e3_a4.stl';
		a (alpha) is None when the name has no fifth field.
		"""
		fn = fn_abs.split('/')[-1]
		# os.path.splitext instead of fn.strip('.stl'): strip() removes *characters*
		# from both ends, which mangled shapes starting with 's', 't' or 'l'
		# (e.g. 'sphere_...' -> 'phere_...').
		fn_parts = os.path.splitext(fn)[0].split('_')
		shape = fn_parts[0]
		h = int(fn_parts[1].strip('h'))
		w = int(fn_parts[2].strip('w'))
		e = int(fn_parts[3].strip('e'))
		a = None
		if len(fn_parts) == 5:  # extra feature when there is alpha
			a = int(fn_parts[4].strip('a'))
		return shape, h, w, e, a

	def loadObject(self, objtype, h, w, e, a=None):
		"""Record the object's features and load the matching STL into the scene.

		Returns the loader's result (falsy when the model is missing).
		"""
		# only centroid that hasn't been dealt with is the handle.
		# all centroids should be in the center of the object
		self.Obj.features['type'] = objtype
		self.Obj.features['h'] = h
		self.Obj.features['w'] = w
		self.Obj.features['e'] = e
		self.Obj.features['a'] = a
		return self.Obj.loadObject(objtype, h, w, e, a)

	def setObjTransparent(self, alpha=0.5):
		"""Set the loaded object's transparency by delegating to changeColor's alpha."""
		self.Obj.changeColor(alpha=alpha)

	def readParameterFile(self, fn):
		"""Read image-generation parameters from a CSV file into self.params_list.

		Each CSV row becomes a dict keyed by the header row.  Known columns are
		post-processed: 'Joint Angles' and 'Camera Transform' become float
		arrays, matrix columns are parsed via stringToArray, and file
		extensions are appended to model/image names.  Returns self.params_list.
		"""
		params_list = list()
		with open(fn, 'rb') as csv_file:  # binary mode as required by the Python 2 csv module
			csvreader = csv.reader(csv_file, delimiter=',')
			headers = next(csvreader)  # was csvreader.next(); next() also works on Python 3
			for row in csvreader:
				params_list.append(dict(zip(headers, row)))
		for D in params_list:
			for k in D:
				if 'Joint Angles' == k:
					try:
						D[k] = np.array(D[k].split(',')).astype('float')  # convert to numpy array
					except Exception:  # entry may be blank (no hand in image); leave as string
						pass
				elif 'Hand Matrix' == k:
					try:
						D[k] = self.stringToArray(D[k])
					except Exception:  # entry may be blank (no hand in image); leave as string
						pass
				elif 'Image Save Name' == k:
					D[k] += '.png'  # add extension
				elif 'Model' == k:
					D[k] += '.stl'  # add extension
				elif 'Model Matrix' == k:
					D[k] = self.stringToArray(D[k])
				elif 'Camera Transform' == k:
					D[k] = np.array(D[k].split(',')).astype('float')  # convert to numpy array
				elif 'Image Size' == k:
					pass  # placeholder: image size is currently ignored
				else:
					print('Unexpected Key: %s' %(k))
		self.params_list = params_list
		return self.params_list

	def stringToArray(self, mat_str):
		"""Convert a bracketed matrix string (rows wrapped in square brackets) to a float ndarray.

		Matrices are stored this way because pasting them into a spreadsheet
		(or saving via DictWriter) is easy and the CSV stays human-readable;
		the downside is the brackets come back as part of the string.  Could
		be a single regex, but this staged version is clearer.
		"""
		mat_re = re.findall(r'\[.*?\]', mat_str)
		mat_strip = [t.strip('[]') for t in mat_re]
		return np.array([t.split() for t in mat_strip]).astype('float')

	def createImagesFromParametersList(self, shapes=None):
		"""Create an image for every entry of self.params_list.

		shapes: optional collection of shape-name prefixes; when given, only
		models whose name starts with one of them are rendered (mostly useful
		during development).
		"""
		print("Total: %s" %(len(self.params_list)))
		for counter, params in enumerate(self.params_list, start=1):
			if (shapes is None) or (params['Model'].split('_')[0] in shapes):  # was 'shapes == None'
				self.createImageFromParameters(params)
				print("Current: %s" %counter)

	def createImageFromParameters(self, params):
		"""Render and save the single image described by one parameter dict.

		Returns True when the model loaded and the image was recorded,
		False when the STL model could not be found.
		"""
		if not self.loadObjectFromParameters(params):
			print("Model Not Found: %s" %params['Model'])
			return False
		self.groundPlane.createGroundPlane(y_height = self.Obj.h/2.0/100)
		self.Obj.changeColor('purpleI')
		self.Hand.changeColor('yellowI')
		cam_params = params['Camera Transform']
		self.vis.setCamera(cam_params[0], cam_params[1], cam_params[2])
		# Show the hand only when both cells were non-blank (blank CSV cells
		# stay as the empty string).  Was 'is not ""', an identity comparison
		# that only worked via CPython string interning.
		if params['Joint Angles'] != '' and params['Hand Matrix'] != '':
			self.Hand.show()
			self.Hand.setJointAngles(params['Joint Angles'])
			self.Hand.obj.SetTransform(params['Hand Matrix'])
		else:  # for images where no hand is shown
			self.Hand.hide()
		self.Obj.obj.SetTransform(params['Model Matrix'])
		# NOTE(review): a signed sum can cancel to ~0 for non-matching
		# transforms; np.abs(...).sum() would be a more robust rotation test.
		# Left as-is to preserve behavior.
		if np.sum(self.Obj.obj.GetTransform() - matrixFromAxisAngle([0,0,np.pi/2])) < 1e-4:  # if object is rotated 90, use different dimension
			self.groundPlane.createGroundPlane(y_height = self.Obj.w/2.0/100)
		else:  # offset by height if no rotation -- not a great solution once the object starts to rotate!
			self.groundPlane.createGroundPlane(y_height = self.Obj.h/2.0/100)
		# this should definitely be taken care of when making the spreadsheet
		# Back the hand off until it no longer touches anything.
		pts = self.Hand.getContactPoints()
		while len(pts) > 0:
			self.Hand.moveY(-0.001)
			pts = self.Hand.getContactPoints()
		self.vis.takeImage(params['Image Save Name'], delay = True)
		print("Image Recorded: %s" %params['Image Save Name'])
		return True

	def loadObjectFromParameters(self, params):
		"""Load the STL model named in params; returns the loader's success flag."""
		return self.Obj.loadObjectFN(self.Obj.stl_path + params['Model'])

	def loadHandFromParameters(self, params):
		"""Show the hand and pose it from a parameter dict's joint angles and hand matrix."""
		self.Hand.show()
		self.Hand.setJointAngles(params['Joint Angles'])
		self.Hand.obj.SetTransform(params['Hand Matrix'])

	def loadSceneFromParameters(self, params):
		"""Load both the object and the hand described by a parameter dict."""
		self.loadObjectFromParameters(params)
		self.loadHandFromParameters(params)

	def getParameterFromList(self, list_indx):
		"""Return the parameter dict at index list_indx of self.params_list."""
		return self.params_list[list_indx]
Beispiel #4
0
    def Test11(self):  # Description: Generating Images for Interpolated Grasps
        """Render images of a hand pose interpolated between two recorded grasps.

        Hand1 (grey) takes the first grasp, Hand2 (pink) takes the second
        grasp rotated 180 degrees about Y, and Hand3 starts as a copy of
        Hand1 and is swept toward that rotation via ZSLERP, saving one
        image per interpolation fraction.
        """
        # Original is grey
        # 180 + noise grasp is pink
        # interpolated is violet

        # do STLs with noise after this.

        V = Vis()
        Hand1 = HandVis(V)
        Hand1.loadHand()
        Hand2 = HandVis(V)
        Hand2.loadHand()
        Obj1 = ObjectVis(V)
        Obj1.loadObject(4)
        Obj1.changeColor('green')

        Data = ParseGraspData()
        Data.parseOutputData()
        Data.parseAllTransforms()
        # obj4_cluster13_sub5_grasp2_optimal0_prime.jpg obj4_cluster13_sub5_grasp3_extreme1_target.jpg -- don't have the data.  Tried without class but extreme grasp was too far!
        # grasp1 = Data.findGrasp(objnum = 4, subnum = 5, graspnum = 2, grasptype = 'optimal0', list2check = Data.all_transforms)
        # grasp2 = Data.findGrasp(objnum = 4, subnum = 5, graspnum = 3, grasptype = 'extreme1', list2check = Data.all_transforms)
        # obj4_cluster8_sub4_grasp14_extreme1_prime.jpg obj4_cluster8_sub4_grasp14_optimal0_target.jpg -- don't have the data
        # grasp1 = Data.findGrasp(objnum = 4, subnum = 4, graspnum = 14, grasptype = 'extreme1', list2check = Data.all_transforms)
        # grasp2 = Data.findGrasp(objnum = 4, subnum = 4, graspnum = 14, grasptype = 'optimal0', list2check = Data.all_transforms)
        # obj4_cluster8_sub4_grasp14_extreme1_prime.jpg obj4_cluster8_sub4_grasp9_optimal0_target.jpg -- don't have the data

        # Selects which grasp pair (and the matching camera view below) to render.
        case = 4

        #ones that I think look good
        if case == 1:
            grasp1 = Data.findGrasp(objnum=4,
                                    subnum=4,
                                    graspnum=11,
                                    grasptype='optimal0',
                                    list2check=Data.all_transforms)
            grasp2 = Data.findGrasp(objnum=4,
                                    subnum=4,
                                    graspnum=11,
                                    grasptype='optimal0',
                                    list2check=Data.all_transforms)
            filename_base = "obj%s_sub%s_graspnum%s_%s_%s" % (
                4, 4, 11, 'optimal0', 'optimal0')
        elif case == 2:
            grasp1 = Data.findGrasp(objnum=4,
                                    subnum=5,
                                    graspnum=2,
                                    grasptype='optimal0',
                                    list2check=Data.all_transforms)
            grasp2 = Data.findGrasp(objnum=4,
                                    subnum=5,
                                    graspnum=3,
                                    grasptype='extreme1',
                                    list2check=Data.all_transforms)
            filename_base = "obj%s_sub%s_graspnum%s_%s_%s" % (
                4, 5, '2&3', 'optimal0', 'extreme1')
        elif case == 3:
            grasp1 = Data.findGrasp(objnum=4,
                                    subnum=4,
                                    graspnum=14,
                                    grasptype='extreme1',
                                    list2check=Data.all_transforms)
            grasp2 = Data.findGrasp(objnum=4,
                                    subnum=4,
                                    graspnum=14,
                                    grasptype='optimal0',
                                    list2check=Data.all_transforms)
            filename_base = "obj%s_sub%s_graspnum%s_%s_%s" % (
                4, 4, 14, 'extreme1', 'optimal0')
        elif case == 4:
            grasp1 = Data.findGrasp(objnum=4,
                                    subnum=4,
                                    graspnum=14,
                                    grasptype='extreme1',
                                    list2check=Data.all_transforms)
            grasp2 = Data.findGrasp(objnum=4,
                                    subnum=4,
                                    graspnum=9,
                                    grasptype='optimal0',
                                    list2check=Data.all_transforms)
            filename_base = "obj%s_sub%s_graspnum%s_%s_%s" % (
                4, 4, '14&9', 'extreme1', 'optimal0')

        # Pose both hands from the recorded grasp data and close the fingers
        # onto the object.
        HandT1, ObjT1, Arm_JA1, Hand_JA1 = Data.matricesFromGrasp(grasp1[0])
        HandT2, ObjT2, Arm_JA2, Hand_JA2 = Data.matricesFromGrasp(grasp2[0])
        Hand1.orientHandtoObj(HandT1, ObjT1, Obj1)
        Hand1.setJointAngles(Hand_JA1)
        Hand1.changeColor('greyI')
        Hand2.orientHandtoObj(HandT2, ObjT2, Obj1)
        Hand2.setJointAngles(Hand_JA2)
        Hand2.changeColor('pinkI')
        Hand1.getPalmPoint()
        contact_points1, contact_links1 = Hand1.retractFingers(Obj1)
        contact_points2, contact_links2 = Hand2.retractFingers(Obj1)

        # Rotate Hand2 all the way (alpha=1) to the 180-degree target pose.
        start_axis_angle = np.array([0, 0, 0])
        end_axis_angle = np.array([0, np.pi, 0])
        Hand2.ZSLERP(start_axis_angle,
                     end_axis_angle,
                     1,
                     T_zero=Hand2.obj.GetTransform())
        # Hand3 starts as a copy of Hand1 and sweeps toward the rotated pose.
        Hand3 = HandVis(V)
        Hand3.loadHand()
        Hand3.makeEqual(Hand1)
        Hand3.changeColor('blueI')
        alpha = np.linspace(0, 1, 6)[1:-1]  # interior fractions 0.2, 0.4, 0.6, 0.8

        # Hand-tuned camera viewpoint per case.
        if case == 1:
            V.setCamera(60, 0, 5 / 4 * np.pi - 0.75)
        elif case == 2:
            V.setCamera(60, np.pi / 4 - 1.5, -np.pi - 1)
        elif case == 3:
            V.setCamera(40, np.pi / 3 + 0.2, -np.pi - 0.5)
        elif case == 4:
            V.setCamera(40, np.pi / 3 - np.pi, -np.pi - 0.75)
            # pdb.set_trace()

        # function to get out dist, az, el of camera from current view
        for a in alpha:
            filename = filename_base + "_alpha%s.png" % (int(10 * a))
            Hand3.ZSLERP(start_axis_angle,
                         end_axis_angle,
                         a,
                         T_zero=Hand1.obj.GetTransform())
            V.clearPoints()
            V.takeImage(filename)
            time.sleep(1)
class TrainingVideo(object):
	def __init__(self):
		"""Set up the viewer, hand, generic object, and ground plane used for recording."""
		self.vis = Vis()
		self.Hand = HandVis(self.vis)
		self.Hand.loadHand()
		self.Obj = ObjectGenericVis(self.vis)
		self.GP = AddGroundPlane(self.vis)
		self.demoObj = list()  # presumably extra demo objects for a video; not populated in this chunk -- verify
		self.frameCount = 0  # index of the next frame file written by recordFrame()
		self.start_offset = 0  # hand z-offset; computed in Video1()

	def recordFrame(self):
		"""Save the current viewer image as the next numbered frame file (ImageNNNN.png)."""
		frame_name = 'Image%04d.png' %self.frameCount
		self.vis.takeImage(frame_name, delay = False)
		self.frameCount += 1

	# Kadon Engle - last edited 07/14/17
	def fingerRecord(self, oldJA, newJA):
		"""Record frames while interpolating joint angles from oldJA to newJA.

		The frame count scales with the largest per-joint change.  Stops at the
		last collision-free step (returning that angle set) when the fingers
		contact something; otherwise returns the final angle set reached.
		Any failure is caught and reported rather than raised.
		"""
		try:
			frame_rate = 20  # arbitrary baseline: frames per unit of joint travel
			frames = int(max(abs(newJA - oldJA)) * frame_rate)
			self.Hand.setJointAngles(oldJA)
			if len(self.Hand.getContactPoints()) > 0:
				# BUG(review): 'iP' is undefined here (NameError), which the
				# original bare 'except:' silently swallowed.  The intent is
				# unclear (possibly a previous interpolation list); left in
				# place so the failure is at least reported below.
				self.Hand.setJointAngles(iP[-1])
			# One angle list per frame; joint j's trajectory fills column j.
			Angles = [list() for _ in range(frames)]
			for j in range(len(oldJA)):
				trajectory = np.linspace(oldJA[j], newJA[j], frames)
				for k in range(frames):
					Angles[k].append(trajectory[k])
			# NOTE(review): if frames == 0 the loop never runs and 'n' below is
			# unbound; the except clause reports it.  More work is needed on
			# detecting finger collision to stop the fingers.
			for n, step in enumerate(Angles):
				self.Hand.setJointAngles(np.array(step))
				if len(self.Hand.getContactPoints()) > 0:
					self.Hand.setJointAngles(Angles[n - 1])
					return Angles[n - 1]
				self.recordFrame()
			return Angles[n - 1]

		except Exception:  # was a bare 'except:'; keep KeyboardInterrupt/SystemExit uncaught
			print("Invalid Joint Angle")  # was a Python 2 print statement

	# Kadon Engle - last edited 07/14/17
	def handRecord(self, x, y, z):
		"""Record frames while translating the hand toward the target x, y, z position."""
		self.T_current = self.Hand.obj.GetTransform()
		moving_T = copy.deepcopy(self.T_current)
		# Arbitrary fixed frame count; ideally it would scale with the travel
		# distance (fewer frames for short moves, more for long ones).
		n_frames = 20
		for axis, target in enumerate([x, y, z]):
			if abs(target - self.T_current[axis, 3]) > 0.1e-5:  # skip axes that barely move
				for waypoint in np.linspace(self.T_current[axis, 3], target, n_frames):
					moving_T[axis, 3] = waypoint
					self.Hand.obj.SetTransform(moving_T)
					self.recordFrame()

	def stationaryRecord(self, frames):
		"""Record the current (unchanging) scene for the given number of frames."""
		for _ in range(int(frames)):
			self.recordFrame()

	def createVideo(self, fn):
		"""Stitch the recorded Image*.png frames into an XVID .avi named fn.

		Uses PIL + cv2; there is a built-in OpenRAVE recorder which would be
		much better, but it doesn't work on all hardware (and opencv is a pain
		to set up, so may not be worth the hassle).  Failures are reported,
		not raised.
		"""
		try:
			if os.path.splitext(fn)[1] != '.avi':
				print("Did not save Video: Invalid File Name")
				return
			# NOTE(review): frameCount starts at 0, so the first frame is
			# Image0000.png; sizing from Image0001.png fails unless at least
			# two frames were recorded -- confirm the intended start index.
			initImage = Image.open('Image0001.png')
			height, width, layers = np.array(initImage).shape
			fourcc = cv2.VideoWriter_fourcc(*'XVID')
			video = cv2.VideoWriter(fn, fourcc, 24, (width, height))
			for frame_file in np.sort(os.listdir(curdir)):
				if os.path.splitext(frame_file)[1] == '.png':
					image = Image.open(frame_file)
					# was numpy.array -- the module is referenced as np
					# everywhere else in this file, so 'numpy' was a likely NameError
					video.write(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
			video.release()
		except Exception:  # was a bare 'except:' with a Python 2 print statement
			print('Something went wrong. Maybe you didn\'t record any images')

		# native linux method -- most likely to work
		# subprocess.call(["avconv", "-f", "image2", "-i", "Image%04d.png", "-r", "5", "-s", "800x600", fn+".avi"])

	def removeImages(self):
		"""Delete every .png frame file in the current directory."""
		for entry in os.listdir(curdir):
			if os.path.splitext(entry)[1] == '.png':
				os.remove(entry)

	def VLCPlay(self, fn):
		"""Open the given video file in VLC (blocks until the player exits)."""
		launch_cmd = ["vlc", fn]
		subprocess.call(launch_cmd)

	def Video1(self): 
		# setup
		self.Obj.loadObject('cube',36,18,3,None)  # TODO: Object with larger extent!
		self.Obj.changeColor('purpleI')
		self.Hand.changeColor('mustard')
		self.GP.createGroundPlane(0.175)
		extent_offset = -3.0/100 # offset by thickness of object
		palm_offset = -0.075 #offset so palm is at 0,0,0
		clearance_offset = -1.0/100 # offset to have clearance between palm and object
		self.start_offset = extent_offset + palm_offset + clearance_offset
		self.Hand.localTranslation(np.array([0, 0, self.start_offset]))
		rot = matrixFromAxisAngle([0,0, np.pi/2])
		self.Hand.localRotation(rot) # rotate hand to put stationary finger on one side of object
		self.T_start = self.Hand.obj.GetTransform() # get starting transform so everything can be done relative to this
		oHand = np.array([0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) # position fingers such that hand can move in x, y, and z without hitting object
		cHand = np.array([0, 0, 0.0, 1.3, 0.4, 0.0, 1.3, 0.4, 1.3, 0.4]) # position fingers such that the hand will be closed around the object without hititng it
		self.Hand.setJointAngles(oHand)
		dist_range_min = -0.1
		dist_range_max = 0.1
		frame_rate = 20/0.1 # frames/cm
		# Different arbitrary camera angles for needed viewpoints
		# self.vis.setCamera(60,3*np.pi/4,-np.pi/4-0.1)
		# self.vis.setCamera(60, np.pi, -np.pi/2) # top view
		# self.vis.setCamera(60, -2.25, -np.pi/2.10)
		self.vis.setCamera(60, -2.25, -0.75) # Numbers are completely arbitrary, gives a good view of object and hand.