Example #1
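All four examples below assume a set of module-level imports and path constants that the listing does not show. The following is a minimal sketch of that preamble; the project-specific helpers (ManipulationOracle, ThetaEvaluator, separate, restore_env, set_default_robot_config, get_file_name_with_given_idx, get_all_thetas, sample_thetas) are assumed to come from the author's own modules, and the module name and path values here are placeholders.

# Assumed preamble -- not part of the original listing.
import os
import sys
import time

import numpy as np
import scipy.io as sio

from openravepy import Environment, databases, IkParameterization

# Project-specific helpers (hypothetical module name):
# from manipulation_utils import (ManipulationOracle, ThetaEvaluator, separate,
#     restore_env, set_default_robot_config, get_file_name_with_given_idx,
#     get_all_thetas, sample_thetas)

DATA_DIR  = './data'      # directory containing env_files/ (assumed)
THETA_DIR = './thetas/'   # per-environment theta .mat files (assumed)
SAVE_DIR  = './results/'  # reward matrices and thetas.mat (assumed)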
def main():
	# list the saved environment files
	env_file_list = os.listdir(DATA_DIR+'/env_files/')
	reward_matrix = []
	for env_idx in range(1,len(env_file_list)):
		train_env_f_name = get_file_name_with_given_idx(env_file_list,env_idx)

		# load the environment file
		print train_env_f_name

		env=Environment()
		env.Reset()
#		env.SetViewer('qtcoin')
		simple_prob=separate(env,0)
		floor = env.GetKinBody("floorwalls")
		floor.Enable(False)
		oracle = ManipulationOracle(simple_prob,env)

		robot = env.GetRobots()[0]
		manipulator = robot.SetActiveManipulator('leftarm_torso') 
	

		## Recovering the environment
		restore_env(env,train_env_f_name,DATA_DIR)
		target = env.GetKinBody('ObjToG')

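		# Load OpenRAVE's cached grasp set and Transform6D IK database for this
		# robot/target pair, autogenerating them on first use (which can be slow).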
		gmodel = databases.grasping.GraspingModel(robot,target)
		if not gmodel.load():
			gmodel.autogenerate()
		ikmodel = databases.inversekinematics.InverseKinematicsModel(\
				robot,iktype=IkParameterization.Type.Transform6D)
		if not ikmodel.load():
			ikmodel.autogenerate()
	

		target.Enable(False)
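		# The target is disabled so that the collision check in computeValidGrasps
		# below presumably ignores the object being grasped.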
		grasps, grasp_indices = gmodel.computeValidGrasps(checkcollision=True,checkik=False)
		thetas = sample_thetas(robot,oracle,gmodel,grasps,grasp_indices,env,3)
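		# Each row of thetas is assumed to hold [base pose (x, y, rotation), grasp
		# parameters]; the later examples unpack it as theta[0:3] and theta[3:].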

		f_name = THETA_DIR + 'env_' + str(env_idx)+'.mat'
		sio.savemat(f_name,{'thetas':thetas})
		env.Destroy()
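Example #1 saves one thetas matrix per environment; the later examples read them back through get_all_thetas, whose implementation is not shown. Below is a simplified sketch of what such a loader could look like, assuming the 'env_' + str(env_idx) + '.mat' naming used above and the preamble constants; it is an assumption, not the author's implementation.

# Hypothetical loader for the per-environment theta files written above.
import glob

def get_all_thetas(theta_file_template):
	all_thetas = []
	for f_name in sorted(glob.glob(THETA_DIR + theta_file_template + '*.mat')):
		env_thetas = sio.loadmat(f_name)['thetas']
		if env_thetas.size:
			all_thetas.append(env_thetas)
	return np.vstack(all_thetas)  # one candidate theta per row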
Example #2
def main():
	theta_values=[]
	ik_times = []
	plan_times = []
	theta_augmented_with_grasp = []

	env_file_list = os.listdir(DATA_DIR+'/env_files/')
	reward_matrix = []
	theta_file_template = 'env_'
	if not os.path.isfile(SAVE_DIR+'thetas.mat'):
		thetas = get_all_thetas(theta_file_template)
		sio.savemat( SAVE_DIR + 'thetas.mat',{'thetas':thetas})
	else:
		thetas = sio.loadmat( SAVE_DIR + 'thetas.mat')['thetas']
#	goal_config_list = get_goal_config_from_theta_list(thetas)
#	sio.savemat( SAVE_DIR + 'goal_config_list.mat',{'goal_config_list':goal_config_list})
	
	if len(sys.argv) == 1:
		print "NEED ENV NUMBER"
		return 0 
	env_idx = int(sys.argv[1])
	train_env_f_name = get_file_name_with_given_idx(env_file_list,env_idx)

	reward_file = SAVE_DIR + 'reward_matrix' +str(env_idx)+'.mat'
	if os.path.isfile(reward_file):
		print train_env_f_name +' already done'
		return 0
	print "SPAWNED: " + train_env_f_name

	# load the environment file
#	print train_env_f_name

	env=Environment()
	env.Reset()

	simple_prob=separate(env,0)
	floor = env.GetKinBody("floorwalls")
	floor.Enable(False)
	oracle = ManipulationOracle(simple_prob,env)

	#env.SetViewer('qtcoin')
	## Recovering the environment
	restore_env(env,train_env_f_name,DATA_DIR)

	# set the robot to default configuration
	robot = env.GetRobots()[0]
	manipulator = robot.SetActiveManipulator('leftarm_torso') 
	set_default_robot_config(robot)  # set arms to the pregrasp pose
	robot.SetActiveDOFs(manipulator.GetArmIndices())
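	# ThetaEvaluator presumably scores a candidate (base pose, grasp) pair by
	# solving IK and planning a motion to the grasp, returning the reward along
	# with the IK and planning times (unpacked below).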
		
	evaluator = ThetaEvaluator(env,oracle)
	env_theta_vals = []
	env_plan_time = []
	env_ik_time= []

	tst = time.time()
	for theta_idx in range(np.shape(thetas)[0]):
		print "Running ENV " + str(theta_idx) 
		theta = thetas[theta_idx,:]
		if len(theta) != 0:
			base_pose = theta[0:3]
			grasp     = theta[3:]
			each = time.time()
			theta_val,ik_time,plan_time = evaluator.get_true_vals_from_base_pose_and_grasp(grasp,base_pose)
		else:
			# no theta available for this entry; record zero reward and times
			theta_val = 0
			ik_time   = 0
			plan_time = 0
		env_theta_vals.append(theta_val)
		env_plan_time.append(plan_time)
		env_ik_time.append(ik_time)
	theta_values.append(env_theta_vals)
	ik_times.append(env_ik_time)
	plan_times.append(env_plan_time)
	
	sio.savemat(SAVE_DIR + 'reward_matrix' +str(env_idx)+'.mat',\
		{'reward_matrix':theta_values,'ik_times':ik_times,'plan_times':plan_times})

	env.Destroy()
	print "ENV " + str(env_idx) + "FINISHED RUNNING"
Example #3
def main():
	env_file_order = sio.loadmat(DATA_DIR+'env_file_order.mat')['file_orders'][0]
	theta_values=[]
	theta_augmented_with_grasp = []
	for env_idx in range(160,len(env_file_order)):
		# load the environment file
		env_order = env_file_order[env_idx]
		train_env_f_name = 'scene'+str(env_order)+'_env.dae'
		print train_env_f_name

		#theta_file_template = 'good_thetas_'
		theta_file_template = 'good_thetas_leslie_'
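		# Two theta formats are handled below: 'good_thetas_leslie_' rows already
		# contain [base pose, grasp], while thetas from the other template are
		# scored with get_true_vals, which also returns the grasp it selected.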
		thetas = sio.loadmat(THETA_DIR+theta_file_template+str(env_idx)+'.mat')['thetas']
		if theta_file_template != 'good_thetas_leslie_':
			#artificial_vals= sio.loadmat(THETA_DIR+'good_thetas_'+\
			#				str(env_idx)+'.mat')['values']
			#thetas = thetas[artificial_vals[0]>0,:]
			thetas = get_all_thetas(theta_file_template)
		else:
			thetas = get_all_thetas(theta_file_template)
		#	sio.savemat('theta_values_leslie.mat',{'thetas':thetas})
		env=Environment()
		env.Reset()

		simple_prob=separate(env,0)
		floor = env.GetKinBody("floorwalls")
		floor.Enable(False)
		oracle = ManipulationOracle(simple_prob,env)

		#env.SetViewer('qtcoin')
		## Recovering the environment
		restore_env(env,train_env_f_name,DATA_DIR)

		# set the robot to default configuration
		robot = env.GetRobots()[0]
		manipulator = robot.SetActiveManipulator('leftarm_torso') 
		set_default_robot_config(robot)  # set arms to the pregrasp pose
		robot.SetActiveDOFs(manipulator.GetArmIndices())
			
		evaluator = ThetaEvaluator(env,oracle)
		env_theta_vals = []
		for theta_idx in range(np.shape(thetas)[0]):
			theta = thetas[theta_idx,:]
			if theta_file_template == 'good_thetas_leslie_':
				if len(theta) != 0:
					base_pose = theta[0:3]
					grasp     = theta[3:]
					theta_val,ik_time,plan_time = evaluator.get_true_vals_from_base_pose_and_grasp(grasp,base_pose)
				else:
					theta_val = 0
					
			else:
				theta_val,g = evaluator.get_true_vals(theta)
				if g is not None:
					print g
					theta_augmented_with_grasp.append( np.r_[theta,g] )
				else:
					theta_augmented_with_grasp.append( np.r_[theta] )
			env_theta_vals.append(theta_val)
		theta_values.append(env_theta_vals)
		if theta_file_template == 'good_thetas_leslie_':
			sio.savemat('theta_rewards_leslile5.mat',{'reward':theta_values})
		env.Destroy()
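Example #3 accumulates one list of rewards per environment. A minimal sketch of how the saved reward file could be inspected afterwards, assuming the file name written above and that every environment was evaluated on the same theta set:

# Hypothetical post-processing: best-scoring theta index per environment.
import numpy as np
import scipy.io as sio

rewards = sio.loadmat('theta_rewards_leslile5.mat')['reward']
best_theta_idx = np.argmax(rewards, axis=1)  # one index per environment row
print(best_theta_idx)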
Example #4
def main():
	theta_values=[]
	theta_augmented_with_grasp = []
	ik_times = []
	plan_times = []

	env_file_list = os.listdir(DATA_DIR+'/env_files/')
	reward_matrix = []
	theta_file_template = 'env_'
	thetas = get_all_thetas(theta_file_template)
	sio.savemat( SAVE_DIR + 'thetas.mat',{'thetas':thetas})
#	goal_config_list = get_goal_config_from_theta_list(thetas)
#	sio.savemat( SAVE_DIR + 'goal_config_list.mat',{'goal_config_list':goal_config_list})

	for env_idx in range(len(env_file_list)):
		train_env_f_name = get_file_name_with_given_idx(env_file_list,env_idx)

		# load the environment file
		print train_env_f_name


		env=Environment()
		env.Reset()

		simple_prob=separate(env,0)
		floor = env.GetKinBody("floorwalls")
		floor.Enable(False)
		oracle = ManipulationOracle(simple_prob,env)

		#env.SetViewer('qtcoin')
		## Recovering the environment
		restore_env(env,train_env_f_name,DATA_DIR)

		# set the robot to default configuration
		robot = env.GetRobots()[0]
		manipulator = robot.SetActiveManipulator('leftarm_torso') 
		set_default_robot_config(robot)  # set arms to the pregrasp pose
		robot.SetActiveDOFs(manipulator.GetArmIndices())
			
		evaluator = ThetaEvaluator(env,oracle)
		env_theta_vals = []
		env_plan_time = []
		env_ik_time= []
		tst = time.time()
		for theta_idx in range(np.shape(thetas)[0]):
			theta = thetas[theta_idx,:]
			if len(theta) != 0:
				base_pose = theta[0:3]
				grasp     = theta[3:]
				each = time.time()
				theta_val,ik_time,plan_time = evaluator.get_true_vals_from_base_pose_and_grasp(grasp,base_pose)
			else:
				# no theta available for this entry; record zero reward and times
				theta_val = 0
				ik_time   = 0
				plan_time = 0
			env_theta_vals.append(theta_val)
			env_plan_time.append(plan_time)
			env_ik_time.append(ik_time)
		print time.time() - tst
		theta_values.append(env_theta_vals)
		ik_times.append(env_ik_time)
		plan_times.append(env_plan_time)
		sio.savemat(SAVE_DIR + 'reward_matrix.mat',{'reward_matrix':theta_values,'ik_times':ik_times,'plan_times':plan_times})

		env.Destroy()
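Example #2 writes one reward_matrix file per environment (one process each), while Example #4 accumulates everything into a single file. A hedged sketch of stitching the per-environment outputs of Example #2 back together, assuming the preamble constants and that every environment used the same theta set:

# Hypothetical aggregation of the per-environment files written by Example #2.
import glob
import re

import numpy as np
import scipy.io as sio

SAVE_DIR = './results/'  # assumed, see the preamble sketch above

files = glob.glob(SAVE_DIR + 'reward_matrix[0-9]*.mat')
files.sort(key=lambda f: int(re.search(r'(\d+)\.mat$', f).group(1)))

rows = [np.asarray(sio.loadmat(f)['reward_matrix']).ravel() for f in files]
reward_matrix = np.vstack(rows)  # rows ordered by environment index
sio.savemat(SAVE_DIR + 'reward_matrix_all.mat', {'reward_matrix': reward_matrix})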