	param = DoubleIntegratorParam()
	env = DoubleIntegrator(param)

	if args.il:
		run(param, env, None, None, args)
		exit()

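	# controllers to evaluate, keyed by name; commented-out entries are alternative checkpoints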
	controllers = {
		# 'emptywapf': Empty_Net_wAPF(param,env,torch.load('../results/doubleintegrator/exp1Empty_0/il_current.pt')),
		# 'e2e':torch.load('../results/doubleintegrator/exp1Barrier_0/il_current.pt'),
		# 'empty':torch.load('../results/doubleintegrator/exp1Empty_0/il_current.pt'),

		# 'current':torch.load(param.il_train_model_fn),
		# 'current_wapf': Empty_Net_wAPF(param,env,torch.load(param.il_train_model_fn)),
		# 'gg': GoToGoalPolicy(param,env),
		'apf': Empty_Net_wAPF(param,env,GoToGoalPolicy(param,env)),
		# 'zero': Empty_Net_wAPF(param,env,ZeroPolicy(env))
	}

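	# load the initial state s0 for the requested instance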
	s0 = load_instance(param, env, args.instance)

	if args.batch:
		if args.controller:
			controllers = dict()
			for ctrl in args.controller:
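				# each --controller argument is expected in the form "name,kind,path"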
				name,kind,path = ctrl.split(',')
				if kind == "EmptyAPF":
					controllers[name] = Empty_Net_wAPF(param,env,torch.load(path))
				elif kind == "torch":
					controllers[name] = torch.load(path)
				elif kind == "apf":
					# assumed branch body, mirroring the 'apf' controller defined above
					controllers[name] = Empty_Net_wAPF(param,env,GoToGoalPolicy(param,env))
Example #2
		policy_fn = '../results/singleintegrator/empty_2/il_current.pt'
		policy = torch.load(policy_fn)
	elif args.empty:
		policy_fn = '../models/singleintegrator/empty.pt'
		policy = torch.load(policy_fn)
	elif args.current:
		policy_fn = '../models/singleintegrator/il_current.pt'
		policy = torch.load(policy_fn)
	elif args.currenta:
		policy_fn = '../models/singleintegrator/ad_current.pt'
		policy = torch.load(policy_fn)
	elif args.currentvs:
		policy_fn = '../models/singleintegrator_vel_sensing/il_current.pt'
		policy = torch.load(policy_fn)
	elif args.empty_wAPF:
		policy = Empty_Net_wAPF(param,env)
	else:
		exit('no policy recognized')

	if args.instance:
		instance = args.instance
	else:
		# instance = "map_8by8_obst6_agents4_ex0000"
		instance = "map_8by8_obst6_agents4_ex0002"		
		# instance = "map_8by8_obst6_agents4_ex0010"
		print('default instance file: {}'.format(instance))
	instance_fn = "../results/singleintegrator/instances/{}.yaml".format(instance)

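	# read the instance YAML to determine the number of agents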
	with open(instance_fn) as map_file:
		map_data = yaml.load(map_file, Loader=yaml.SafeLoader)
	num_agents = len(map_data["agents"])
Example #3
          env = SingleIntegrator(param)
          # env = SingleIntegratorVelSensing(param)
          train_il(param, env, device)

          del env
          del param

      elif args.sim:
        param = run_singleintegrator.SingleIntegratorParam()
        env = SingleIntegrator(param)
        # param = run_singleintegrator_vel_sensing.SingleIntegratorVelSensingParam()
        # env = SingleIntegratorVelSensing(param)
        
        # evaluate policy
        controllers = {
          # 'exp3BarrierS{}_{}'.format(src,i): Empty_Net_wAPF(param,env,torch.load('singleintegrator/exp3BarrierS{}_{}/il_current.pt'.format(src,i))),
          'exp3EmptyS{}_{}'.format(src,i): Empty_Net_wAPF(param,env,torch.load('singleintegrator/exp3EmptyS{}_{}/il_current.pt'.format(src,i))),
        }

        # for instance in instances:
          # run_singleintegrator.run_batch(instance, controllers)

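        # evaluate every instance in parallel across 24 worker processes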
        with Pool(24) as p:
          # p.starmap(run_singleintegrator.run_batch, zip(repeat(param), repeat(env), instances, repeat(controllers)))
          p.starmap(run_singleintegrator_vel_sensing.run_batch, zip(repeat(param), repeat(env), instances, repeat(controllers)))

        # with concurrent.futures.ProcessPoolExecutor(max_workers=12) as executor:
        #   for _ in executor.map(run_singleintegrator.run_batch, instances, repeat(controllers)):
        #     pass

Example #4
import argparse
import concurrent.futures
import glob
import os
from itertools import repeat
from multiprocessing import cpu_count

# repo-local symbols (run_doubleintegrator, DoubleIntegrator, Empty_Net_wAPF,
# GoToGoalPolicy) are assumed to be importable from the surrounding project

if __name__ == '__main__':
	
	parser = argparse.ArgumentParser()
	parser.add_argument('--apf', action='store_true')
	args = parser.parse_args()

	agents_lst = [2,4,8,16,32,64]
	obst_lst = [6,12]

	if args.apf:

		folder = "doubleintegrator/apf"
		if not os.path.exists(folder):
			os.mkdir(folder)

		param = run_doubleintegrator.DoubleIntegratorParam()
		env = DoubleIntegrator(param)

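		# APF baseline: a go-to-goal policy wrapped with the artificial-potential-field safety layer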
		controller = {
			'apf': Empty_Net_wAPF(param,env,GoToGoalPolicy(param,env))}

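		# collect all instance files for the requested obstacle/agent combinations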
		files = []
		for agent in agents_lst:
			for obst in obst_lst:
				files.extend(glob.glob("singleintegrator/instances/*obst{}_agents{}_*.yaml".format(obst,agent), recursive=True))

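		# run every instance through the APF controller, one worker process per CPU core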
		with concurrent.futures.ProcessPoolExecutor(max_workers=cpu_count()) as executor:
			for _ in executor.map(run_doubleintegrator.run_batch, repeat(param), repeat(env), files, repeat(controller)):
				pass
Example #5
      if args.train:
        for cc in ['Empty', 'Barrier']:
          param = run_doubleintegrator.DoubleIntegratorParam()
          param.il_controller_class = cc
          param.il_train_model_fn = 'doubleintegrator/exp1{}_{}/il_current.pt'.format(cc,i)
          # only load the data once
          if first_training:
            param.il_load_loader_on = False
            first_training = False
          else:
            param.il_load_loader_on = True
          env = DoubleIntegrator(param)
          train_il(param, env, device)

      elif args.sim:
        # evaluate policy
        controllers = {
          'exp1Empty_{}'.format(i): Empty_Net_wAPF(param,env,torch.load('doubleintegrator/exp1Empty_{}/il_current.pt'.format(i))),
          'exp1Barrier_{}'.format(i) : torch.load('doubleintegrator/exp1Barrier_{}/il_current.pt'.format(i))
        }

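          # run instances serially (easier to debug) or in a 32-process pool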
        serial_on = True 
        if serial_on: 
          for instance in instances:
            run_doubleintegrator.run_batch(param, env, instance, controllers)

        else: 
          with Pool(32) as p:
            p.starmap(run_doubleintegrator.run_batch, zip(repeat(param), repeat(env), instances, repeat(controllers)))

Example #6
if __name__ == '__main__':

    args = parse_args()
    param = SingleIntegratorParam()
    env = SingleIntegrator(param)

    if args.il:
        run(param, env, None, None, args)
        exit()

    controllers = {
        # exp1
        'empty':
        Empty_Net_wAPF(
            param, env,
            torch.load(
                '../results/singleintegrator/exp1Empty_0/il_current.pt')),
        # 'empty': torch.load('../results/singleintegrator/exp1Empty_0/il_current.pt'),
        # 'empty': torch.load('/home/whoenig/pCloudDrive/caltech/neural_pid_results/exp1/exp1Empty_0/il_current.pt'),
        # 'barrier': torch.load('../results/singleintegrator/exp1Barrier_0/il_current.pt'),
        #
        # testing
        # 'apf': Empty_Net_wAPF(param,env,GoToGoalPolicy(param,env)),
        # 'current': torch.load(param.il_train_model_fn),
        # 'currentwapf': Empty_Net_wAPF(param,env,torch.load(param.il_train_model_fn)),
    }

    s0 = load_instance(param, env, args.instance)

    if args.batch:
        if args.controller:
Example #7
            env = SingleIntegrator(param)
            train_il(param, env, device)
            del env
            del param

        elif args.sim:
          param = run_singleintegrator.SingleIntegratorParam()
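          # exp2 sweep: set sensing radii to r and effectively remove the neighbor/obstacle caps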
          param.r_comm = r
          param.r_obs_sense = r
          param.max_neighbors = 10000 #5
          param.max_obstacles = 10000 #5

          env = SingleIntegrator(param)
          # evaluate policy
          controllers = {
            'exp2EmptyR{}td{}_{}'.format(r,td,i): Empty_Net_wAPF(param,env,torch.load('singleintegrator/exp2EmptyR{}td{}_{}/il_current.pt'.format(r,td,i))),
            'exp1BarrierR{}_td{}_{}'.format(r,td,i) : torch.load('singleintegrator/exp1Barrier_{}/il_current.pt'.format(r,td,i))  # note: the path's single '{}' is filled with r; td and i are ignored by format()
          }

          # for instance in instances:
            # run_singleintegrator.run_batch(instance, controllers)


          with Pool(12) as p:
            p.starmap(run_singleintegrator.run_batch, zip(repeat(param), repeat(env), instances, repeat(controllers)))

          # with concurrent.futures.ProcessPoolExecutor(max_workers=12) as executor:
          #   for _ in executor.map(run_singleintegrator.run_batch, instances, repeat(controllers)):
          #     pass