Example #1
# Restrict the run to a single map, write the patched config, and rebuild the environment
config['multiMapProbs'] = [1.0]

with open('config_test.json', 'w') as f:
    json.dump(config, f)

agent.env = ActiveParticle3DEnv('config_test.json', 1,
                                obstacleConstructorCallBack)

dynamicObsMover = DynamicObstacleMover(agent.env)

# Rollout settings: straight-line path guide toward a target at z = 499
targets = [[0, 0, 499]]
finalTarget = [0, 0, 499]
nTargets = len(targets)
nTraj = 2
endStep = 500
guide = PathGuiderStraightLine()

recorder = []
for i in range(nTraj):
    print(i)
    # Place the target from the path guide, reset the environment, and set up the dynamic obstacles
    target = guide.getTrajPos()
    agent.env.config['targetState'] = target
    state = agent.env.reset()
    dynamicObsMover.initialize()
    dynamicObsMover.saveObstacles()

    done = False
    rewardSum = 0
    stepCount = 0
    info = ([i, stepCount] + agent.env.currentState.tolist()
            + agent.env.targetState.tolist() + [0.0 for _ in range(N_A)])
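
All three examples repeat the same patch-the-config-and-rebuild boilerplate. As a minimal sketch, a hypothetical helper such as rebuild_env below could factor that out; only the ActiveParticle3DEnv constructor arguments are taken from the listings, the helper itself is an assumption.

import json

def rebuild_env(config, overrides, path='config_test.json'):
    # Hypothetical helper (not part of the original code): apply the overrides,
    # write the patched config to disk, and construct a fresh environment.
    config.update(overrides)
    with open(path, 'w') as f:
        json.dump(config, f)
    # Same constructor call as in the examples above.
    return ActiveParticle3DEnv(path, 1, obstacleConstructorCallBack)

# Usage mirroring Example #1:
# agent.env = rebuild_env(config, {'multiMapProbs': [1.0]})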
Example #2
config['multiMapProbs'] = [1.0]

with open('config_test.json', 'w') as f:
    json.dump(config, f)

agent.env = ActiveParticle3DEnv('config_test.json', 1,
                                obstacleConstructorCallBack)

finalTarget = [0, 0, 499]

nTraj = 20
endStep = 500

recorder = []

guide = PathGuiderStraightLine()

for i in range(nTraj):
    print(i)
    guide.reset()
    target = guide.getTrajPos()
    agent.env.config['targetState'] = target
    state = agent.env.reset()

    done = False
    rewardSum = 0
    stepCount = 0
    info = ([i, stepCount] + agent.env.currentState.tolist()
            + agent.env.targetState.tolist() + [0.0 for _ in range(N_A)])
    recorder.append(info)
    for stepCount in range(endStep):  # per-step body is truncated in this excerpt; a hypothetical continuation is sketched below
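
The excerpt cuts off at the top of the per-step loop. Below is a minimal sketch of how such a rollout body typically continues, assuming a Gym-style agent.env.step return of (state, reward, done, info) and a hypothetical agent.select_action method; neither is shown in the original code.

    # Hypothetical continuation (not in the original listing).
    # Assumes a Gym-style step() return and an agent.select_action method.
    for stepCount in range(endStep):
        action = agent.select_action(state)               # assumed agent API
        state, reward, done, _ = agent.env.step(action)   # assumed (state, reward, done, info) return
        rewardSum += reward
        info = ([i, stepCount + 1] + agent.env.currentState.tolist()
                + agent.env.targetState.tolist() + list(action))
        recorder.append(info)
        if done:
            break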
Example #3
# Disable the vessel cap for this test run
config['vesselCapFlag'] = False

with open('config_test.json', 'w') as f:
    json.dump(config, f)

agent.env = ActiveParticle3DEnv('config_test.json', 1,
                                obstacleConstructorCallBack)

finalTarget = [0, 0, 599]

nTraj = 5
endStep = 500

recorder = []

guide = PathGuiderStraightLine()

for i in range(nTraj):
    print(i)
    guide.reset()
    guide.set_t(7)
    target = guide.getTrajPos()
    agent.env.config['targetState'] = target
    state = agent.env.reset()

    done = False
    rewardSum = 0
    stepCount = 0
    info = ([i, stepCount] + agent.env.currentState.tolist()
            + agent.env.targetState.tolist() + [0.0 for _ in range(N_A)])
    recorder.append(info)
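
Each recorder row collects the trajectory index, step count, current state, target state, and per-action placeholders. A short sketch of dumping those rows for later analysis follows; the NumPy round-trip and the file name are assumptions, not part of the original script.

import numpy as np

# Assumed post-processing (not in the original listing): stack the recorded
# rows into an array and write them out as CSV for trajectory analysis.
data = np.array(recorder, dtype=float)
np.savetxt('test_trajectories.csv', data, delimiter=',')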