Example #1
        # two bounded output heads: sigmoid keeps action0 in [0, 1], tanh keeps action1 in [-1, 1]
        action0 = torch.sigmoid(self.linear3_1(x))
        action1 = torch.tanh(self.linear3_2(x))
        action = torch.cat([action0, action1], dim=1)
        return action

    def select_action(self, state, noiseFlag = False):
        if noiseFlag:
            # perturb the deterministic policy output with exploration noise and return the noisy action
            action = self.forward(state)
            action += torch.tensor(self.noise.get_noise(), dtype=torch.float32, device=config['device']).unsqueeze(0)
            return action
        return self.forward(state)

configName = 'config.json'
with open(configName,'r') as f:
    config = json.load(f)

env = ActiveParticleEnv('config.json',1)

N_S = 2              # state dimension fed to the actor
N_A = env.nbActions  # number of action components reported by the environment

netParameter = dict()
netParameter['n_feature'] = N_S
netParameter['n_hidden'] = 128
netParameter['n_output'] = N_A

actorNet = Actor(netParameter['n_feature'],
                 netParameter['n_hidden'],
                 netParameter['n_output'])

actorTargetNet = deepcopy(actorNet)
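
The snippet begins inside the actor's forward pass, so the class header and the hidden layers are not shown. The sketch below is one plausible reconstruction of such an Actor, assuming two hidden layers and a split output head; the layer names other than linear3_1/linear3_2 and the head sizes are assumptions, not taken from the original repository.

import torch
import torch.nn as nn
import torch.nn.functional as F

class Actor(nn.Module):
    # hypothetical reconstruction of the network the snippet continues
    def __init__(self, n_feature, n_hidden, n_output):
        super(Actor, self).__init__()
        self.linear1 = nn.Linear(n_feature, n_hidden)
        self.linear2 = nn.Linear(n_hidden, n_hidden)
        # split output head: one sigmoid-bounded component, the rest tanh-bounded (assumed split)
        self.linear3_1 = nn.Linear(n_hidden, 1)
        self.linear3_2 = nn.Linear(n_hidden, n_output - 1)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        action0 = torch.sigmoid(self.linear3_1(x))
        action1 = torch.tanh(self.linear3_2(x))
        return torch.cat([action0, action1], dim=1)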
Example #2
    def select_action(self, state, noiseFlag=False):
        if noiseFlag:
            action = self.forward(state)
            # perturb the policy output with exploration noise, then clamp the result to [0, 1]
            action += torch.tensor(self.noise.get_noise(),
                                   dtype=torch.float32,
                                   device=config['device']).unsqueeze(0)
            action = torch.clamp(action, 0, 1)
            return action
        return self.forward(state)


configName = 'config.json'
with open(configName, 'r') as f:
    config = json.load(f)

env = ActiveParticleEnv('config.json', 1)

N_S = 2
N_A = env.nbActions

netParameter = dict()
netParameter['n_feature'] = N_S
netParameter['n_hidden'] = 100
netParameter['n_output'] = N_A

actorNet = Actor(netParameter['n_feature'], netParameter['n_hidden'],
                 netParameter['n_output'])

actorTargetNet = deepcopy(actorNet)

criticNet = Critic(netParameter['n_feature'] + N_A, netParameter['n_hidden'])  # critic input is the state concatenated with the action
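
Both select_action variants call self.noise.get_noise(), but the noise object itself is not shown in either example. In DDPG-style agents this is commonly Ornstein-Uhlenbeck noise; the class below is a generic sketch under that assumption, and the name OUNoise and its parameter values are hypothetical rather than taken from the source.

import numpy as np

class OUNoise:
    # hypothetical Ornstein-Uhlenbeck exploration noise; not from the original repository
    def __init__(self, action_dim, mu=0.0, theta=0.15, sigma=0.2):
        self.action_dim = action_dim
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        self.state = np.ones(action_dim) * mu

    def reset(self):
        self.state = np.ones(self.action_dim) * self.mu

    def get_noise(self):
        # mean-reverting update: dx = theta * (mu - x) + sigma * N(0, 1)
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(self.action_dim)
        self.state = self.state + dx
        return self.state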
Example #3
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May  1 23:55:51 2019

@author: yangyutu123
"""

from activeParticleEnv import ActiveParticleEnv
import numpy as np
import math
import random

#import activeParticleSimulatorPython as model

env = ActiveParticleEnv('config_obs.json',1)

step = 20

state = env.reset()
print(state)

for i in range(step):
    state = env.currentState
    # planar velocity components along the current orientation angle (state[2]); computed here but not passed to step
    u = math.cos(state[2])*0.1
    v = math.sin(state[2])*0.1
    # constant angular command used as the action
    w = 1.0
    nextState, reward, action, info = env.step(np.array([w]))
    print(nextState)
    print(info)
    #if i%2 == 0 and i < 10:
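
The loop above always sends the constant command w = 1.0. For a quick sanity check of the same ActiveParticleEnv interface, a rollout with random angular commands could look like the sketch below; the action range [-1, 1] and the step count are arbitrary choices for illustration, not values from the original script.

import numpy as np
from activeParticleEnv import ActiveParticleEnv

env = ActiveParticleEnv('config_obs.json', 1)
state = env.reset()

for i in range(20):
    # sample a random single-component control, mirroring env.step(np.array([w])) above
    w = np.random.uniform(-1.0, 1.0)
    nextState, reward, action, info = env.step(np.array([w]))
    print(i, nextState, reward)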