Example #1
from env import resume_env
from RemoteEnvironmentServer import RemoteEnvironmentServer  # assumed module path


def launch_server(host, port):
    # rank 0 prints and dumps verbosely; higher ranks stay quieter, to avoid
    # cluttering the terminal (`rank` is assumed to be defined at module level)
    if rank == 0:
        print("launch with a lot of output, this is rank 0")
        tensorforce_environment = resume_env(plot=False, step=100, dump=100)

    else:
        print("Launch with less output, this is higher rank")
        # still dump so that the CSVs are present; this looks messy on the command line
        tensorforce_environment = resume_env(plot=False, step=100, dump=100)

    RemoteEnvironmentServer(tensorforce_environment, host=host, port=port)
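`rank` is not defined inside the function; a minimal sketch of how it could be supplied, assuming the servers are started as MPI processes via mpi4py (the host and port scheme are illustrative):

from mpi4py import MPI

rank = MPI.COMM_WORLD.Get_rank()

if __name__ == "__main__":
    # one environment server per MPI process; the base port is an assumption
    launch_server(host="localhost", port=7010 + rank)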
Example #2
def launch_server(host, port, verbose=0):
    # `rank` is assumed to be defined in the enclosing module (e.g. the MPI rank)
    tensorforce_environment = resume_env(rank)

    RemoteEnvironmentServer(tensorforce_environment,
                            host=host,
                            port=port,
                            verbose=verbose)
Example #3
def launch_server(host, port):
    # rank 0 runs verbosely; higher ranks stay quiet, to avoid cluttering the
    # terminal (`rank` is assumed to be defined at module level)
    if rank == 0:
        print("launch with a lot of output, this is rank 0")
        tensorforce_environment = resume_env(plot=False, dump_debug=1)
        RemoteEnvironmentServer(tensorforce_environment, host=host, port=port)

    else:
        print("Launch with less output, this is higher rank")
        tensorforce_environment = resume_env(plot=False,
                                             dump_debug=1,
                                             dump_CL=False)  # No CL output
        RemoteEnvironmentServer(tensorforce_environment,
                                host=host,
                                port=port,
                                verbose=0)
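Before pointing training clients at these servers, it can help to check that the sockets are actually listening. A stdlib-only sketch (the helper name is ours, not part of the project):

import socket
import time

def wait_for_server(host, port, timeout=60.0):
    # poll until a TCP connection to (host, port) succeeds or the timeout expires
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1.0):
                return True
        except OSError:
            time.sleep(0.5)
    return False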
Example #4
'''
Perform a single run of the flow without control
'''
import os
import socket
import numpy as np
import csv

from tensorforce.agents import Agent
from tensorforce.execution import ParallelRunner

from env import resume_env, nb_actuations, simulation_duration

example_environment = resume_env(plot=False,
                                 dump_CL=100,
                                 dump_debug=1,
                                 dump_vtu=50,
                                 single_run=True)

deterministic = True

network = [dict(type='dense', size=512), dict(type='dense', size=512)]

# remove stale evaluation CSVs so this run starts from a clean slate
if os.path.exists("saved_models/test_strategy.csv"):
    os.remove("saved_models/test_strategy.csv")

if os.path.exists("saved_models/test_strategy_avg.csv"):
    os.remove("saved_models/test_strategy_avg.csv")
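# For reference, a sketch of how rows could be appended to the CSVs that the
# run recreates (column layout and helper name are assumptions, not the
# project's actual schema):
def append_strategy_row(path, name, episode, reward):
    # write a header only when creating the file
    new_file = not os.path.exists(path)
    with open(path, "a", newline="") as f:
        writer = csv.writer(f, delimiter=";")
        if new_file:
            writer.writerow(["Name", "Episode", "Reward"])
        writer.writerow([name, episode, reward])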


def one_run():
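    # The original example is truncated at this point; what follows is a
    # minimal body sketch, assuming the Tensorforce environment API
    # (reset/execute) and that a zero action vector (2 jets assumed) means
    # "no control".
    states = example_environment.reset()
    terminal = False
    while not terminal:
        actions = np.zeros(2)  # zero actuation on both jets (assumption)
        states, terminal, reward = example_environment.execute(actions=actions)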
Example #5
import os
import csv

from env import resume_env
# assumed source of the printi / printiv helpers used below:
from printind.printind_function import printi, printiv

"""
import sys
import os
cwd = os.getcwd()
sys.path.append(cwd + "/../Simulation/")

from Env2DCylinder import Env2DCylinder
"""

printi("resume env")

environment = resume_env(plot=500, dump=10, single_run=True)
deterministic = True

printi("define network specs")

network_spec = [
    dict(type='dense', size=512),
    dict(type='dense', size=512),
]

printi("define agent")

printiv(environment.states)
printiv(environment.actions)
printiv(network_spec)
Example #6
import env
env.resume_env(plot=500, remesh=True)
Example #7
import env
env.resume_env(plot=False, remesh=True)
Example #8
import env
env.resume_env(plot=False, remesh=True, dump_debug=1, dump_vtu=50)
Example #9
import numpy as np
from tensorforce.agents import PPOAgent
from tensorforce.execution import Runner

from env import resume_env
# assumed source of the printi / printiv helpers used below:
from printind.printind_function import printi, printiv

"""
import sys
import os
cwd = os.getcwd()
sys.path.append(cwd + "/../Simulation/")

from Env2DCylinder import Env2DCylinder
"""

printi("resume env")

environment = resume_env(plot=False, step=50, dump=100)

printi("define network specs")

network_spec = [
    dict(type='dense', size=512),
    dict(type='dense', size=512),
]

printi("define agent")

printiv(environment.states)
printiv(environment.actions)
printiv(network_spec)

agent = PPOAgent(
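    # truncated in the source; a plausible completion using the old
    # Tensorforce 0.x API that this example targets (hyperparameters below
    # are illustrative assumptions, not the project's exact settings):
    states_spec=environment.states,
    actions_spec=environment.actions,
    network_spec=network_spec,
    batch_size=20,
    step_optimizer=dict(type='adam', learning_rate=1e-3),
)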
Example #10
import numpy as np
from tensorforce.agents import PPOAgent
from tensorforce.execution import Runner

from env import resume_env
# assumed source of the printi / printiv helpers used below:
from printind.printind_function import printi, printiv

"""
import sys
import os
cwd = os.getcwd()
sys.path.append(cwd + "/../Simulation/")

from Env2DCylinder import Env2DCylinder
"""

printi("resume env")

environment = resume_env(plot=200, step=100, dump=100)

printi("define network specs")

network_spec = [
    dict(type='dense', size=512),
    dict(type='dense', size=512),
]

printi("define agent")

printiv(environment.states)
printiv(environment.actions)
printiv(network_spec)

agent = PPOAgent(
Example #11
from tensorforce.execution import Runner
import os
import csv

from env import resume_env
# assumed source of the printi / printiv helpers used below:
from printind.printind_function import printi, printiv
"""
import sys
import os
cwd = os.getcwd()
sys.path.append(cwd + "/../Simulation/")

from Env2DCylinder import Env2DCylinder
"""

printi("resume env")

environment = resume_env(dump=10, single_run=True)
deterministic = True

printi("define network specs")

network_spec = [
    dict(type='dense', size=512),
    dict(type='dense', size=512),
]

printi("define agent")

printiv(environment.states)
printiv(environment.actions)
printiv(network_spec)
Example #12
#
# The default number of run time steps is tuned for Re=200 and may take a
# long time; you can change the value of max_nbr_actuations to shorten it.
#
#----------------------------------------------------------------
import os
import socket
import numpy as np
import csv

from tensorforce.agents import Agent
from tensorforce.execution import ParallelRunner

from env import resume_env, nb_actuations

example_environment = resume_env(plot=False, dump=100, single_run=True)

deterministic = True

network = [dict(type='dense', size=512), dict(type='dense', size=512)]

agent = Agent.create(
    # Agent + Environment
    agent='ppo', environment=example_environment, max_episode_timesteps=nb_actuations,
    # TODO: nb_actuations could be specified by Environment.max_episode_timesteps() if it makes sense...
    # Network
    network=network,
    # Optimization
    batch_size=20, learning_rate=1e-3, subsampling_fraction=0.2, optimization_steps=25,
    # Reward estimation
    likelihood_ratio_clipping=0.2, estimate_terminal=True,  # ???
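    # truncated in the source; a plausible close of the call (the argument
    # below and its value are illustrative assumptions):
    entropy_regularization=0.01,
)

# Sketch of one deterministic evaluation episode with the created agent;
# `independent=True` acts without storing experience or updating weights.
states = example_environment.reset()
terminal = False
while not terminal:
    actions = agent.act(states=states, independent=True)
    states, terminal, reward = example_environment.execute(actions=actions)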