Example #1
def run():
    # Create the Qt application, install the generated UI on a plain Form
    # widget, attach a controller built from that UI, and enter the event loop.
    app = QtWidgets.QApplication(sys.argv)
    form = Form()
    ui = Ui_Form()
    ui.setupUi(form)
    ui.set_controller(controller.make_controller(ui))
    form.show()
    sys.exit(app.exec_())
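This startup function relies on names defined elsewhere in its project: a Form widget class, a Designer-generated Ui_Form extended with a set_controller hook (Example #2 shows one such implementation), and a controller module exposing make_controller. A minimal sketch of those pieces, with every body assumed rather than taken from the project:

import sys
from PyQt5 import QtWidgets  # assumption: PyQt5; the project may target PyQt4/PySide instead
import controller            # assumption: module providing make_controller(ui)

class Form(QtWidgets.QWidget):
    """Plain top-level widget the generated UI is installed onto."""

class Ui_Form(object):
    def setupUi(self, form):
        # Normally generated by pyuic from a .ui file.
        self.layout = QtWidgets.QVBoxLayout(form)

    def set_controller(self, cont):
        # Hypothetical hook: keep the controller reachable from widget callbacks.
        self.controller = cont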
Example #2
    def set_controller(self, arg, clear=True):
        # Accept either a saved-controller name (str) or a config dict whose
        # "name" key selects the controller type; the remaining keys are
        # passed through to make_controller.
        if isinstance(arg, str):
            cont = load_controller(arg)
        else:
            name = arg.pop("name")
            cont = make_controller(name, arg)

        # Replace the active controllers, or stack the new one on top.
        if clear:
            self.controllers = [cont]
        else:
            self.controllers.append(cont)
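A short usage sketch for the method above; the panel object, controller names, and the extra config key are hypothetical:

# Load a previously saved controller by name and make it the only active one.
panel.set_controller("joystick")

# Build a controller from a config dict ("name" picks the type, the rest is
# passed through) and stack it on top of the existing ones.
panel.set_controller({"name": "pid", "gain": 0.5}, clear=False)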
Example #3
    'Y3': Axis(bpr, 'BRICK1CS3:Y', 8, 3)
}

brick_groups = {
    'g1': '1,2->A,B',
    'g2': '3,4->I',
    'g3': 'MIXED CS3',
    'g4': 'MIXED CS2'
}

brick_cs = {
    'cs2': CoordSys('BRICK1:CS2', 2, 'CS2'),
    'cs3': CoordSys('BRICK1:CS3', 3, 'CS3')
}

MyBrick = make_controller(brick_axes, brick_cs, brick_groups, brick_pv_root)


class TestBrick(MyBrick):
    # noinspection PyAttributeOutsideInit
    def startup(self, init=True):
        a = self.axes
        r = self.real_axes

        # axis aliases
        self.height = self.X3
        self.angle = self.Y3
        self.jack1 = self.m3
        self.jack2 = self.m4

        # useful aliases for beamline test authoring
Example #4
File: train.py Project: hlynurd/wm-norb
def initialize_settings(sigma_init=0.1, sigma_decay=0.9999):
    global population, filebase, game, controller, num_params, es, PRECISION, SOLUTION_PACKET_SIZE, RESULT_PACKET_SIZE
    population = num_worker * num_worker_trial
    filedir = 'results/{}/{}/log/'.format(exp_name, env_name)
    if not os.path.exists(filedir):
        os.makedirs(filedir)
    filebase = filedir + env_name + '.' + optimizer + '.' + str(
        num_episode) + '.' + str(population)
    controller = make_controller(args=config_args)

    num_params = controller.param_count
    print("size of model", num_params)

    if optimizer == 'ses':
        ses = PEPG(num_params,
                   sigma_init=sigma_init,
                   sigma_decay=sigma_decay,
                   sigma_alpha=0.2,
                   sigma_limit=0.02,
                   elite_ratio=0.1,
                   weight_decay=0.005,
                   popsize=population)
        es = ses
    elif optimizer == 'ga':
        ga = SimpleGA(num_params,
                      sigma_init=sigma_init,
                      sigma_decay=sigma_decay,
                      sigma_limit=0.02,
                      elite_ratio=0.1,
                      weight_decay=0.005,
                      popsize=population)
        es = ga
    elif optimizer == 'cma':
        cma = CMAES(num_params, sigma_init=sigma_init, popsize=population)
        es = cma
    elif optimizer == 'pepg':
        pepg = PEPG(num_params,
                    sigma_init=sigma_init,
                    sigma_decay=sigma_decay,
                    sigma_alpha=0.20,
                    sigma_limit=0.02,
                    learning_rate=0.01,
                    learning_rate_decay=1.0,
                    learning_rate_limit=0.01,
                    weight_decay=0.005,
                    popsize=population)
        es = pepg
    else:
        oes = OpenES(num_params,
                     sigma_init=sigma_init,
                     sigma_decay=sigma_decay,
                     sigma_limit=0.02,
                     learning_rate=0.01,
                     learning_rate_decay=1.0,
                     learning_rate_limit=0.01,
                     antithetic=antithetic,
                     weight_decay=0.005,
                     popsize=population)
        es = oes

    PRECISION = 10000
    SOLUTION_PACKET_SIZE = (5 + num_params) * num_worker_trial
    RESULT_PACKET_SIZE = 4 * num_worker_trial
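The es object built above follows the ask/tell interface of estool-style solvers; a sketch of one generation of the outer loop, with the evaluate rollout helper assumed rather than taken from train.py:

# Sample a population of candidate parameter vectors, score each one on the
# environment, and feed the rewards back so the solver updates its search
# distribution.
solutions = es.ask()                                  # (population, num_params)
rewards = [evaluate(params) for params in solutions]  # assumed rollout helper
es.tell(rewards)

best_params, best_reward, curr_reward, sigma = es.result()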
Example #5
import numpy as np
import random
import os
import gym

from env import make_env
from controller import make_controller

from utils import PARSER

args = PARSER.parse_args()
dir_name = 'results/{}/{}/record'.format(args.exp_name, args.env_name)
if not os.path.exists(dir_name):
    os.makedirs(dir_name)

controller = make_controller(args=args)

total_frames = 0
env = make_env(args=args,
               render_mode=args.render_mode,
               full_episode=args.full_episode,
               with_obs=True,
               load_model=False)

for trial in range(args.max_trials):
    try:
        random_generated_int = random.randint(0, 2**31 - 1)
        filename = dir_name + "/" + str(random_generated_int) + ".npz"
        recording_frame = []
        recording_action = []
        recording_reward = []
Example #6
def main():
    print("Setting niceness to 19")
    if "nice" in os.__dict__:
        os.nice(19)

    args = PARSER.parse_args()

    def make_env_with_args():
        return make_env(args=args, keep_image=True, wrap_rnn=False)

    dir_name = get_path(args, "record", create=True)

    controller = None
    if args.extract_use_controller:
        controller = make_controller(args=args)
    env = make_env_with_args()

    has_camera_data = isinstance(
        env.observation_space,
        gym.spaces.Dict) and "camera" in env.observation_space.spaces

    format_str = "[{success:s}] {done:s} after {frames:4d} frames, reward {reward:6.1f} " \
                 "(Total: {total_frames:7d} frames, {successful_trials:3d}/{total_trials:3d} successful trials)"

    total_frames = 0
    successful_trials = 0
    for trial in range(args.max_trials):
        try:
            seed = random.randint(0, 2**31 - 1)
            filename = dir_name / (str(seed) + ".npz")

            np.random.seed(seed)
            env.seed(seed)

            recording_image = []
            recording_camera = []
            recording_action = []
            recording_reward = []
            recording_done = []

            # random policy
            if args.extract_use_controller:
                controller.init_random_model_params(stddev=np.random.rand() *
                                                    0.01)
            repeat_action = np.random.randint(1, 11)
            action = [0] * args.a_width

            total_reward = 0
            obs = env.reset()

            frame = 0
            ended_early = False
            for frame in range(args.max_frames):
                # Save current observation
                recording_image.append(obs["image"])
                if has_camera_data:
                    recording_camera.append(obs["camera"])

                # Get next action (random)
                if not args.extract_repeat_actions or frame % repeat_action == 0:
                    if args.extract_use_controller:
                        action = controller.get_action(obs["features"])
                    else:
                        action = np.random.rand(args.a_width) * 2.0 - 1.0
                    if args.extract_repeat_actions:
                        repeat_action = np.random.randint(1, 11)

                # Save action
                recording_action.append(action)

                # Perform action
                obs, reward, done, _info = env.step(action)
                total_reward += reward

                # Save reward and done flag
                recording_reward.append(reward)
                recording_done.append(done)

                # Stop when done
                if done:
                    ended_early = True
                    break

            total_frames += (frame + 1)
            enough_frames = len(recording_image) >= args.min_frames

            # Save episode to disk (if it has required minimum length)
            if enough_frames:
                successful_trials += 1

                recording_image = np.array(recording_image, dtype=np.uint8)
                recording_camera = np.array(recording_camera, dtype=np.float16)
                recording_action = np.array(recording_action, dtype=np.float16)
                recording_reward = np.array(recording_reward, dtype=np.float16)
                recording_done = np.array(recording_done, dtype=bool)

                data = {
                    "image": recording_image,
                    "action": recording_action,
                    "reward": recording_reward,
                    "done": recording_done
                }
                if has_camera_data:
                    data["camera"] = recording_camera

                np.savez_compressed(str(filename), **data)

            print(
                format_str.format(success="O" if enough_frames else " ",
                                  done="Done" if ended_early else "Stop",
                                  frames=frame + 1,
                                  reward=total_reward,
                                  total_frames=total_frames,
                                  successful_trials=successful_trials,
                                  total_trials=trial + 1))

        except gym.error.Error as e:
            print("Gym raised an error: " + str(e))
            env.close()
            env = make_env_with_args()

    env.close()
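The episodes written by this loop can be read back with numpy; a minimal sketch, assuming one of the .npz files produced above (the path and seed are hypothetical):

import numpy as np

# np.savez_compressed stores each array under its keyword name.
episode = np.load("record/1234.npz")
images = episode["image"]    # uint8 image frames
actions = episode["action"]  # float16 actions, one row per frame
rewards = episode["reward"]  # float16 per-frame rewards
dones = episode["done"]      # per-frame done flags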