# standard imports used below; DDPG, Logger, env, state_dim, action_dim and
# max_action are assumed to be provided by the surrounding Duckietown RL script
import time

import numpy as np
import torch

# Initialize policy
expert = DDPG(state_dim, action_dim, max_action, net_type="dense")
expert.load("model-here",
            directory="../duckietown_rl/pytorch_models",
            for_inference=True)

# Reset the environment (it is assumed to be constructed earlier in the script)
env.reset()
# Get features (state representation) for the RL agent
obs = env.get_features()
EPISODES, STEPS = 20, 1000
DEBUG = False

# note: the Logger will record the collected samples to the log file below
logger = Logger(env, log_file=f'train-{int(EPISODES*STEPS/1000)}k.log')

start_time = time.time()
print(f"[INFO] Starting to collect logs for {EPISODES} episodes of {STEPS} steps each...")
with torch.no_grad():
    # let's collect our samples
    for episode in range(0, EPISODES):
        for steps in range(0, STEPS):
            # we use our 'expert' to predict the next action.
            action = expert.predict(np.array(obs))
            # Apply the action
            observation, reward, done, info = env.step(action)
            # Get features (state representation) for the RL agent
            obs = env.get_features()
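            # sketch, not part of the original snippet: persist the collected sample.
            # The baseline Logger is assumed to expose log(...) and on_episode_done();
            # check _loggers.py for the exact signatures before relying on this.
            logger.log(observation, action, reward, done, info)
            if done:
                logger.on_episode_done()
                env.reset()
                obs = env.get_features()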
Code Example #2
# cv2 is needed for the resize below; DuckietownEnv, PurePursuitExpert, Logger,
# get_user_direction, EPISODES, STEPS and USER_INPUT are assumed to be defined
# earlier in the source file this snippet comes from
import cv2

DEBUG = True

#for map_name in ["loop_obstacles"]:
for map_name in ["udem1"]:
    # check the Duckietown Gym documentation: there are many maps of different complexity
    env = DuckietownEnv(
        map_name=map_name,
        max_steps=EPISODES * STEPS,
        distortion=True,
        domain_rand=False)

    # this is an imperfect demonstrator... I'm sure you can construct a better one.
    expert = PurePursuitExpert(env=env)

    # note: the Logger will record the demonstration samples to the log file below
    logger = Logger(env, log_file='train.log')

    # let's collect our samples
    for episode in range(0, EPISODES):
        for steps in range(0, STEPS):

            # default to no user input so that 'key' is always defined
            key = None
            if USER_INPUT:
                key = get_user_direction()

            try:
                # we use our 'expert' to predict the next action.
                action = expert.predict(None, user_input=key)
                observation_big, reward, done, info = env.step(action)
                # we can resize the image here
                observation = cv2.resize(observation_big, (80, 60))
                # NOTICE: OpenCV changes the order of the channels !!!
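                # sketch, not part of the original snippet: swap the channel
                # order back to RGB if the frame is used outside OpenCV
                observation = cv2.cvtColor(observation, cv2.COLOR_BGR2RGB)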
Code Example #3
import numpy as np
import pandas as pd
import os
import collections
import rosbag
import cv_bridge
from copy import copy
from extract_data_functions import image_preprocessing, synchronize_data
from _loggers import Logger
import cv2

# A collection of ros messages coming from a single topic.
MessageCollection = collections.namedtuple("MessageCollection",
                                           ["topic", "type", "messages"])

frank_logger = Logger(log_file='training_data.log')


def extract_messages(path, requested_topics):

    # check that path is a string and requested_topics is a list
    assert isinstance(path, str)
    assert isinstance(requested_topics, list)

    bag = rosbag.Bag(path)

    _, available_topics = bag.get_type_and_topic_info()

    #print(available_topics)

    # check that the requested topics exist among the bag's topics and, if so, extract the messages only for them
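    # sketch of a plausible continuation (not from the original snippet):
    # only names already introduced above are used; the control flow is assumed
    for topic in requested_topics:
        assert topic in available_topics, "topic %s not found in bag" % topic

    # gather the messages of each requested topic into a MessageCollection
    extracted = {
        topic: MessageCollection(topic=topic,
                                 type=available_topics[topic].msg_type,
                                 messages=[])
        for topic in requested_topics
    }
    for topic, msg, t in bag.read_messages(topics=requested_topics):
        extracted[topic].messages.append(msg)

    bag.close()
    return extracted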