コード例 #1
0
 def __init__(self,
              env,
              max_episodes,
              max_steps,
              log_file=None,
              downscale=False,
              playback=True,
              filter_bad_data=False):
     """Drive the environment with a joystick and log every step.

     Blocks inside ``pyglet.app.run()`` until the application exits,
     then closes the log file and the environment.

     Args:
         env: gym-style environment; must expose ``reset``/``close``
             and ``unwrapped.frame_rate``.
         max_episodes: episode budget (stored; consumed elsewhere).
         max_steps: per-episode step budget (currently unused here).
         log_file: output log path; defaults to "dataset.log".
         downscale: image-downscaling flag (currently unused here).
         playback: playback flag (currently unused here).
         filter_bad_data: when True, downstream logging may drop
             low-reward transitions.

     Raises:
         AssertionError: if no joystick device is connected.
     """
     if not log_file:
         # Plain literal — the original used an f-string with no fields.
         log_file = "dataset.log"
     self.env = env
     self.env.reset()
     self.datagen = Logger(self.env, log_file=log_file)
     self.episode = 1
     self.max_episodes = max_episodes
     self.filter_bad_data = filter_bad_data
     #! Temporary variable setup
     self.last_reward = 0
     self.playback_buffer = []
     #! Schedule the per-frame update before entering the event loop.
     pyglet.clock.schedule_interval(self.update,
                                    1.0 / self.env.unwrapped.frame_rate,
                                    self.env)
     #! Acquire the first connected joystick and hook its button events.
     self.joysticks = pyglet.input.get_joysticks()
     assert self.joysticks, 'No joystick device is connected'
     self.joystick = self.joysticks[0]
     self.joystick.open()
     self.joystick.push_handlers(self.on_joybutton_press)
     pyglet.app.run()  # blocks until the pyglet app exits
     #! Flush the log and release the environment.
     self.datagen.close()
     self.env.close()
コード例 #2
0
    def __init__(self,
                 env,
                 max_episodes,
                 max_steps,
                 log_file=None,
                 downscale=False):
        """Initialize logging, schedule per-frame updates, run the app loop.

        Blocks in ``pyglet.app.run()`` until the application exits, then
        closes the log file and the environment.

        Args:
            env: gym-style environment exposing ``reset``/``close`` and
                ``unwrapped.frame_rate``.
            max_episodes: episode budget (stored; consumed elsewhere).
            max_steps: per-episode step budget (currently unused here).
            log_file: output log path; defaults to "dataset.log".
            downscale: downscaling flag (only stored here).
        """
        if not log_file:
            # Plain literal — the original used an f-string with no fields.
            log_file = "dataset.log"
        self.env = env
        self.env.reset()
        self.logger = Logger(self.env, log_file=log_file)
        self.episode = 1
        self.max_episodes = max_episodes
        self.downscale = downscale

        #! Enter main event loop
        print("Starting data generation")

        pyglet.clock.schedule_interval(self.update,
                                       1.0 / self.env.unwrapped.frame_rate,
                                       self.env)

        pyglet.app.run()  # blocks until the pyglet app exits

        print("App exited, closing file descriptors")
        self.logger.close()
        self.env.close()
コード例 #3
0
class HumanDriver:
    """Joystick-driven data collector for a gym-style driving environment.

    Runs inside pyglet's event loop: ``update`` is scheduled once per
    frame, reads the joystick, steps the environment, and logs each
    transition through ``Logger``. When an episode ends, the recorded
    frames can be replayed so the user decides whether to commit or
    discard the episode's log.
    """

    def __init__(self,
                 env,
                 max_episodes,
                 max_steps,
                 log_file=None,
                 downscale=False,
                 playback=True,
                 filter_bad_data=False):
        """Set up logging and joystick handling, then run the app loop.

        Args:
            env: gym-style environment exposing ``reset``/``step``/
                ``render``/``close`` and ``unwrapped.frame_rate``.
            max_episodes: episode budget (stored; not enforced here).
            max_steps: per-episode step budget (currently unused).
            log_file: output log path; defaults to "dataset.log".
            downscale: image-downscaling flag (currently unused).
            playback: when True, replay each finished episode before
                asking the user to commit or discard it.
            filter_bad_data: when True, drop transitions whose reward
                regressed by more than 0.02 versus the previous step.

        Raises:
            AssertionError: if no joystick device is connected.
        """
        if not log_file:
            # Plain literal — the original used an f-string with no fields.
            log_file = "dataset.log"
        self.env = env
        self.env.reset()
        self.datagen = Logger(self.env, log_file=log_file)
        self.episode = 1
        self.max_episodes = max_episodes
        self.filter_bad_data = filter_bad_data
        # Fix: the constructor's ``playback`` flag was previously ignored
        # and playback() consulted an undefined global ``args`` instead.
        self.playback_enabled = playback
        #! Temporary variable setup
        self.last_reward = 0
        self.playback_buffer = []
        #! Enter main event loop
        pyglet.clock.schedule_interval(self.update,
                                       1.0 / self.env.unwrapped.frame_rate,
                                       self.env)
        #! Get joystick
        self.joysticks = pyglet.input.get_joysticks()
        assert self.joysticks, 'No joystick device is connected'
        self.joystick = self.joysticks[0]
        self.joystick.open()
        self.joystick.push_handlers(self.on_joybutton_press)
        pyglet.app.run()  # blocks until the pyglet app exits
        #! Log and exit
        self.datagen.close()
        self.env.close()

    def sleep_after_reset(self, seconds):
        """Count down *seconds* on stdout, then print GO! and return."""
        for remaining in range(seconds, 0, -1):
            sys.stdout.write("\r")
            sys.stdout.write("{:2d} seconds remaining.".format(remaining))
            sys.stdout.flush()
            time.sleep(1)
        sys.stdout.write("\rGO!            \n")
        return

    def playback(self):
        """Optionally replay the episode, then commit or discard its log."""
        #! Render recorded frames.
        # Fix: use the stored constructor flag instead of a global
        # ``args`` object that is not defined in this module.
        if self.playback_enabled:
            for entry in self.playback_buffer:
                (recorded, action, reward) = entry
                x = action[0]
                z = action[1]
                canvas = cv2.cvtColor(recorded, cv2.COLOR_BGR2RGB)
                #! Speed / steering bar indicators
                cv2.rectangle(canvas, (20, 240), (50, int(240 - 220 * x)),
                              (76, 84, 255), cv2.FILLED)
                cv2.rectangle(canvas, (320, 430), (int(320 - 150 * z), 460),
                              (76, 84, 255), cv2.FILLED)

                cv2.imshow('Playback', canvas)
                cv2.waitKey(20)
        #! User interaction for log commitment
        qa = input('1 to commit, 2 to abort:        ')
        while not (qa == '1' or qa == '2'):
            qa = input('1 to commit, 2 to abort:        ')
        if qa == '2':
            self.datagen.reset_episode()
            print('Reset log. Discard current...')
        else:
            # Typo fixed in the user-facing message ("Comitting").
            print("Committing Episode")
            self.datagen.on_episode_done()
        self.playback_buffer = []  # reset playback buffer
        return

    def image_resize(self,
                     image,
                     width=None,
                     height=None,
                     inter=cv2.INTER_AREA):
        """Resize an image to a given width OR height, preserving aspect ratio.

        If both ``width`` and ``height`` are None, the image is returned
        unchanged. If both are given, ``width`` wins.
        """
        (h, w) = image.shape[:2]
        if width is None and height is None:
            return image
        if width is None:
            # Scale by the requested height.
            r = height / float(h)
            dim = (int(w * r), height)
        else:
            # Scale by the requested width.
            r = width / float(w)
            dim = (width, int(h * r))
        return cv2.resize(image, dim, interpolation=inter)

    def on_key_press(self, symbol, modifiers):
        """Keyboard handler: reset episode, reset camera, or quit.

        BACKSPACE / SLASH -> replay+commit prompt, then reset the env;
        PAGEUP -> zero the camera angle; ESCAPE / Q -> close and exit.
        """
        if symbol == key.BACKSPACE or symbol == key.SLASH:
            print('RESET')
            self.playback()
            self.env.reset()
            self.env.render()
            self.sleep_after_reset(5)
        elif symbol == key.PAGEUP:
            self.env.unwrapped.cam_angle[0] = 0
            self.env.render()
        elif symbol == key.ESCAPE or symbol == key.Q:
            self.env.close()
            sys.exit(0)

    def on_joybutton_press(self, joystick, button):
        """Controller button handler.

        Relevant button definitions:
        3 - Y - resets the environment (after the playback prompt).
        """
        # Y button
        if button == 3:
            print('RESET')
            self.playback()
            self.env.reset()
            self.env.render()
            self.sleep_after_reset(5)

    def update(self, dt, env):
        """Per-frame callback: read the joystick, step the env, log the step.

        Args:
            dt: time since the previous frame (supplied by pyglet).
            env: the environment object passed to ``schedule_interval``.
        """

        #! No joystick deflection -> nothing to record this frame.
        if round(self.joystick.z, 2) == 0.0 and round(self.joystick.y,
                                                      2) == 0.0:
            return

        #! Nominal joystick interpretation
        x = round(self.joystick.y,
                  2) * 0.9  # to ensure maximum turn/velocity ratio
        z = round(self.joystick.z, 2) * 3.0

        #! DRS enable for a straight line: button 6 forces full speed, no turn
        if self.joystick.buttons[6]:
            x = -1.0
            z = 0.0

        action = np.array([-x, -z])

        #! Step the environment; observation is 640x480 pixels
        (obs, reward, done, info) = self.env.step(action)

        if reward != -1000:  # -1000 marks an invalid pose (out of bounds)
            print('Current Command: ', action, ' speed. Score: ', reward)
            if ((reward > self.last_reward - 0.02)
                    or not self.filter_bad_data):
                print('log')

                #! Resize to the Nvidia standard input width:
                obs_distorted_DS = self.image_resize(obs, width=200)

                #! Crop to 150x200 before logging
                cropped = obs_distorted_DS[0:150, 0:200]

                # NOTICE: OpenCV changes the order of the channels !!!
                cropped_final = cv2.cvtColor(cropped, cv2.COLOR_RGB2YUV)
                self.playback_buffer.append((obs, action, reward))
                step = Step(cropped_final, reward, action, done)
                self.datagen.log(step, info)
                self.last_reward = reward
            else:
                print('Bad Training Data! Discarding...')
                self.last_reward = reward
        else:
            print('!!!OUT OF BOUND!!!')

        if done:
            self.playback()
            self.env.reset()
            self.env.render()
            self.sleep_after_reset(5)
            return

        self.env.render()
import collections
import rosbag
import cv_bridge
from copy import copy
from extract_data_functions import image_preprocessing, synchronize_data
from log_util import Logger
from log_schema import Episode, Step
import cv2

# Name of the vehicle the recorded bag files belong to.
# NOTE(review): unused in the visible lines of this example — presumably
# consumed further down; verify against the rest of the file.
VEHICLE_NAME = 'avlduck2'

# A collection of ros messages coming from a single topic.
MessageCollection = collections.namedtuple(
    "MessageCollection", ["topic", "type", "messages"])

# Module-level logger that all converted episodes are written to.
frank_logger = Logger(log_file='converted/dataset.log')


def extract_messages(path, requested_topics):
    """Open the rosbag at *path* and gather messages for *requested_topics*.

    NOTE(review): this definition appears truncated in this chunk — the
    extraction of the requested topics continues past the visible lines.
    """

    # check if path is string and requested_topics a list
    assert isinstance(path, str)
    assert isinstance(requested_topics, list)

    bag = rosbag.Bag(path)

    # Second element of get_type_and_topic_info() maps topic name -> info.
    _, available_topics = bag.get_type_and_topic_info()

    # print(available_topics)

    # check if the requested topics exist in bag's topics and if yes extract the messages only for them
コード例 #5
0
class DataGenerator:
    """Automated data collector: drives the env with pure pursuit and logs steps."""

    def __init__(self,
                 env,
                 max_episodes,
                 max_steps,
                 log_file=None,
                 downscale=False):
        """Initialize logging, schedule per-frame updates, run the app loop.

        Args:
            env: gym-style environment exposing ``reset``/``step``/
                ``closest_curve_point`` and ``unwrapped.frame_rate``.
            max_episodes: number of episodes to record before exiting.
            max_steps: per-episode step budget (currently unused here).
            log_file: output log path; defaults to "dataset.log".
            downscale: when True, log 150x200 YUV crops instead of raw
                observations.
        """
        if not log_file:
            # Plain literal — the original used an f-string with no fields.
            log_file = "dataset.log"
        self.env = env
        self.env.reset()
        self.logger = Logger(self.env, log_file=log_file)
        self.episode = 1
        self.max_episodes = max_episodes
        self.downscale = downscale

        #! Enter main event loop
        print("Starting data generation")

        pyglet.clock.schedule_interval(self.update,
                                       1.0 / self.env.unwrapped.frame_rate,
                                       self.env)

        pyglet.app.run()  # blocks until the pyglet app exits

        print("App exited, closing file descriptors")
        self.logger.close()
        self.env.close()

    def image_resize(self,
                     image,
                     width=None,
                     height=None,
                     inter=cv2.INTER_AREA):
        """Resize *image* to a given width OR height, preserving aspect ratio.

        If both ``width`` and ``height`` are None, the image is returned
        unchanged. If both are given, ``width`` wins.
        """
        (h, w) = image.shape[:2]
        if width is None and height is None:
            return image
        if width is None:
            # Scale by the requested height.
            r = height / float(h)
            dim = (int(w * r), height)
        else:
            # Scale by the requested width.
            r = width / float(w)
            dim = (width, int(h * r))
        return cv2.resize(image, dim, interpolation=inter)

    def pure_pursuite(self, env) -> List[float]:
        """Pure-pursuit steering from ground truth; returns [velocity, steering].

        (The name keeps the historical misspelling of "pursuit" because
        callers may rely on it.)
        """
        # Find the curve point closest to the agent, and the tangent there.
        closest_point, closest_tangent = env.closest_curve_point(
            env.cur_pos, env.cur_angle)

        iterations = 0

        lookup_distance = 0.5
        max_iterations = 1000
        gain = 4.0  # 2.0
        velocity = 0.35
        curve_point = None
        while iterations < max_iterations:
            # Project a point ahead along the curve tangent,
            # then find the closest curve point to that; halve the
            # lookahead each failed attempt.
            follow_point = closest_point + closest_tangent * lookup_distance
            curve_point, _ = env.closest_curve_point(follow_point,
                                                     env.cur_angle)

            # If we have a valid point on the curve, stop.
            if curve_point is not None:
                break

            iterations += 1
            lookup_distance *= 0.5

        # Compute a normalized vector to the curve point.
        point_vec = curve_point - env.cur_pos
        point_vec /= np.linalg.norm(point_vec)

        right_vec = [math.sin(env.cur_angle), 0, math.cos(env.cur_angle)]

        # Steer proportionally to the alignment between the agent's
        # right vector and the direction toward the follow point.
        dot = np.dot(right_vec, point_vec)
        steering = gain * -dot

        return [velocity, steering]

    def update(self, dt, env):
        """Per-frame callback: compute an action, step the env, log the step.

        Exits the process via ``sys.exit()`` once ``max_episodes``
        episodes have been committed to the log.

        Args:
            dt: time since the previous frame (supplied by pyglet).
            env: the environment object passed to ``schedule_interval``.
        """

        action = self.pure_pursuite(env)

        #! Step the environment; observation is 640x480 pixels
        obs, reward, done, info = env.step(action)

        if reward == REWARD_INVALID_POSE:
            print("Out of bound")
        else:
            output_img = obs
            if self.downscale:
                #! Resize to the Nvidia standard width, then crop to 150x200:
                obs_distorted_DS = self.image_resize(obs, width=200)
                cropped = obs_distorted_DS[0:150, 0:200]

                # NOTICE: OpenCV changes the order of the channels !!!
                output_img = cv2.cvtColor(cropped, cv2.COLOR_RGB2YUV)

            step = Step(output_img, reward, action, done)
            self.logger.log(step, info)

        if done:
            self.logger.on_episode_done()
            print(f"episode {self.episode}/{self.max_episodes}")
            self.episode += 1
            env.reset()
            # Fix: compare against the stored budget instead of the
            # undefined global ``args.nb_episodes``.
            if self.logger.episode_count >= self.max_episodes:
                print("Training completed !")
                sys.exit()
            time.sleep(1)
            return
コード例 #6
0
import rosbag
import cv_bridge
from copy import copy
from extract_data_functions import image_preprocessing, synchronize_data
from log_util import Logger
from log_schema import Episode, Step
import cv2

# Fix: ``collections`` is used below but was never imported in this example.
import collections

# Change this based on the bag files being used.
VEHICLE_NAME = 'maserati'

# A collection of ros messages coming from a single topic.
MessageCollection = collections.namedtuple(
    "MessageCollection", ["topic", "type", "messages"])

# Module-level logger that all converted episodes are written to,
# one output file per vehicle.
frank_logger = Logger(log_file=f'converted/{VEHICLE_NAME}')


def extract_messages(path, requested_topics):

    # check if path is string and requested_topics a list
    assert isinstance(path, str)
    assert isinstance(requested_topics, list)

    bag = rosbag.Bag(path)

    _, available_topics = bag.get_type_and_topic_info()
    # check if the requested topics exist in bag's topics and if yes extract the messages only for them
    extracted_messages = {}
    for topic in requested_topics:
        if topic in available_topics: