Example #1
    def __init__(self, task, train_exp_dir, ite, logdir=None):
        self.task = task
        self.policy = LoadPolicy('../utils/models/{}/{}'.format(task, train_exp_dir), ite)
        self.env = CrossroadEnd2end(training_task=self.task, mode='testing')
        self.model = EnvironmentModel(self.task, mode='selecting')
        self.recorder = Recorder()
        self.episode_counter = -1
        self.step_counter = -1
        self.obs = None
        self.stg = MultiPathGenerator()
        self.step_timer = TimerStat()
        self.ss_timer = TimerStat()
        self.logdir = logdir
        if self.logdir is not None:
            config = dict(task=task, train_exp_dir=train_exp_dir, ite=ite)
            with open(self.logdir + '/config.json', 'w', encoding='utf-8') as f:
                json.dump(config, f, ensure_ascii=False, indent=4)
        self.fig = plt.figure(figsize=(8, 8))
        plt.ion()
        self.hist_posi = []
        self.old_index = 0
        self.path_list = self.stg.generate_path(self.task)
        # ------------------build graph for tf.function in advance-----------------------
        for i in range(3):
            obs = self.env.reset()
            obs = tf.convert_to_tensor(obs[np.newaxis, :], dtype=tf.float32)
            self.is_safe(obs, i)
        obs = self.env.reset()
        obs_with_specific_shape = np.tile(obs, (3, 1))
        self.policy.run_batch(obs_with_specific_shape)
        self.policy.obj_value_batch(obs_with_specific_shape)
        # ------------------build graph for tf.function in advance-----------------------
        self.reset()
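
The banner-delimited block above is a warm-up pass: every tf.function entry point (is_safe, run_batch, obj_value_batch) is called once with the tensor shapes it will see at run time, so graph tracing happens during construction instead of inside the first control step. Below is a minimal sketch of the same idea; the function body and the batch shape are illustrative, not taken from the repo.

import numpy as np
import tensorflow as tf

@tf.function
def run_batch(obs):
    # stand-in for the policy network; only the call pattern matters here
    return obs * 2.0

obs = np.zeros((3, 10), dtype=np.float32)   # hypothetical (batch, obs_dim) shape
run_batch(tf.convert_to_tensor(obs))        # first call traces and builds the graph
run_batch(tf.convert_to_tensor(obs))        # later calls reuse the cached graph
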
Example #2
    def __init__(self, policy_cls, env_id, args):
        logging.getLogger("tensorflow").setLevel(logging.ERROR)
        self.args = args
        self.env = gym.make(env_id, **args2envkwargs(args))
        self.policy_with_value = policy_cls(self.args)
        self.iteration = 0
        if self.args.mode == 'training':
            self.log_dir = self.args.log_dir + '/evaluator'
        else:
            self.log_dir = self.args.test_log_dir
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        self.preprocessor = Preprocessor((self.args.obs_dim, ),
                                         self.args.obs_preprocess_type,
                                         self.args.reward_preprocess_type,
                                         self.args.obs_scale,
                                         self.args.reward_scale,
                                         self.args.reward_shift,
                                         gamma=self.args.gamma)

        self.writer = self.tf.summary.create_file_writer(self.log_dir)
        self.stats = {}
        self.eval_timer = TimerStat()
        self.eval_times = 0
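
Every example in this collection constructs one or more TimerStat objects. Their implementation is not part of these excerpts, but the call sites (used as a context manager in Example #16, push in Example #17, plus push_units_processed, mean and mean_throughput) imply an interface along the following lines. This is an assumption-based sketch, not the library's actual code.

import time

class TimerStat:
    """Minimal TimerStat-like helper reconstructed from the call sites."""

    def __init__(self):
        self._samples = []   # timed durations or externally pushed values
        self._units = []     # units processed per timed block
        self._start = None

    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._samples.append(time.time() - self._start)
        self._start = None

    def push(self, value):
        # record an externally computed sample (e.g. acc_timer.push in Example #17)
        self._samples.append(value)

    def push_units_processed(self, n):
        self._units.append(n)

    @property
    def mean(self):
        return sum(self._samples) / len(self._samples) if self._samples else 0.0

    @property
    def mean_throughput(self):
        total = sum(self._samples)
        return sum(self._units) / total if total else 0.0
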
Example #3
    def __init__(self, workers, learners, replay_buffers, evaluator, args):
        """Initialize an off-policy async optimizers.

        Arguments:
            workers (dict): {local worker, remote workers (list)>=0}
            learners (list): list of remote learners, len >= 1
            replay_buffers (list): list of replay buffers, len >= 1
        """
        self.args = args
        self.workers = workers
        self.local_worker = self.workers['local_worker']
        self.learners = learners
        self.learner_queue = queue.Queue(LEARNER_QUEUE_MAX_SIZE)
        self.replay_buffers = replay_buffers
        self.evaluator = evaluator
        self.num_sampled_steps = 0
        self.iteration = 0
        self.num_samples_dropped = 0
        self.num_grads_dropped = 0
        self.optimizer_steps = 0
        self.timers = {
            k: TimerStat()
            for k in ["sampling_timer", "replay_timer", "learning_timer"]
        }
        self.stats = {}
        self.update_thread = UpdateThread(self.workers, self.evaluator,
                                          self.args, self.stats)
        self.update_thread.start()
        self.max_weight_sync_delay = self.args.max_weight_sync_delay
        self.steps_since_update = {}
        self.log_dir = self.args.log_dir
        self.model_dir = self.args.model_dir
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        self.sample_tasks = TaskPool()
        self._set_workers()

        # fill the replay buffers up to replay_starts before optimization begins
        logger.info('start filling the replay')
        while not all([
                l >= self.args.replay_starts for l in ray.get(
                    [rb.__len__.remote() for rb in self.replay_buffers])
        ]):
            for worker, objID in list(self.sample_tasks.completed()):
                sample_batch, count = ray.get(objID)
                random.choice(
                    self.replay_buffers).add_batch.remote(sample_batch)
                self.num_sampled_steps += count
                self.sample_tasks.add(worker,
                                      worker.sample_with_count.remote())
        logger.info('end filling the replay')

        self.replay_tasks = TaskPool()
        self._set_buffers()

        self.learn_tasks = TaskPool()
        self._set_learners()
        logger.info('Optimizer initialized')
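
The prefill loop above keeps draining completed sample_with_count tasks from the TaskPool, adds each batch to a randomly chosen replay buffer, and immediately resubmits the worker, until every buffer reports at least replay_starts samples. Here is a framework-free sketch of the same drain-and-resubmit pattern; it uses concurrent.futures instead of Ray actors, and the worker function and buffers are toy stand-ins.

import random
from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait

def sample_with_count():
    # toy stand-in for a rollout worker: return a batch and its size
    batch = [0.0] * 32
    return batch, len(batch)

replay_buffers = [[], []]
replay_starts = 100
num_sampled_steps = 0

with ThreadPoolExecutor(max_workers=4) as pool:
    pending = {pool.submit(sample_with_count) for _ in range(4)}
    while not all(len(rb) >= replay_starts for rb in replay_buffers):
        # harvest whatever finished, then put each worker straight back to work
        done, pending = wait(pending, return_when=FIRST_COMPLETED)
        for task in done:
            sample_batch, count = task.result()
            random.choice(replay_buffers).extend(sample_batch)
            num_sampled_steps += count
            pending.add(pool.submit(sample_with_count))
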
Example #4
    def __init__(self, policy_cls, env_id, args):
        logging.getLogger("tensorflow").setLevel(logging.ERROR)
        self.args = args
        kwargs = copy.deepcopy(vars(self.args))
        if self.args.env_id == 'PathTracking-v0':
            self.env = gym.make(self.args.env_id,
                                num_agent=self.args.num_eval_agent,
                                num_future_data=self.args.num_future_data)
        else:
            env = gym.make(self.args.env_id)
            self.env = DummyVecEnv(env)
        self.policy_with_value = policy_cls(**kwargs)
        self.iteration = 0
        if self.args.mode == 'training':
            self.log_dir = self.args.log_dir + '/evaluator'
        else:
            self.log_dir = self.args.test_log_dir
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        self.preprocessor = Preprocessor(**kwargs)

        self.writer = self.tf.summary.create_file_writer(self.log_dir)
        self.stats = {}
        self.eval_timer = TimerStat()
        self.eval_times = 0
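
One detail worth noting here: copy.deepcopy(vars(self.args)) turns the argparse Namespace into a plain dict that can be splatted into the policy and preprocessor constructors without any risk of mutating the shared args. A tiny demonstration with hypothetical fields:

import argparse
import copy

args = argparse.Namespace(obs_dim=10, gamma=0.99)   # hypothetical fields
kwargs = copy.deepcopy(vars(args))                  # independent plain dict
print(kwargs)                                       # {'obs_dim': 10, 'gamma': 0.99}
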
Example #5
    def __init__(self, shared_list, lock, args):
        self.args = args
        self.shared_list = shared_list
        self.lock = lock
        self.task = self.args.task
        self.model_only_test = self.args.model_only_test
        self.step_old = -1
        self.acc_timer = TimerStat()
        left_construct_traj = np.load('./map/left_ref.npy')
        straight_construct_traj = np.load('./map/straight_ref.npy')
        right_construct_traj = np.load('./map/right_ref.npy')
        self.ref_path_all = {
            'left': left_construct_traj,
            'straight': straight_construct_traj,
            'right': right_construct_traj,
        }
        self.ref_path = self.ref_path_all[self.task][0]  # todo
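
The reference paths loaded here are consumed in Example #17 through find_closest_point, whose body is not shown in these excerpts. Judging from the unpacking there (points[0][0], points[1][0], points[2][0] as x, y and phi), each path presumably stacks x, y and phi rows; the sketch below is a reconstruction under that assumption.

import numpy as np

def find_closest_point(ref_path, xs, ys):
    # ref_path assumed shape (3, P): rows are x, y, phi along the path
    dists = ((ref_path[0][None, :] - xs[:, None]) ** 2
             + (ref_path[1][None, :] - ys[:, None]) ** 2)
    indexs = np.argmin(dists, axis=1)   # nearest path index per query point
    points = ref_path[:, indexs]        # shape (3, n): x, y, phi columns
    return indexs, points
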
Example #6
    def __init__(self, policy_cls, args):
        self.args = args
        self.batch_size = self.args.replay_batch_size
        self.policy_with_value = policy_cls(**vars(self.args))
        self.batch_data = {}
        self.preprocessor = Preprocessor(self.args.obs_dim,
                                         self.args.obs_ptype,
                                         self.args.rew_ptype,
                                         self.args.obs_scale,
                                         self.args.rew_scale,
                                         self.args.rew_shift,
                                         gamma=self.args.gamma)
        self.policy_gradient_timer = TimerStat()
        self.q_gradient_timer = TimerStat()
        self.target_timer = TimerStat()
        self.stats = {}
        self.info_for_buffer = {}
        self.counter = 0
        self.num_batch_reuse = self.args.num_batch_reuse
Example #7
    def __init__(self, policy_cls, args):
        self.args = args
        self.batch_size = self.args.replay_batch_size
        self.policy_with_value = policy_cls(**vars(self.args))
        self.batch_data = {}
        self.M = self.args.M
        self.num_rollout_list_for_policy_update = self.args.num_rollout_list_for_policy_update
        self.num_rollout_list_for_q_estimation = self.args.num_rollout_list_for_q_estimation

        self.model = NAME2MODELCLS[self.args.env_id](**vars(self.args))
        self.preprocessor = Preprocessor(self.args.obs_dim, self.args.obs_ptype, self.args.rew_ptype,
                                         self.args.obs_scale, self.args.rew_scale, self.args.rew_shift,
                                         gamma=self.args.gamma)
        self.policy_gradient_timer = TimerStat()
        self.q_gradient_timer = TimerStat()
        self.stats = {}
        self.info_for_buffer = {}
        self.counter = 0
        self.num_batch_reuse = self.args.num_batch_reuse
Example #8
    def __init__(self, policy_cls, args):
        self.args = args
        self.sample_num_in_learner = self.args.sample_num_in_learner
        self.batch_size = self.args.replay_batch_size
        if self.args.env_id == 'PathTracking-v0':
            self.env = gym.make(self.args.env_id, num_agent=self.batch_size, num_future_data=self.args.num_future_data)
        else:
            env = gym.make(self.args.env_id)
            self.env = DummyVecEnv(env)
        self.policy_with_value = policy_cls(**vars(self.args))
        self.batch_data = {}
        self.counter = 0
        self.num_batch_reuse = self.args.num_batch_reuse
        self.preprocessor = Preprocessor(self.args.obs_dim, self.args.obs_ptype, self.args.rew_ptype,
                                         self.args.obs_scale, self.args.rew_scale, self.args.rew_shift,
                                         gamma=self.args.gamma)
        self.policy_gradient_timer = TimerStat()
        self.q_gradient_timer = TimerStat()
        self.target_timer = TimerStat()
        self.stats = {}
        self.info_for_buffer = {}
Example #9
    def __init__(self, workers, evaluator, args, optimizer_stats):
        threading.Thread.__init__(self)
        self.args = args
        self.workers = workers
        self.local_worker = workers['local_worker']
        self.evaluator = evaluator
        self.optimizer_stats = optimizer_stats
        self.inqueue = queue.Queue(maxsize=self.args.grads_queue_size)
        self.stopped = False
        self.log_dir = self.args.log_dir
        self.model_dir = self.args.model_dir
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        self.iteration = 0
        self.update_timer = TimerStat()
        self.grad_queue_get_timer = TimerStat()
        self.grad_apply_timer = TimerStat()
        self.writer = tf.summary.create_file_writer(self.log_dir + '/optimizer')
Example #10
    def __init__(self, task, logdir=None):
        self.task = task
        if self.task == 'left':
            self.policy = LoadPolicy('../utils/models/left', 100000)
        elif self.task == 'right':
            self.policy = LoadPolicy('../utils/models/right', 145000)
        elif self.task == 'straight':
            self.policy = LoadPolicy('../utils/models/straight', 95000)
        self.env = CrossroadEnd2end(training_task=self.task)
        self.model = EnvironmentModel(self.task, mode='selecting')
        self.recorder = Recorder()
        self.episode_counter = -1
        self.step_counter = -1
        self.obs = None
        self.stg = None
        self.step_timer = TimerStat()
        self.ss_timer = TimerStat()
        self.logdir = logdir
        self.fig = plt.figure(figsize=(8, 8))
        plt.ion()
        self.hist_posi = []
        self.reset()
Example #11
    def __init__(self, policy_cls, args):
        self.args = args
        self.sample_num_in_learner = self.args.sample_num_in_learner
        self.batch_size = self.args.replay_batch_size
        if self.args.env_id == 'PathTracking-v0':
            self.env = gym.make(self.args.env_id,
                                num_agent=self.batch_size,
                                num_future_data=self.args.num_future_data)
        else:
            env = gym.make(self.args.env_id)
            self.env = DummyVecEnv(env)
        self.policy_with_value = policy_cls(**vars(self.args))
        self.batch_data = {}
        self.counter = 0
        self.num_batch_reuse = self.args.num_batch_reuse
        self.policy_for_rollout = policy_cls(**vars(self.args))
        self.M = self.args.M
        self.num_rollout_list_for_policy_update = self.args.num_rollout_list_for_policy_update
        self.num_rollout_list_for_q_estimation = self.args.num_rollout_list_for_q_estimation

        self.model = NAME2MODELCLS[self.args.env_id](**vars(self.args))
        self.preprocessor = Preprocessor(self.args.obs_dim,
                                         self.args.obs_ptype,
                                         self.args.rew_ptype,
                                         self.args.obs_scale,
                                         self.args.rew_scale,
                                         self.args.rew_shift,
                                         gamma=self.args.gamma)
        self.policy_gradient_timer = TimerStat()
        self.q_gradient_timer = TimerStat()
        self.target_timer = TimerStat()
        self.stats = {}
        self.info_for_buffer = {}
        self.ws_old = self.tf.convert_to_tensor(
            [0.] + [1. / (len(self.num_rollout_list_for_policy_update) - 1)
                    for _ in range(len(self.num_rollout_list_for_policy_update) - 1)],
            dtype=self.tf.float32)
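
ws_old is a weight vector over the candidate rollout lengths: zero weight on the first entry and uniform weight spread over the remainder. With a hypothetical list of three rollout lengths, for example:

num_rollout_list = [0, 25, 100]   # hypothetical rollout lengths
n = len(num_rollout_list)
ws_old = [0.] + [1. / (n - 1) for _ in range(n - 1)]
print(ws_old)   # [0.0, 0.5, 0.5]
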
Example #12
    def __init__(self, task, train_exp_dir, ite, logdir=None):
        self.task = task
        self.policy = LoadPolicy('../utils/models/{}/{}'.format(task, train_exp_dir), ite)
        self.env = CrossroadEnd2end(training_task=self.task)
        self.model = EnvironmentModel(self.task, mode='selecting')
        self.recorder = Recorder()
        self.episode_counter = -1
        self.step_counter = -1
        self.obs = None
        self.stg = None
        self.step_timer = TimerStat()
        self.ss_timer = TimerStat()
        self.logdir = logdir
        if self.logdir is not None:
            config = dict(task=task, train_exp_dir=train_exp_dir, ite=ite)
            with open(self.logdir + '/config.json', 'w', encoding='utf-8') as f:
                json.dump(config, f, ensure_ascii=False, indent=4)
        self.fig = plt.figure(figsize=(8, 8))
        plt.ion()
        self.hist_posi = []
        self.reset()
Example #13
    def __init__(self, policy_cls, args):
        self.args = args
        self.policy_with_value = policy_cls(self.args)
        self.batch_data = {}
        self.all_data = {}
        self.M = self.args.M
        self.num_rollout_list_for_policy_update = self.args.num_rollout_list_for_policy_update

        self.model = EnvironmentModel(**args2envkwargs(args))
        self.preprocessor = Preprocessor((self.args.obs_dim, ),
                                         self.args.obs_preprocess_type,
                                         self.args.reward_preprocess_type,
                                         self.args.obs_scale,
                                         self.args.reward_scale,
                                         self.args.reward_shift,
                                         gamma=self.args.gamma)
        self.grad_timer = TimerStat()
        self.stats = {}
        self.info_for_buffer = {}
Example #14
    def __init__(self, shared_list, lock, args):
        self.args = args
        self.shared_list = shared_list
        self.lock = lock
        self.task = self.args.task
        self.model_only_test = self.args.model_only_test
        self.step_old = -1
        self.acc_timer = TimerStat()
        self._load_xml()
        self.red_img = self._read_png('utils/Rendering/red.png')
        self.green_img = self._read_png('utils/Rendering/green.png')
        self.GL_TEXTURE_RED = glGenTextures(1)
        self.GL_TEXTURE_GREEN = glGenTextures(1)
        left_construct_traj = np.load('./map/left_ref.npy')
        straight_construct_traj = np.load('./map/straight_ref.npy')
        right_construct_traj = np.load('./map/right_ref.npy')
        self.ref_path_all = {
            'left': left_construct_traj,
            'straight': straight_construct_traj,
            'right': right_construct_traj
        }
Example #15
    def __init__(self, worker, learner, replay_buffer, evaluator, args):
        self.args = args
        self.worker = worker
        self.learner = learner
        self.replay_buffer = replay_buffer
        self.evaluator = evaluator
        self.num_sampled_steps = 0
        self.iteration = 0
        self.timers = {
            k: TimerStat()
            for k in [
                "sampling_timer", "replay_timer", "learning_timer",
                "grad_apply_timer"
            ]
        }
        self.stats = {}
        self.log_dir = self.args.log_dir
        self.model_dir = self.args.model_dir
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)

        self.args.log_interval = 10
        self.args.eval_interval = 3000
        self.args.save_interval = 3000

        # fill the replay buffer up to replay_starts before training begins
        logger.info('start filling the replay')
        while len(self.replay_buffer) < self.args.replay_starts:
            sample_batch, count = self.worker.sample_with_count()
            self.num_sampled_steps += count
            self.replay_buffer.add_batch(sample_batch)
        logger.info('end filling the replay')
        self.writer = tf.summary.create_file_writer(self.log_dir +
                                                    '/optimizer')
        logger.info('Optimizer initialized')
        self.get_stats()
Example #16
class UpdateThread(threading.Thread):
    """Background thread that updates the local model from gradient list.
    """
    def __init__(self, workers, evaluator, args, optimizer_stats):
        threading.Thread.__init__(self)
        self.args = args
        self.workers = workers
        self.local_worker = workers['local_worker']
        self.evaluator = evaluator
        self.optimizer_stats = optimizer_stats
        self.inqueue = queue.Queue(maxsize=self.args.grads_queue_size)
        self.stopped = False
        self.log_dir = self.args.log_dir
        self.model_dir = self.args.model_dir
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        self.iteration = 0
        self.update_timer = TimerStat()
        self.grad_queue_get_timer = TimerStat()
        self.grad_apply_timer = TimerStat()
        self.writer = tf.summary.create_file_writer(self.log_dir +
                                                    '/optimizer')

    def run(self):
        while not self.stopped:
            with self.update_timer:
                self.step()
                self.update_timer.push_units_processed(1)

    def step(self):
        self.optimizer_stats.update(
            dict(update_queue_size=self.inqueue.qsize(),
                 update_time=self.update_timer.mean,
                 update_throughput=self.update_timer.mean_throughput,
                 grad_queue_get_time=self.grad_queue_get_timer.mean,
                 grad_apply_timer=self.grad_apply_timer.mean))
        # fetch grad
        with self.grad_queue_get_timer:
            try:
                grads, learner_stats = self.inqueue.get(timeout=30)
            except Empty:
                return

        # apply grad
        with self.grad_apply_timer:
            try:
                judge_is_nan(grads)
            except ValueError:
                grads = [tf.zeros_like(grad) for grad in grads]
                logger.info('Grad is nan! Zeroing it')

            self.local_worker.apply_gradients(self.iteration, grads)

        # log
        if self.iteration % self.args.log_interval == 0:
            logger.info('updating {} in total'.format(self.iteration))
            logger.info('sampling {} in total'.format(
                self.optimizer_stats['num_sampled_steps']))
            with self.writer.as_default():
                for key, val in learner_stats.items():
                    if not isinstance(val, list):
                        tf.summary.scalar(
                            'optimizer/learner_stats/scalar/{}'.format(key),
                            val,
                            step=self.iteration)
                    else:
                        assert isinstance(val, list)
                        for i, v in enumerate(val):
                            tf.summary.scalar(
                                'optimizer/learner_stats/list/{}/{}'.format(
                                    key, i),
                                v,
                                step=self.iteration)
                for key, val in self.optimizer_stats.items():
                    tf.summary.scalar('optimizer/{}'.format(key),
                                      val,
                                      step=self.iteration)
                self.writer.flush()

        # evaluate
        if self.iteration % self.args.eval_interval == 0:
            self.evaluator.set_weights.remote(self.local_worker.get_weights())
            if self.args.obs_preprocess_type == 'normalize' or self.args.reward_preprocess_type == 'normalize':
                self.evaluator.set_ppc_params.remote(
                    self.local_worker.get_ppc_params())
            self.evaluator.run_evaluation.remote(self.iteration)

        # save
        if self.iteration % self.args.save_interval == 0:
            self.local_worker.save_weights(self.model_dir, self.iteration)
            self.workers['remote_workers'][0].save_ppc_params.remote(
                self.model_dir)

        self.iteration += 1
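
UpdateThread only consumes from inqueue; the producing side lives in the async optimizer of Example #3. The sketch below shows what that producer plausibly looks like: a non-blocking put so that a slow updater drops gradients instead of stalling sampling (consistent with the num_grads_dropped counter in Example #3), plus the shutdown handshake implied by the stopped flag. Both functions are assumptions for illustration, not code from the repo.

import queue

def enqueue_grads(update_thread, grads, learner_stats, counters):
    # hand one gradient (plus learner stats) to the update thread;
    # drop it if the bounded queue is full rather than blocking
    try:
        update_thread.inqueue.put_nowait((grads, learner_stats))
    except queue.Full:
        counters['num_grads_dropped'] = counters.get('num_grads_dropped', 0) + 1

def shutdown(update_thread):
    # run() re-checks the flag after each step, so flipping it ends the loop
    update_thread.stopped = True
    update_thread.join()
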
Example #17
class Plot():
    def __init__(self, shared_list, lock, args):
        self.args = args
        self.shared_list = shared_list
        self.lock = lock
        self.task = self.args.task
        self.model_only_test = self.args.model_only_test
        self.step_old = -1
        self.acc_timer = TimerStat()
        left_construct_traj = np.load('./map/left_ref.npy')
        straight_construct_traj = np.load('./map/straight_ref.npy')
        right_construct_traj = np.load('./map/right_ref.npy')
        self.ref_path_all = {
            'left': left_construct_traj,
            'straight': straight_construct_traj,
            'right': right_construct_traj,
        }
        self.ref_path = self.ref_path_all[self.task][0]  # todo

    def run(self):
        extension = 40
        light_line_width = 3
        dotted_line_style = '--'
        solid_line_style = '-'
        start_time = 0
        v_old = 0.
        plt.title("Crossroad")
        ax = plt.axes(xlim=(-CROSSROAD_HALF_WIDTH - extension,
                            CROSSROAD_HALF_WIDTH + extension),
                      ylim=(-CROSSROAD_D_HEIGHT - extension,
                            CROSSROAD_U_HEIGHT + extension))

        plt.axis("equal")
        plt.axis('off')

        def is_in_plot_area(x, y, tolerance=5):
            if -CROSSROAD_HALF_WIDTH - extension + tolerance < x < CROSSROAD_HALF_WIDTH + extension - tolerance and \
                    -CROSSROAD_D_HEIGHT - extension + tolerance < y < CROSSROAD_U_HEIGHT + extension - tolerance:
                return True
            else:
                return False

        def draw_rotate_rec(x, y, a, l, w, color, linestyle='-'):
            RU_x, RU_y, _ = rotate_coordination(l / 2, w / 2, 0, -a)
            RD_x, RD_y, _ = rotate_coordination(l / 2, -w / 2, 0, -a)
            LU_x, LU_y, _ = rotate_coordination(-l / 2, w / 2, 0, -a)
            LD_x, LD_y, _ = rotate_coordination(-l / 2, -w / 2, 0, -a)
            ax.plot([RU_x + x, RD_x + x], [RU_y + y, RD_y + y],
                    color=color,
                    linestyle=linestyle)
            ax.plot([RU_x + x, LU_x + x], [RU_y + y, LU_y + y],
                    color=color,
                    linestyle=linestyle)
            ax.plot([LD_x + x, RD_x + x], [LD_y + y, RD_y + y],
                    color=color,
                    linestyle=linestyle)
            ax.plot([LD_x + x, LU_x + x], [LD_y + y, LU_y + y],
                    color=color,
                    linestyle=linestyle)

        def plot_phi_line(x, y, phi, color):
            line_length = 5
            x_forw, y_forw = x + line_length * cos(phi * pi / 180.), \
                             y + line_length * sin(phi * pi / 180.)
            plt.plot([x, x_forw], [y, y_forw], color=color, linewidth=0.5)

        while True:
            plt.cla()
            plt.axis('off')
            ax.add_patch(
                plt.Rectangle(
                    (-CROSSROAD_HALF_WIDTH - extension,
                     -CROSSROAD_D_HEIGHT - extension),
                    2 * CROSSROAD_HALF_WIDTH + 2 * extension,
                    CROSSROAD_D_HEIGHT + CROSSROAD_U_HEIGHT + 2 * extension,
                    edgecolor='black',
                    facecolor='none'))

            # ----------horizon--------------
            plt.plot(
                [-CROSSROAD_HALF_WIDTH - extension, -CROSSROAD_HALF_WIDTH],
                [0, 0],
                color='black')
            plt.plot([CROSSROAD_HALF_WIDTH + extension, CROSSROAD_HALF_WIDTH],
                     [0, 0],
                     color='black')

            #
            for i in range(1, LANE_NUMBER_LR + 1):
                linestyle = dotted_line_style if i < LANE_NUMBER_LR else solid_line_style
                plt.plot(
                    [-CROSSROAD_HALF_WIDTH - extension, -CROSSROAD_HALF_WIDTH],
                    [i * LANE_WIDTH_LR, i * LANE_WIDTH_LR],
                    linestyle=linestyle,
                    color='black')
                plt.plot(
                    [CROSSROAD_HALF_WIDTH + extension, CROSSROAD_HALF_WIDTH],
                    [i * LANE_WIDTH_LR, i * LANE_WIDTH_LR],
                    linestyle=linestyle,
                    color='black')
                plt.plot(
                    [-CROSSROAD_HALF_WIDTH - extension, -CROSSROAD_HALF_WIDTH],
                    [-i * LANE_WIDTH_LR, -i * LANE_WIDTH_LR],
                    linestyle=linestyle,
                    color='black')
                plt.plot(
                    [CROSSROAD_HALF_WIDTH + extension, CROSSROAD_HALF_WIDTH],
                    [-i * LANE_WIDTH_LR, -i * LANE_WIDTH_LR],
                    linestyle=linestyle,
                    color='black')

            # ----------vertical----------------
            plt.plot([0, 0],
                     [-CROSSROAD_D_HEIGHT - extension, -CROSSROAD_D_HEIGHT],
                     color='black')
            plt.plot([0, 0],
                     [CROSSROAD_U_HEIGHT + extension, CROSSROAD_U_HEIGHT],
                     color='black')

            #
            for i in range(1, LANE_NUMBER_UD + 1):
                linestyle = dotted_line_style if i < LANE_NUMBER_UD else solid_line_style
                plt.plot(
                    [i * LANE_WIDTH_UD, i * LANE_WIDTH_UD],
                    [-CROSSROAD_D_HEIGHT - extension, -CROSSROAD_D_HEIGHT],
                    linestyle=linestyle,
                    color='black')
                plt.plot([i * LANE_WIDTH_UD, i * LANE_WIDTH_UD],
                         [CROSSROAD_U_HEIGHT + extension, CROSSROAD_U_HEIGHT],
                         linestyle=linestyle,
                         color='black')
                plt.plot(
                    [-i * LANE_WIDTH_UD, -i * LANE_WIDTH_UD],
                    [-CROSSROAD_D_HEIGHT - extension, -CROSSROAD_D_HEIGHT],
                    linestyle=linestyle,
                    color='black')
                plt.plot([-i * LANE_WIDTH_UD, -i * LANE_WIDTH_UD],
                         [CROSSROAD_U_HEIGHT + extension, CROSSROAD_U_HEIGHT],
                         linestyle=linestyle,
                         color='black')

            # ----------Oblique--------------
            plt.plot([LANE_NUMBER_UD * LANE_WIDTH_UD, CROSSROAD_HALF_WIDTH],
                     [-CROSSROAD_D_HEIGHT, -LANE_NUMBER_LR * LANE_WIDTH_LR],
                     color='black')
            plt.plot([LANE_NUMBER_UD * LANE_WIDTH_UD, CROSSROAD_HALF_WIDTH],
                     [CROSSROAD_U_HEIGHT, LANE_NUMBER_LR * LANE_WIDTH_LR],
                     color='black')
            plt.plot([-LANE_NUMBER_UD * LANE_WIDTH_UD, -CROSSROAD_HALF_WIDTH],
                     [-CROSSROAD_D_HEIGHT, -LANE_NUMBER_LR * LANE_WIDTH_LR],
                     color='black')
            plt.plot([-LANE_NUMBER_UD * LANE_WIDTH_UD, -CROSSROAD_HALF_WIDTH],
                     [CROSSROAD_U_HEIGHT, LANE_NUMBER_LR * LANE_WIDTH_LR],
                     color='black')

            # ----------------------ref_path--------------------
            color = ['blue', 'coral', 'cyan', 'green']
            for index, path in enumerate(self.ref_path_all[self.task]):
                if index == self.shared_list[12]:
                    ax.plot(path[0], path[1], color=color[index], alpha=1.0)
                else:
                    ax.plot(path[0], path[1], color=color[index], alpha=0.3)

            # plot_ref = ['left', 'straight', 'right']  # 'left','straight','right', 'left_ref','straight_ref','right_ref'
            # for ref in plot_ref:
            #     ref_path = self.ref_path_all[ref][0]
            #     ax.plot(ref_path[0], ref_path[1])
            # ax.plot(self.ref_path[0], self.ref_path[1], color='g')  # todo:

            state_other = self.shared_list[4].copy()
            # plot cars
            for veh in state_other:
                veh_x = veh['x']
                veh_y = veh['y']
                veh_phi = veh['phi']
                veh_l = L
                veh_w = W
                if is_in_plot_area(veh_x, veh_y):
                    plot_phi_line(veh_x, veh_y, veh_phi, 'black')
                    draw_rotate_rec(veh_x, veh_y, veh_phi, veh_l, veh_w,
                                    'black')

            interested_vehs = self.shared_list[10].copy()

            for i in range(int(len(interested_vehs) / 4)):  # TODO:
                veh_x = interested_vehs[4 * i + 0]
                veh_y = interested_vehs[4 * i + 1]
                # plt.text(veh_x, veh_y, i, fontsize=12)
                veh_phi = interested_vehs[4 * i + 3]
                veh_l = L
                veh_w = W
                if is_in_plot_area(veh_x, veh_y):
                    plot_phi_line(veh_x, veh_y, veh_phi, 'black')
                    draw_rotate_rec(veh_x,
                                    veh_y,
                                    veh_phi,
                                    veh_l,
                                    veh_w,
                                    'b',
                                    linestyle='--')

            v_light = int(self.shared_list[13])  # todo
            if v_light == 0:
                v_color, h_color = 'black', 'black'  # 'green', 'red'
            elif v_light == 1:
                v_color, h_color = 'black', 'black'
            elif v_light == 2:
                v_color, h_color = 'black', 'black'
            else:
                v_color, h_color = 'black', 'black'

            plt.plot([(LANE_NUMBER_UD - 1) * LANE_WIDTH_UD,
                      LANE_NUMBER_UD * LANE_WIDTH_UD],
                     [-CROSSROAD_D_HEIGHT, -CROSSROAD_D_HEIGHT],
                     color=v_color,
                     linewidth=light_line_width)
            plt.plot([
                -LANE_NUMBER_UD * LANE_WIDTH_UD,
                -(LANE_NUMBER_UD - 1) * LANE_WIDTH_UD
            ], [CROSSROAD_U_HEIGHT, CROSSROAD_U_HEIGHT],
                     color=v_color,
                     linewidth=light_line_width)
            plt.plot([0, (LANE_NUMBER_UD - 1) * LANE_WIDTH_UD],
                     [-CROSSROAD_D_HEIGHT, -CROSSROAD_D_HEIGHT],
                     color=v_color,
                     linewidth=light_line_width)
            plt.plot([0, -(LANE_NUMBER_UD - 1) * LANE_WIDTH_UD],
                     [CROSSROAD_U_HEIGHT, CROSSROAD_U_HEIGHT],
                     color=v_color,
                     linewidth=light_line_width)

            state_ego = self.shared_list[9].copy()
            ego_v = state_ego['VehicleSPeedAct']
            ego_steer = state_ego['SteerAngleAct']
            ego_gear = state_ego['AutoGear']
            ego_gps_v = state_ego['GpsSpeed']
            ego_north_v = state_ego['NorthVelocity']
            ego_east_v = state_ego['EastVelocity']
            ego_yaw_rate = state_ego['YawRate']
            ego_long_acc = state_ego['LongitudinalAcc']
            ego_lat_acc = state_ego['LateralAcc']
            ego_throttle = state_ego['Throttle']
            ego_brk = state_ego['BrkOn']
            ego_x = state_ego['GaussX']
            ego_y = state_ego['GaussY']
            ego_longitude = state_ego['Longitude']
            ego_latitude = state_ego['Latitude']
            ego_phi = state_ego['Heading']
            ego_l = L
            ego_w = W

            if not self.model_only_test:
                real_action_x = state_ego['model_x_in_real_action']
                real_action_y = state_ego['model_y_in_real_action']
                real_action_phi = state_ego['model_phi_in_real_action']
                plot_phi_line(real_action_x, real_action_y, real_action_phi,
                              'blue')
                draw_rotate_rec(real_action_x, real_action_y, real_action_phi,
                                ego_l, ego_w, 'blue')

                model_action_x = state_ego['model_x_in_model_action']
                model_action_y = state_ego['model_y_in_model_action']
                model_action_phi = state_ego['model_phi_in_model_action']
                plot_phi_line(model_action_x, model_action_y, model_action_phi,
                              'coral')
                draw_rotate_rec(model_action_x, model_action_y,
                                model_action_phi, ego_l, ego_w, 'coral')

            ego_l = L
            ego_w = W
            plot_phi_line(ego_x, ego_y, ego_phi, 'red')
            draw_rotate_rec(ego_x, ego_y, ego_phi, ego_l, ego_w, 'red')
            # model_x = state_ego['model_x']
            # model_y = state_ego['model_y']
            # model_phi = state_ego['model_phi']
            # draw_rotate_rec(model_x, model_y, model_phi, ego_l, ego_w, 'blue')

            time1 = time.time()
            delta_time = time1 - start_time
            acc_actual = (ego_v - v_old) / delta_time
            self.acc_timer.push(acc_actual)
            start_time = time.time()
            v_old = ego_v

            indexs, points = find_closest_point(self.ref_path,
                                                np.array([ego_x], np.float32),
                                                np.array([ego_y], np.float32))
            path_x, path_y, path_phi = points[0][0], points[1][0], points[2][0]
            plt.plot(path_x, path_y, 'g.')
            delta_x, delta_y, delta_phi = ego_x - path_x, ego_y - path_y, ego_phi - path_phi

            # plot txt
            text_x, text_y_start = -110, 60
            ge = iter(range(0, 1000, 4))
            plt.text(text_x, text_y_start - next(ge),
                     'ego_GaussX: {:.2f}m'.format(ego_x))
            plt.text(text_x, text_y_start - next(ge),
                     'ego_GaussY: {:.2f}m'.format(ego_y))
            plt.text(text_x, text_y_start - next(ge),
                     'gps_v: {:.2f}m/s'.format(ego_gps_v))
            plt.text(text_x, text_y_start - next(ge),
                     'north_v: {:.2f}m/s'.format(ego_north_v))
            plt.text(text_x, text_y_start - next(ge),
                     'east_v: {:.2f}m/s'.format(ego_east_v))
            plt.text(text_x, text_y_start - next(ge),
                     'yaw_rate: {:.2f}rad/s'.format(ego_yaw_rate))
            plt.text(text_x, text_y_start - next(ge),
                     r'longitude: ${:.2f}\degree$'.format(ego_longitude))
            plt.text(text_x, text_y_start - next(ge),
                     r'latitude: ${:.2f}\degree$'.format(ego_latitude))
            plt.text(text_x, text_y_start - next(ge),
                     r'long_acc: ${:.2f}m/s^2$'.format(ego_long_acc))
            plt.text(text_x, text_y_start - next(ge),
                     r'lat_acc: ${:.2f}m/s^2$'.format(ego_lat_acc))

            plt.text(text_x, text_y_start - next(ge),
                     'path_x: {:.2f}m'.format(path_x))
            plt.text(text_x, text_y_start - next(ge),
                     'path_y: {:.2f}m'.format(path_y))
            plt.text(text_x, text_y_start - next(ge),
                     'delta_x: {:.2f}m'.format(delta_x))
            plt.text(text_x, text_y_start - next(ge),
                     'delta_y: {:.2f}m'.format(delta_y))
            plt.text(text_x, text_y_start - next(ge),
                     r'ego_phi: ${:.2f}\degree$'.format(ego_phi))
            plt.text(text_x, text_y_start - next(ge),
                     r'path_phi: ${:.2f}\degree$'.format(path_phi))
            plt.text(text_x, text_y_start - next(ge),
                     r'delta_phi: ${:.2f}\degree$'.format(delta_phi))

            decision = self.shared_list[8].copy()
            decision_steer = decision['SteerAngleAim']
            decision_torque = decision['Torque']
            decision_brkacc = decision['Deceleration']
            decision_Dec_flag = decision['Dec_flag']
            decision_Tor_flag = decision['Tor_flag']
            front_wheel_deg = decision['front_wheel_deg']
            acc = decision['a_x']

            text_x, text_y_start = 70, 60
            ge = iter(range(0, 1000, 4))

            # done info
            # plt.text(text_x, text_y_start - next(ge), 'done info: {}'.format(self.done_type))
            plt.text(text_x, text_y_start - next(ge), 'CAN')
            plt.text(text_x, text_y_start - next(ge),
                     'speed_act: {:.2f}m/s'.format(ego_v))
            plt.text(text_x, text_y_start - next(ge),
                     r'steering_act: ${:.2f}\degree$'.format(ego_steer))
            plt.text(text_x, text_y_start - next(ge),
                     'throttle: {:.2f}'.format(ego_throttle))
            plt.text(text_x, text_y_start - next(ge),
                     'brake_on: {:.2f}'.format(ego_brk))
            plt.text(text_x, text_y_start - next(ge),
                     'gear: {:.2f}'.format(ego_gear))
            plt.text(text_x, text_y_start - next(ge), '  ')
            plt.text(text_x, text_y_start - next(ge), 'Decision')
            plt.text(
                text_x, text_y_start - next(ge),
                r'steer_aim_decision: ${:.2f}\degree$'.format(decision_steer))
            plt.text(text_x, text_y_start - next(ge),
                     'torque_decision: {:.2f}Nm'.format(decision_torque))
            plt.text(text_x, text_y_start - next(ge),
                     'torque_flag: {}'.format(decision_Tor_flag))
            plt.text(
                text_x, text_y_start - next(ge),
                r'brake_acc_decision: {:.2f}$m/s^2$'.format(decision_brkacc))
            plt.text(text_x, text_y_start - next(ge),
                     'deceleration_flag: {}'.format(decision_Dec_flag))
            plt.text(text_x, text_y_start - next(ge), '  ')
            plt.text(text_x, text_y_start - next(ge), 'Net out')
            plt.text(text_x, text_y_start - next(ge),
                     'front_wheel_deg: {:.2f}'.format(front_wheel_deg))
            plt.text(text_x, text_y_start - next(ge),
                     r'acc: {:.2f}$m/s^2$'.format(acc))
            plt.text(text_x, text_y_start - next(ge),
                     r'acc_actual: {:.2f}$m/s^2$'.format(self.acc_timer.mean))

            # plot text of trajectory
            text_x, text_y_start = -40, -70
            ge = iter(range(0, 1000, 6))
            traj_return_value = self.shared_list[11]
            for i, value in enumerate(traj_return_value):
                if i == self.shared_list[12]:
                    plt.text(
                        text_x,
                        text_y_start - next(ge),
                        'Path reward={:.4f}, Collision risk={:.4f}'.format(
                            value[0], value[1]),
                        fontsize=14,
                        color=color[i],
                        fontstyle='italic')
                else:
                    plt.text(
                        text_x,
                        text_y_start - next(ge),
                        'Path reward={:.4f}, Collision risk={:.4f}'.format(
                            value[0], value[1]),
                        fontsize=10,
                        color=color[i],
                        fontstyle='italic')
            plt.pause(0.00001)
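
draw_rotate_rec in Example #17 relies on rotate_coordination, which is not defined in these excerpts. Passing the body-frame corner offsets with -a and then adding the vehicle position is a standard 2-D frame rotation, so a helper consistent with that call pattern would look like the sketch below (a reconstruction, not the repo's verbatim code). With heading a, the corner (l/2, w/2) maps to (l/2*cos(a) - w/2*sin(a), l/2*sin(a) + w/2*cos(a)), which is exactly what the four ax.plot calls connect.

from math import cos, pi, sin

def rotate_coordination(orig_x, orig_y, orig_d, coordi_rotate_d):
    # express (orig_x, orig_y, heading orig_d) in a frame rotated by
    # coordi_rotate_d degrees; called with -a, this rotates body-frame
    # corner offsets into world coordinates
    rad = coordi_rotate_d * pi / 180.
    transformed_x = orig_x * cos(rad) + orig_y * sin(rad)
    transformed_y = -orig_x * sin(rad) + orig_y * cos(rad)
    transformed_d = orig_d - coordi_rotate_d
    return transformed_x, transformed_y, transformed_d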