Code Example #1
File: ddpg_actor.py  Project: xie9187/lan_nav
    def Model(self, inputs):
        laser, cmd, cmd_next, cmd_skip, prev_action, obj_goal, prev_state_2 = inputs
        with tf.variable_scope('encoder'):
            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb])
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb])
            embedding_status = tf.get_variable(
                'embedding_status', [self.n_cmd_type**2, self.dim_emb])
            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb])
            embedding_b_action = tf.get_variable('embedding_b_action',
                                                 [self.dim_emb])
            embedding_w_status = tf.get_variable('embedding_w_status',
                                                 [self.dim_cmd, self.dim_emb])
            embedding_b_status = tf.get_variable('embedding_b_status',
                                                 [self.dim_emb])

            # training input
            conv1 = model_utils.Conv1D(laser, 2, 5, 4, scope='conv1')
            conv2 = model_utils.Conv1D(conv1, 4, 5, 4, scope='conv2')
            conv3 = model_utils.Conv1D(conv2, 8, 5, 4, scope='conv3')
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            curr_status = cmd * self.n_cmd_type + cmd_next
            next_status = cmd_next * self.n_cmd_type + cmd_skip
            vector_curr_status = tf.reshape(
                tf.nn.embedding_lookup(embedding_status, curr_status),
                (-1, self.dim_emb))

            vector_prev_action = tf.matmul(
                prev_action, embedding_w_action) + embedding_b_action

            vector_obj_goal = tf.matmul(obj_goal,
                                        embedding_w_goal) + embedding_b_goal

            input_vector = tf.concat([
                vector_laser, vector_curr_status, vector_prev_action,
                vector_obj_goal
            ],
                                     axis=1)

        with tf.variable_scope('controller'):
            shape = input_vector.get_shape().as_list()
            w_hidden = tf.get_variable(
                'w_hidden', [shape[1], self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer())
            b_hidden = tf.get_variable(
                'b_hidden', [self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer())

            w_action_linear = tf.get_variable(
                'w_action_linear', [self.n_hidden, self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer())
            b_action_linear = tf.get_variable(
                'b_action_linear', [self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer())
            w_action_angular = tf.get_variable(
                'w_action_angular', [self.n_hidden, self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer())
            b_action_angular = tf.get_variable(
                'b_action_angular', [self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer())

            hidden = tf.nn.leaky_relu(
                tf.matmul(input_vector, w_hidden) + b_hidden)
            a_linear = tf.nn.sigmoid(
                tf.matmul(hidden, w_action_linear) +
                b_action_linear) * self.action_range[0]
            a_angular = tf.nn.tanh(
                tf.matmul(hidden, w_action_angular) +
                b_action_angular) * self.action_range[1]
            pred_action = tf.concat([a_linear, a_angular], axis=1)

        with tf.variable_scope('planner'):
            rnn_cell_2 = model_utils._lstm_cell(self.n_hidden,
                                                self.n_layers,
                                                name='rnn/basic_lstm_cell')

            w_status_matrix = tf.get_variable(
                'w_status_matrix', [self.n_cmd_type**2, self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer())
            b_status_matrix = tf.get_variable(
                'b_status_matrix', [self.n_cmd_type**2],
                initializer=tf.contrib.layers.xavier_initializer())
            status_curr = tf.reshape(cmd * self.n_cmd_type + cmd_next,
                                     [-1])  # (b, 1) -> (b,)
            status_next = tf.reshape(cmd_next * self.n_cmd_type + cmd_skip,
                                     [-1])
            w_status_curr = tf.reshape(tf.gather(w_status_matrix, status_curr),
                                       [-1, self.n_hidden, 1])  # b, h, 1
            w_status_next = tf.reshape(tf.gather(w_status_matrix, status_next),
                                       [-1, self.n_hidden, 1])
            b_status_curr = tf.reshape(tf.gather(b_status_matrix, status_curr),
                                       [-1, 1])  # b, 1
            b_status_next = tf.reshape(tf.gather(b_status_matrix, status_next),
                                       [-1, 1])
            w_status = tf.concat([w_status_curr, w_status_next],
                                 axis=2)  # b, h, 2
            b_status = tf.concat([b_status_curr, b_status_next],
                                 axis=1)  # b, 2

            rnn_output_2, state_2 = rnn_cell_2(input_vector, prev_state_2)
            rnn_output_expand = tf.expand_dims(rnn_output_2, 1)  # b, 1, h
            logits = tf.reshape(tf.matmul(rnn_output_expand, w_status),
                                [-1, 2]) + b_status

        return pred_action, logits, state_2
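
Note (not part of the repository code): the three Conv1D layers above compress the laser scan into a short flat feature vector before it is concatenated with the command, action and goal embeddings. Below is a minimal sketch of that reduction, assuming model_utils.Conv1D wraps a standard 1-D convolution called as (inputs, filters, kernel_size, stride) with 'same' padding, and assuming a 512-beam scan; both assumptions are illustrative only.

import tensorflow as tf

laser = tf.placeholder(tf.float32, [None, 512, 1])             # b, 512, 1
c1 = tf.layers.conv1d(laser, 2, 5, strides=4, padding='same')  # b, 128, 2
c2 = tf.layers.conv1d(c1, 4, 5, strides=4, padding='same')     # b, 32, 4
c3 = tf.layers.conv1d(c2, 8, 5, strides=4, padding='same')     # b, 8, 8
shape = c3.get_shape().as_list()
vector_laser = tf.reshape(c3, (-1, shape[1] * shape[2]))       # b, 64

Under those assumptions every scan collapses to an 8 x 8 = 64-dimensional vector, which is the size that shape[1] * shape[2] recovers at graph-construction time.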
Code Example #2
    def Encoder(self, input_laser, input_cmd, input_cmd_next, input_obj_goal,
                label_action, label_status):
        with tf.variable_scope('encoder'):
            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb])
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb])
            embedding_cmd = tf.get_variable('cmd_embedding',
                                            [self.n_cmd_type, self.dim_emb])
            embedding_status = tf.get_variable('status_embedding',
                                               [2, self.dim_emb])
            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb])
            embedding_b_action = tf.get_variable('embedding_b_action',
                                                 [self.dim_emb])
            embedding_w_status = tf.get_variable('embedding_w_status',
                                                 [self.dim_cmd, self.dim_emb])
            embedding_b_status = tf.get_variable('embedding_b_status',
                                                 [self.dim_emb])

            batch_size = self.batch_size // self.gpu_num
            # training input
            conv1 = model_utils.Conv1D(input_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1',
                                       trainable=self.encoder_training)
            conv2 = model_utils.Conv1D(conv1,
                                       4,
                                       5,
                                       4,
                                       scope='conv2',
                                       trainable=self.encoder_training)
            conv3 = model_utils.Conv1D(conv2,
                                       8,
                                       5,
                                       4,
                                       scope='conv3',
                                       trainable=self.encoder_training)
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            vector_cmd = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, input_cmd),
                (-1, self.dim_emb))
            vector_cmd_next = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, input_cmd_next),
                (-1, self.dim_emb))

            label_status_reshape = tf.reshape(
                label_status, [-1, self.max_step, self.dim_cmd])  #  b, l, 1
            label_status_0_to_m1 = tf.slice(
                label_status_reshape, [0, 0, 0],
                [batch_size, self.max_step - 1, self.dim_cmd])
            prev_status_0 = tf.tile(
                tf.zeros([1, 1, self.dim_cmd], dtype=tf.int64),
                [batch_size, 1, 1])
            prev_status = tf.reshape(
                tf.concat([prev_status_0, label_status_0_to_m1], axis=1),
                [-1, self.dim_cmd])
            vector_prev_status = tf.reshape(
                tf.nn.embedding_lookup(embedding_status, prev_status),
                (-1, self.dim_emb))

            linear_err_mean, angular_err_mean = tf.split(
                self.a_err_mean_param, 2)
            linear_err_var, angular_err_var = tf.split(self.a_err_var_param, 2)

            a_linear_noise = tf.random_normal([batch_size * self.max_step, 1],
                                              linear_err_mean, linear_err_var)
            a_angular_noise = tf.random_normal([batch_size * self.max_step, 1],
                                               angular_err_mean,
                                               angular_err_var)
            a_noise = tf.concat([a_linear_noise, a_angular_noise], axis=1)
            clip_min = tf.tile(tf.constant([[0., -self.action_range[1]]]),
                               [batch_size * self.max_step, 1])
            clip_max = tf.tile(
                tf.constant([[self.action_range[0], self.action_range[1]]]),
                [batch_size * self.max_step, 1])
            # noisy_action = tf.clip_by_value(a_noise + label_action, clip_min, clip_max)
            noisy_action = tf.clip_by_value(label_action, clip_min, clip_max)

            noisy_action_reshape = tf.reshape(
                noisy_action,
                [batch_size, self.max_step, self.dim_action])  #  b, l, 2
            noisy_action_0_to_m1 = tf.slice(
                noisy_action_reshape, [0, 0, 0],
                [batch_size, self.max_step - 1, self.dim_action])
            prev_action_0 = tf.tile(tf.zeros([1, 1, self.dim_action]),
                                    [batch_size, 1, 1])
            prev_action = tf.reshape(
                tf.concat([prev_action_0, noisy_action_0_to_m1], axis=1),
                [-1, self.dim_action])
            vector_prev_action = tf.matmul(
                prev_action, embedding_w_action) + embedding_b_action

            vector_obj_goal = tf.matmul(input_obj_goal,
                                        embedding_w_goal) + embedding_b_goal

            training_input = tf.concat([
                vector_laser, vector_cmd, vector_cmd_next, vector_prev_status,
                vector_prev_action, vector_obj_goal
            ],
                                       axis=1)

            # testing input
            conv1 = model_utils.Conv1D(self.test_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1',
                                       trainable=self.encoder_training,
                                       reuse=True)
            conv2 = model_utils.Conv1D(conv1,
                                       4,
                                       5,
                                       4,
                                       scope='conv2',
                                       trainable=self.encoder_training,
                                       reuse=True)
            conv3 = model_utils.Conv1D(conv2,
                                       8,
                                       5,
                                       4,
                                       scope='conv3',
                                       trainable=self.encoder_training,
                                       reuse=True)
            shape = conv3.get_shape().as_list()
            vector_laser_test = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            vector_cmd_test = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.test_cmd),
                (-1, self.dim_emb))
            vector_cmd_next_test = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.test_cmd_next),
                (-1, self.dim_emb))
            vector_prev_status_test = tf.reshape(
                tf.nn.embedding_lookup(embedding_status,
                                       self.test_prev_status),
                (-1, self.dim_emb))
            vector_prev_action_test = tf.matmul(
                self.test_prev_action, embedding_w_action) + embedding_b_action
            vector_obj_goal_test = tf.matmul(
                self.test_obj_goal, embedding_w_goal) + embedding_b_goal

            testing_input = tf.concat([
                vector_laser_test, vector_cmd_test, vector_cmd_next_test,
                vector_prev_status_test, vector_prev_action_test,
                vector_obj_goal_test
            ],
                                      axis=1)

        return training_input, testing_input
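
Note (illustrative, not repository code): the prev_status and prev_action tensors above are built by shifting the label sequence one step to the right and padding step 0 with zeros, so that step t is conditioned on the label from step t-1. A small NumPy sketch of the same shift, with made-up shapes:

import numpy as np

batch_size, max_step, dim_action = 2, 4, 2
label_action = np.arange(batch_size * max_step * dim_action,
                         dtype=np.float32).reshape(batch_size, max_step, dim_action)
prev_action_0 = np.zeros((batch_size, 1, dim_action), dtype=np.float32)
# drop the last labelled action and prepend a zero action for step 0
prev_action = np.concatenate([prev_action_0, label_action[:, :-1, :]], axis=1)
prev_action = prev_action.reshape(-1, dim_action)  # same as the final [-1, dim_action] reshape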
Code Example #3
File: model_parallel.py  Project: xie9187/lan_nav
    def Encoder(self, input_laser, input_cmd, input_cmd_next, input_obj_goal,
                label_action, label_status):
        with tf.variable_scope('encoder'):
            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb])
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb])
            embedding_cmd = tf.get_variable('cmd_embedding',
                                            [self.n_cmd_type, self.dim_emb])
            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb])
            embedding_b_action = tf.get_variable('embedding_b_action',
                                                 [self.dim_emb])
            embedding_w_status = tf.get_variable('embedding_w_status',
                                                 [self.dim_cmd, self.dim_emb])
            embedding_b_status = tf.get_variable('embedding_b_status',
                                                 [self.dim_emb])

            batch_size = self.batch_size // self.gpu_num
            # training input
            conv1 = model_utils.Conv1D(input_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1',
                                       trainable=self.encoder_training)
            conv2 = model_utils.Conv1D(conv1,
                                       4,
                                       5,
                                       4,
                                       scope='conv2',
                                       trainable=self.encoder_training)
            conv3 = model_utils.Conv1D(conv2,
                                       8,
                                       5,
                                       4,
                                       scope='conv3',
                                       trainable=self.encoder_training)
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            vector_cmd = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, input_cmd),
                (-1, self.dim_emb))
            vector_cmd_next = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, input_cmd_next),
                (-1, self.dim_emb))

            label_status_reshape = tf.reshape(
                label_status,
                [batch_size, self.max_step, self.dim_cmd])  #  b, l, 1
            label_status_0_to_m1 = tf.slice(
                label_status_reshape, [0, 0, 0],
                [batch_size, self.max_step - 1, self.dim_cmd])
            prev_status_0 = tf.tile(
                tf.zeros([1, 1, self.dim_cmd], dtype=tf.int64),
                [batch_size, 1, 1])
            prev_status = tf.cast(
                tf.reshape(
                    tf.concat([prev_status_0, label_status_0_to_m1], axis=1),
                    [-1, self.dim_cmd]), tf.float32)
            vector_prev_status = tf.matmul(
                prev_status, embedding_w_status) + embedding_b_status

            label_action_reshape = tf.reshape(
                label_action,
                [batch_size, self.max_step, self.dim_action])  #  b, l, 2
            label_action_0_to_m1 = tf.slice(
                label_action_reshape, [0, 0, 0],
                [batch_size, self.max_step - 1, self.dim_action])
            prev_action_0 = tf.tile(tf.zeros([1, 1, self.dim_action]),
                                    [batch_size, 1, 1])
            prev_action = tf.reshape(
                tf.concat([prev_action_0, label_action_0_to_m1], axis=1),
                [-1, self.dim_action])
            vector_prev_action = tf.matmul(
                prev_action, embedding_w_action) + embedding_b_action

            vector_obj_goal = tf.matmul(input_obj_goal,
                                        embedding_w_goal) + embedding_b_goal

            training_planner_input = tf.concat([
                vector_laser, vector_cmd, vector_cmd_next, vector_prev_status,
                vector_obj_goal
            ],
                                               axis=1)

            training_controller_input = tf.concat([
                vector_laser, vector_cmd, vector_cmd_next, vector_prev_action,
                vector_obj_goal
            ],
                                                  axis=1)

            # testing input
            conv1 = model_utils.Conv1D(self.test_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1',
                                       trainable=self.encoder_training,
                                       reuse=True)
            conv2 = model_utils.Conv1D(conv1,
                                       4,
                                       5,
                                       4,
                                       scope='conv2',
                                       trainable=self.encoder_training,
                                       reuse=True)
            conv3 = model_utils.Conv1D(conv2,
                                       8,
                                       5,
                                       4,
                                       scope='conv3',
                                       trainable=self.encoder_training,
                                       reuse=True)
            shape = conv3.get_shape().as_list()
            vector_laser_test = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            vector_cmd_test = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.test_cmd),
                (-1, self.dim_emb))
            vector_cmd_next_test = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.test_cmd_next),
                (-1, self.dim_emb))
            vector_prev_status_test = tf.matmul(
                self.test_prev_status, embedding_w_status) + embedding_b_status
            vector_prev_action_test = tf.matmul(
                self.test_prev_action, embedding_w_action) + embedding_b_action
            vector_obj_goal_test = tf.matmul(
                self.test_obj_goal, embedding_w_goal) + embedding_b_goal

            testing_planner_input = tf.concat([
                vector_laser_test, vector_cmd_test, vector_cmd_next_test,
                vector_prev_status_test, vector_obj_goal_test
            ],
                                              axis=1)

            testing_controller_input = tf.concat([
                vector_laser_test, vector_cmd_test, vector_cmd_next_test,
                vector_prev_action_test, vector_obj_goal_test
            ],
                                                 axis=1)

        return training_planner_input, training_controller_input, testing_planner_input, testing_controller_input
Code Example #4
File: rdpg_critic.py  Project: xie9187/lan_nav
    def Model(self, inputs):
        laser, cmd, cmd_next, prev_action, obj_goal, action = inputs
        with tf.variable_scope('encoder'):

            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb])
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb])
            embedding_status = tf.get_variable(
                'embedding_status', [self.n_cmd_type**2, self.dim_emb])
            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb])
            embedding_b_action = tf.get_variable('embedding_b_action',
                                                 [self.dim_emb])
            embedding_w_status = tf.get_variable('embedding_w_status',
                                                 [self.dim_cmd, self.dim_emb])
            embedding_b_status = tf.get_variable('embedding_b_status',
                                                 [self.dim_emb])

            conv1 = model_utils.Conv1D(laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1')
            conv2 = model_utils.Conv1D(conv1, 4, 5, 4, scope='conv2')
            conv3 = model_utils.Conv1D(conv2, 8, 5, 4, scope='conv3')
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))
            curr_status = cmd * self.n_cmd_type + cmd_next
            vector_curr_status = tf.reshape(
                tf.nn.embedding_lookup(embedding_status, curr_status),
                (-1, self.dim_emb))
            vector_prev_action = tf.matmul(
                prev_action, embedding_w_action) + embedding_b_action
            vector_obj_goal = tf.matmul(obj_goal,
                                        embedding_w_goal) + embedding_b_goal
            vector_action = tf.matmul(action,
                                      embedding_w_action) + embedding_b_action

            input_vector = tf.concat([
                vector_laser, vector_curr_status, vector_prev_action,
                vector_obj_goal, vector_action
            ],
                                     axis=1)

        with tf.variable_scope('controller'):
            shape = input_vector.get_shape().as_list()
            w_hidden_1 = tf.get_variable(
                'w_hidden_1', [shape[1], self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer())
            b_hidden_1 = tf.get_variable(
                'b_hidden_1', [self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer())
            w_hidden_2 = tf.get_variable(
                'w_hidden_2', [self.n_hidden, self.n_hidden // 2],
                initializer=tf.contrib.layers.xavier_initializer())
            b_hidden_2 = tf.get_variable(
                'b_hidden_2', [self.n_hidden // 2],
                initializer=tf.contrib.layers.xavier_initializer())
            w_q = tf.get_variable('w_q', [self.n_hidden // 2, 1],
                                  initializer=tf.initializers.random_uniform(
                                      -0.003, 0.003))
            b_q = tf.get_variable('b_q', [1],
                                  initializer=tf.initializers.random_uniform(
                                      -0.003, 0.003))

        hidden_1 = tf.nn.leaky_relu(tf.matmul(input_vector,
                                              w_hidden_1)) + b_hidden_1
        hidden_2 = tf.nn.leaky_relu(tf.matmul(hidden_1,
                                              w_hidden_2)) + b_hidden_2
        q = tf.matmul(hidden_2, w_q) + b_q

        return q
Code Example #5
    def Model(self, training):
        with tf.variable_scope('encoder'):
            conv1 = model_utils.Conv1D(self.input_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1',
                                       trainable=training)
            conv2 = model_utils.Conv1D(conv1,
                                       4,
                                       5,
                                       4,
                                       scope='conv2',
                                       trainable=training)
            conv3 = model_utils.Conv1D(conv2,
                                       8,
                                       5,
                                       4,
                                       scope='conv3',
                                       trainable=training)
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb])
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb])

            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb],
                trainable=training)
            embedding_b_action = tf.get_variable('embedding_b_action',
                                                 [self.dim_emb],
                                                 trainable=training)

            embedding_cmd = tf.get_variable('cmd_embedding',
                                            [self.n_cmd_type, self.dim_emb])

        with tf.variable_scope('controller'):
            vector_goal = tf.matmul(self.input_goal,
                                    embedding_w_goal) + embedding_b_goal
            vector_prev_action = tf.matmul(
                self.input_prev_action,
                embedding_w_action) + embedding_b_action
            vector_cmd = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.input_cmd),
                [-1, self.dim_emb])
            vector_cmd_next = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.input_cmd_next),
                [-1, self.dim_emb])

            controller_input = tf.concat([
                vector_laser, vector_cmd, vector_cmd_next, vector_goal,
                vector_prev_action
            ],
                                         axis=1)

            shape = controller_input.get_shape().as_list()
            w_hidden = tf.get_variable(
                'w_hidden', [shape[1], self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=training)
            b_hidden = tf.get_variable(
                'b_hidden', [self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=training)
            w_action_linear = tf.get_variable(
                'w_action_linear', [self.n_hidden, self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=training)
            b_action_linear = tf.get_variable(
                'b_action_linear', [self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=training)
            w_action_angular = tf.get_variable(
                'w_action_angular', [self.n_hidden, self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=training)
            b_action_angular = tf.get_variable(
                'b_action_angular', [self.dim_action // 2],
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=training)

            hidden = tf.nn.leaky_relu(
                tf.matmul(controller_input, w_hidden) + b_hidden)  # b*l, n
            a_linear = tf.nn.sigmoid(
                tf.matmul(hidden, w_action_linear) +
                b_action_linear) * self.action_range[0]
            a_angular = tf.nn.tanh(
                tf.matmul(hidden, w_action_angular) +
                b_action_angular) * self.action_range[1]
            pred_action = tf.concat([a_linear, a_angular], axis=1)

        return pred_action
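
Note (illustrative, not repository code): the controller head bounds its two outputs with different nonlinearities, a sigmoid for the linear velocity and a tanh for the angular velocity, and then scales them by action_range. A small numeric sketch using an assumed action_range of (0.5, 1.0); the real values are defined elsewhere in the project.

import numpy as np

def scale_action(pre_linear, pre_angular, action_range=(0.5, 1.0)):
    # sigmoid keeps the linear velocity in (0, action_range[0])
    a_linear = 1.0 / (1.0 + np.exp(-pre_linear)) * action_range[0]
    # tanh keeps the angular velocity in (-action_range[1], action_range[1])
    a_angular = np.tanh(pre_angular) * action_range[1]
    return a_linear, a_angular

print(scale_action(np.array([-10.0, 10.0]), np.array([-10.0, 10.0])))
# linear stays within (0, 0.5), angular within (-1.0, 1.0)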
Code Example #6
    def Model(self):
        with tf.variable_scope('encoder'):
            conv1 = model_utils.Conv1D(self.input_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1')
            conv2 = model_utils.Conv1D(conv1, 4, 5, 4, scope='conv2')
            conv3 = model_utils.Conv1D(conv2, 8, 5, 4, scope='conv3')
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb])
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb])

            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb])
            embedding_b_action = tf.get_variable('embedding_b_action',
                                                 [self.dim_emb])

            embedding_cmd = tf.get_variable('cmd_embedding',
                                            [self.n_cmd_type, self.dim_emb])

        with tf.variable_scope('controller'):
            vector_goal = tf.matmul(self.input_goal,
                                    embedding_w_goal) + embedding_b_goal
            vector_prev_action = tf.matmul(
                self.input_prev_action,
                embedding_w_action) + embedding_b_action
            vector_action = tf.matmul(self.input_action,
                                      embedding_w_action) + embedding_b_action
            vector_cmd = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.input_cmd),
                [-1, self.dim_emb])
            vector_cmd_next = tf.reshape(
                tf.nn.embedding_lookup(embedding_cmd, self.input_cmd_next),
                [-1, self.dim_emb])

            inputs = tf.concat([
                vector_laser, vector_cmd, vector_cmd_next, vector_goal,
                vector_prev_action, vector_action
            ],
                               axis=1)

            shape = inputs.get_shape().as_list()
            w_hidden = tf.get_variable(
                'w_hidden', [shape[1], self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer())
            b_hidden = tf.get_variable(
                'b_hidden', [self.n_hidden],
                initializer=tf.contrib.layers.xavier_initializer())
            w_q = tf.get_variable('w_q', [self.n_hidden, 1],
                                  initializer=tf.initializers.random_uniform(
                                      -0.003, 0.003))
            b_q = tf.get_variable('b_q', [1],
                                  initializer=tf.initializers.random_uniform(
                                      -0.003, 0.003))

        hidden = tf.nn.leaky_relu(tf.matmul(inputs, w_hidden)) + b_hidden

        q = tf.matmul(hidden, w_q) + b_q

        return q
Code Example #7
    def Model(self, inputs):
        laser, cmd, cmd_next, prev_action, obj_goal, action = inputs
        with tf.variable_scope('encoder'):

            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb])
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb])
            embedding_status = tf.get_variable(
                'embedding_status', [self.n_cmd_type**2, self.dim_emb])
            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb])
            embedding_b_action = tf.get_variable('embedding_b_action',
                                                 [self.dim_emb])
            embedding_w_status = tf.get_variable('embedding_w_status',
                                                 [self.dim_cmd, self.dim_emb])
            embedding_b_status = tf.get_variable('embedding_b_status',
                                                 [self.dim_emb])

            conv1 = model_utils.Conv1D(laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1')
            conv2 = model_utils.Conv1D(conv1, 4, 5, 4, scope='conv2')
            conv3 = model_utils.Conv1D(conv2, 8, 5, 4, scope='conv3')
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))
            curr_status = cmd * self.n_cmd_type + cmd_next
            vector_curr_status = tf.reshape(
                tf.nn.embedding_lookup(embedding_status, curr_status),
                (-1, self.dim_emb))
            vector_prev_action = tf.matmul(
                prev_action, embedding_w_action) + embedding_b_action
            vector_obj_goal = tf.matmul(obj_goal,
                                        embedding_w_goal) + embedding_b_goal
            vector_action = tf.matmul(action,
                                      embedding_w_action) + embedding_b_action

            input_vector = tf.concat([
                vector_laser, vector_curr_status, vector_prev_action,
                vector_obj_goal, vector_action
            ],
                                     axis=1)

        with tf.variable_scope('q'):
            rnn_cell = model_utils._lstm_cell(self.n_hidden,
                                              self.n_layers,
                                              name='rnn/basic_lstm_cell')
            w_q = tf.get_variable('w_q', [self.n_hidden, 1],
                                  initializer=tf.initializers.random_uniform(
                                      -0.003, 0.003))
            b_q = tf.get_variable('b_q', [1],
                                  initializer=tf.initializers.random_uniform(
                                      -0.003, 0.003))

            shape = input_vector.get_shape().as_list()
            input_vector_reshape = tf.reshape(
                input_vector, [self.batch_size, self.max_step, shape[1]])

            rnn_output, _ = tf.nn.dynamic_rnn(rnn_cell,
                                              input_vector_reshape,
                                              sequence_length=self.length,
                                              dtype=tf.float32)  # b, l, h
            rnn_output_reshape = tf.reshape(rnn_output,
                                            [-1, self.n_hidden])  # b*l, h
            q = tf.matmul(rnn_output_reshape, w_q) + b_q

        return q
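
Note (illustrative, not repository code): tf.nn.dynamic_rnn with a sequence_length argument, as used above, copies the LSTM state through and zeroes the outputs for steps beyond each sequence's valid length, so padded steps contribute no recurrent information. A minimal sketch with assumed shapes:

import tensorflow as tf

batch_size, max_step, dim_in, n_hidden = 2, 5, 3, 4
x = tf.placeholder(tf.float32, [batch_size, max_step, dim_in])
length = tf.placeholder(tf.int32, [batch_size])
cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
outputs, state = tf.nn.dynamic_rnn(cell, x, sequence_length=length, dtype=tf.float32)
# outputs: (batch_size, max_step, n_hidden); rows beyond each `length` are zero vectors,
# and they can be reshaped to (batch_size * max_step, n_hidden) for a per-step Q head.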
Code Example #8
    def Encoder(self, input_laser, input_cmd, input_cmd_next, input_cmd_skip,
                input_obj_goal, label_status, label_action):
        with tf.variable_scope('encoder'):
            embedding_w_goal = tf.get_variable('embedding_w_goal',
                                               [self.dim_action, self.dim_emb],
                                               trainable=self.encoder_training)
            embedding_b_goal = tf.get_variable('embedding_b_goal',
                                               [self.dim_emb],
                                               trainable=self.encoder_training)
            embedding_status = tf.get_variable(
                'embedding_status', [self.n_cmd_type**2, self.dim_emb],
                trainable=self.encoder_training)
            # embedding_cmd = tf.get_variable('embedding_cmd', [self.n_cmd_type, self.dim_emb])
            embedding_w_action = tf.get_variable(
                'embedding_w_action', [self.dim_action, self.dim_emb],
                trainable=self.encoder_training)
            embedding_b_action = tf.get_variable(
                'embedding_b_action', [self.dim_emb],
                trainable=self.encoder_training)

            batch_size = self.batch_size // self.gpu_num
            # training input
            conv1 = model_utils.Conv1D(input_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1',
                                       trainable=self.encoder_training)
            conv2 = model_utils.Conv1D(conv1,
                                       4,
                                       5,
                                       4,
                                       scope='conv2',
                                       trainable=self.encoder_training)
            conv3 = model_utils.Conv1D(conv2,
                                       8,
                                       5,
                                       4,
                                       scope='conv3',
                                       trainable=self.encoder_training)
            shape = conv3.get_shape().as_list()
            vector_laser = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            curr_status = input_cmd * self.n_cmd_type + input_cmd_next
            vector_curr_status = tf.reshape(
                tf.nn.embedding_lookup(embedding_status, curr_status),
                (-1, self.dim_emb))

            # vector_cmd = tf.reshape(tf.nn.embedding_lookup(embedding_cmd, input_cmd), (-1, self.dim_emb))
            # vector_cmd_next = tf.reshape(tf.nn.embedding_lookup(embedding_cmd, input_cmd_next), (-1, self.dim_emb))

            action_reshape = tf.reshape(
                label_action,
                [batch_size, self.max_step, self.dim_action])  #  b, l, 2
            action_0_to_m1 = tf.slice(
                action_reshape, [0, 0, 0],
                [batch_size, self.max_step - 1, self.dim_action])
            prev_action_0 = tf.zeros([batch_size, 1, self.dim_action])
            prev_action = tf.reshape(
                tf.concat([prev_action_0, action_0_to_m1], axis=1),
                [-1, self.dim_action])
            self.prev_action = prev_action
            vector_prev_action = tf.matmul(
                prev_action, embedding_w_action) + embedding_b_action

            vector_obj_goal = tf.matmul(input_obj_goal,
                                        embedding_w_goal) + embedding_b_goal

            training_input = tf.concat([
                vector_laser, vector_curr_status, vector_prev_action,
                vector_obj_goal
            ],
                                       axis=1)
            self.training_input = training_input

            # testing input
            conv1 = model_utils.Conv1D(self.test_laser,
                                       2,
                                       5,
                                       4,
                                       scope='conv1',
                                       trainable=self.encoder_training,
                                       reuse=True)
            conv2 = model_utils.Conv1D(conv1,
                                       4,
                                       5,
                                       4,
                                       scope='conv2',
                                       trainable=self.encoder_training,
                                       reuse=True)
            conv3 = model_utils.Conv1D(conv2,
                                       8,
                                       5,
                                       4,
                                       scope='conv3',
                                       trainable=self.encoder_training,
                                       reuse=True)
            shape = conv3.get_shape().as_list()
            vector_laser_test = tf.reshape(conv3, (-1, shape[1] * shape[2]))

            vector_curr_status = tf.reshape(
                tf.nn.embedding_lookup(embedding_status, self.test_status),
                (-1, self.dim_emb))
            vector_prev_action_test = tf.matmul(
                self.test_prev_action, embedding_w_action) + embedding_b_action
            vector_obj_goal_test = tf.matmul(
                self.test_obj_goal, embedding_w_goal) + embedding_b_goal

            testing_input = tf.concat([
                vector_laser_test, vector_curr_status, vector_prev_action_test,
                vector_obj_goal_test
            ],
                                      axis=1)
            self.testing_input = testing_input
        return training_input, testing_input
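
Note (illustrative, not repository code): the status index used throughout these examples, cmd * n_cmd_type + cmd_next, flattens the ordered pair (cmd, cmd_next) into a single row-major index in [0, n_cmd_type ** 2), which then selects one row of the embedding_status table. A small sketch with assumed sizes:

import numpy as np

n_cmd_type, dim_emb = 4, 8                            # assumed sizes, for illustration only
embedding_status = np.random.randn(n_cmd_type ** 2, dim_emb)

cmd, cmd_next = 2, 3
curr_status = cmd * n_cmd_type + cmd_next             # pair (2, 3) -> row 11
vector_curr_status = embedding_status[curr_status]    # one dim_emb-sized embedding row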