Example #1
    def __init__(self, config, robot_state_dim, human_state_dim):
        """ The current code might not be compatible with models trained with previous version
        """
        super().__init__()
        self.multiagent_training = config.gcn.multiagent_training
        num_layer = config.gcn.num_layer
        X_dim = config.gcn.X_dim
        wr_dims = config.gcn.wr_dims
        wh_dims = config.gcn.wh_dims
        final_state_dim = config.gcn.final_state_dim
        similarity_function = config.gcn.similarity_function
        layerwise_graph = config.gcn.layerwise_graph
        skip_connection = config.gcn.skip_connection

        # design choice: similarity function used to build the graph adjacency
        # options: 'gaussian', 'embedded_gaussian', 'cosine', 'cosine_softmax', 'concatenation'
        self.similarity_function = similarity_function
        self.robot_state_dim = robot_state_dim
        self.human_state_dim = human_state_dim
        self.num_layer = num_layer
        self.X_dim = X_dim
        self.layerwise_graph = layerwise_graph
        self.skip_connection = skip_connection

        logging.info('Similarity_func: {}'.format(self.similarity_function))
        logging.info('Layerwise_graph: {}'.format(self.layerwise_graph))
        logging.info('Skip_connection: {}'.format(self.skip_connection))
        logging.info('Number of layers: {}'.format(self.num_layer))

        self.w_r = mlp(robot_state_dim, wr_dims, last_relu=True)
        self.w_h = mlp(human_state_dim, wh_dims, last_relu=True)

        if self.similarity_function == 'embedded_gaussian':
            self.w_a = Parameter(torch.randn(self.X_dim, self.X_dim))
            nn.init.orthogonal_(self.w_a.data)
        elif self.similarity_function == 'concatenation':
            self.w_a = mlp(2 * X_dim, [2 * X_dim, 1], last_relu=True)

        self.w_v = mlp(X_dim, [X_dim], last_relu=True)

        # TODO: try other dim size
        embedding_dim = self.X_dim
        self.Ws = torch.nn.ParameterList()
        for i in range(self.num_layer):
            if i == 0:
                self.Ws.append(
                    Parameter(torch.randn(self.X_dim, embedding_dim)))
            elif i == self.num_layer - 1:
                self.Ws.append(
                    Parameter(torch.randn(embedding_dim, final_state_dim)))
            else:
                self.Ws.append(
                    Parameter(torch.randn(embedding_dim, embedding_dim)))

        # for visualization
        self.attention_weights = None
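
For context, here is a minimal sketch of how the embedding layers, the embedded-Gaussian similarity and the stacked weight matrices above could be combined in a forward pass. The function name, tensor shapes and the ReLU between layers are illustrative assumptions, not part of the original snippet.

import torch
import torch.nn.functional as F

def rgl_forward_sketch(model, robot_state, human_states):
    # robot_state: (batch, 1, robot_state_dim); human_states: (batch, num_humans, human_state_dim)
    # Embed both agent types into the common X_dim space and stack them as graph nodes.
    X = torch.cat([model.w_r(robot_state), model.w_h(human_states)], dim=1)
    # Embedded-Gaussian similarity: A[i, j] = softmax_j(x_i^T W_a x_j).
    scores = torch.matmul(torch.matmul(X, model.w_a), X.transpose(1, 2))
    A = F.softmax(scores, dim=2)
    # Graph convolution through the stacked weight matrices, with optional skip connections.
    H = X
    for W in model.Ws:
        H_new = F.relu(torch.matmul(A, torch.matmul(H, W)))
        H = H_new + H if model.skip_connection and H_new.shape == H.shape else H_new
    return H  # final node states; row 0 is the robot node
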
Example #2
 def __init__(self, config, graph_model1, graph_model2, action_dim):
     super(Critic, self).__init__()
     # Q1 architecture
     self.graph_model1 = graph_model1
     self.score_network1 = mlp(config.gcn.X_dim + action_dim, [256, 256, 1])
     # Q2 architecture
     self.graph_model2 = graph_model2
     self.score_network2 = mlp(config.gcn.X_dim + action_dim, [256, 256, 1])
     self.action_dim = action_dim
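
A minimal sketch of how the twin Q heads above might be evaluated; taking the robot node at index 0 of the graph encoding is an assumption for illustration.

import torch

def critic_forward_sketch(critic, state, action):
    # Encode the scene with each graph model and keep the robot node (index 0).
    feat1 = critic.graph_model1(state)[:, 0, :]
    feat2 = critic.graph_model2(state)[:, 0, :]
    # Each Q head scores the robot embedding concatenated with the action.
    q1 = critic.score_network1(torch.cat([feat1, action], dim=1))
    q2 = critic.score_network2(torch.cat([feat2, action], dim=1))
    return q1, q2
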
Example #3
    def __init__(self, config, robot_state_dim, human_state_dim, shared_gcn=True):
        super().__init__()
        self.shared_gcn = shared_gcn
        if shared_gcn:
            self.graph_model = RGL(config, robot_state_dim, human_state_dim)
        else:
            self.graph_model_val = RGL(config, robot_state_dim, human_state_dim)
            self.graph_model_act = RGL(config, robot_state_dim, human_state_dim)

        self.value_network = mlp(config.gcn.X_dim, config.rgl_ppo.value_network_dims)
        # action head: same hidden sizes as the value network, but 4 action logits out
        self.action_network = mlp(config.gcn.X_dim,
                                  config.rgl_ppo.value_network_dims[:-1] + [4])
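
A sketch of how the shared (or separate) GCN could feed the two heads; the softmax over 4 discrete actions is an assumption based on the action head's output size.

import torch.nn.functional as F

def actor_critic_forward_sketch(policy, state):
    # With a shared GCN one encoding feeds both heads; otherwise each head has its own encoder.
    if policy.shared_gcn:
        feat_val = feat_act = policy.graph_model(state)[:, 0, :]
    else:
        feat_val = policy.graph_model_val(state)[:, 0, :]
        feat_act = policy.graph_model_act(state)[:, 0, :]
    value = policy.value_network(feat_val)
    action_probs = F.softmax(policy.action_network(feat_act), dim=-1)  # 4 discrete actions
    return value, action_probs
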
Example #4
    def __init__(self, in_features, out_features, concat=True):
        super(GraphAttentionLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.concat = concat

        self.w_a = mlp(2 * self.in_features, [2 * self.in_features, 1],
                       last_relu=False)
        self.leakyrelu = nn.LeakyReLU(negative_slope=0.04)
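
A sketch of the attention computation this layer's parameters support: pairwise scores from the concatenation MLP, a LeakyReLU, a row-wise softmax, then feature aggregation. The ELU on the output when concat is set is an assumption borrowed from standard GAT practice.

import torch
import torch.nn.functional as F

def gat_layer_sketch(layer, h):
    # h: (num_nodes, in_features) node features
    n = h.size(0)
    # Score every ordered pair [h_i || h_j] with the shared MLP w_a.
    h_i = h.unsqueeze(1).expand(n, n, -1)
    h_j = h.unsqueeze(0).expand(n, n, -1)
    e = layer.leakyrelu(layer.w_a(torch.cat([h_i, h_j], dim=-1)).squeeze(-1))
    attention = F.softmax(e, dim=1)          # normalise over neighbours j
    h_prime = torch.matmul(attention, h)     # aggregate neighbour features
    return F.elu(h_prime) if layer.concat else h_prime
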
Example #5
 def __init__(self, config, graph_model, action_dim, max_action, min_action):
     super(Actor, self).__init__()
     self.graph_model = graph_model
     self.action_network = mlp(config.gcn.X_dim, [256, action_dim])
     self.max_action = max_action
     self.min_action = min_action
     self.action_dim = action_dim
     # scale and offset applied to the squashed network output (see sketch below)
     self.action_amplitude = max_action
     self.action_middle = min_action
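
A sketch of one common way an amplitude/middle pair is used to map a tanh-squashed output into the action range; whether the original code scales actions exactly this way is an assumption.

import torch

def actor_forward_sketch(actor, state):
    # Encode the scene and take the robot node embedding.
    feat = actor.graph_model(state)[:, 0, :]
    # Squash to [-1, 1], then shift and scale into the environment's action range.
    raw_action = torch.tanh(actor.action_network(feat))
    return actor.action_middle + actor.action_amplitude * raw_action
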
Example #6
 def __init__(self, config, graph_model, time_step):
     """
     This function predicts the next state given the current state as input.
     It uses a graph model to encode the state into a latent space and predict each human's next state.
     """
     super().__init__()
     self.trainable = True
     self.kinematics = config.action_space.kinematics
     self.graph_model = graph_model
     self.human_motion_predictor = mlp(
         config.gcn.X_dim, config.model_predictive_rl.motion_predictor_dims)
     self.time_step = time_step
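
A sketch of how the predictor could roll the state forward: the graph model produces latent node states and the MLP decodes the human nodes into their predicted next states. Treating index 0 as the robot node and the rest as humans is an assumption.

def motion_predictor_sketch(predictor, state):
    # Encode robot + humans into latent node states.
    node_states = predictor.graph_model(state)
    # Decode every node, then keep only the human nodes as the prediction.
    next_human_states = predictor.human_motion_predictor(node_states)[:, 1:, :]
    return next_human_states
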
Example #7
    def __init__(self, config, robot_state_dim, human_state_dim):
        """ The current code might not be compatible with models trained with previous version
        """
        super().__init__()
        self.multiagent_training = config.gcn.multiagent_training
        num_layer = config.gcn.num_layer
        X_dim = config.gcn.X_dim
        wr_dims = config.gcn.wr_dims
        wh_dims = config.gcn.wh_dims
        final_state_dim = config.gcn.final_state_dim
        similarity_function = config.gcn.similarity_function
        layerwise_graph = config.gcn.layerwise_graph
        skip_connection = config.gcn.skip_connection

        # design choice: similarity function used to build the graph adjacency
        # options: 'gaussian', 'embedded_gaussian', 'cosine', 'cosine_softmax', 'concatenation'
        self.similarity_function = similarity_function
        self.robot_state_dim = robot_state_dim
        self.human_state_dim = human_state_dim
        self.num_layer = num_layer
        self.X_dim = X_dim
        self.layerwise_graph = layerwise_graph
        self.skip_connection = skip_connection
        self.gat0 = GraphAttentionLayer(self.X_dim, self.X_dim)
        self.gat1 = GraphAttentionLayer(self.X_dim, self.X_dim)

        logging.info('Similarity_func: {}'.format(self.similarity_function))
        logging.info('Layerwise_graph: {}'.format(self.layerwise_graph))
        logging.info('Skip_connection: {}'.format(self.skip_connection))
        logging.info('Number of layers: {}'.format(self.num_layer))

        self.w_r = mlp(robot_state_dim, wr_dims, last_relu=True)
        self.w_h = mlp(human_state_dim, wh_dims, last_relu=True)
        # for visualization
        self.attention_weights = None
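
A sketch of how this GAT-based variant could propagate node features: the same robot/human embeddings as in Example #1, but with message passing done by the two attention layers. Batch handling and the exact call signature of GraphAttentionLayer.forward are assumptions.

import torch

def gat_rgl_forward_sketch(model, robot_state, human_states):
    # Embed both agent types into the X_dim space and stack them as graph nodes.
    X = torch.cat([model.w_r(robot_state), model.w_h(human_states)], dim=1)
    # Two rounds of attention-based message passing replace the fixed similarity matrix.
    H = model.gat1(model.gat0(X))
    return H  # row 0 is the robot node
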
Example #8
 def __init__(self, config, graph_model):
     super().__init__()
     self.graph_model = graph_model
     self.value_network = mlp(config.gcn.X_dim, config.model_predictive_rl.value_network_dims)
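
A sketch of the corresponding value estimate: the graph encoding's robot node fed through the value MLP. Reading the robot node at index 0 is an assumption.

def value_estimator_sketch(estimator, state):
    # The state value is read off the robot node of the graph encoding.
    robot_embedding = estimator.graph_model(state)[:, 0, :]
    return estimator.value_network(robot_embedding)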