Example #1
    def __init__(self, input_dim, self_state_dim, num_layer, X_dim, wr_dims,
                 wh_dims, final_state_dim, gcn2_w1_dim, planning_dims,
                 similarity_function, layerwise_graph, skip_connection):
        super().__init__()
        # design choice

        # 'gaussian', 'embedded_gaussian', 'cosine', 'cosine_softmax', 'concatenation'
        # note: the constructor's similarity_function argument is overridden by this hard-coded choice
        self.similarity_function = 'embedded_gaussian'
        logging.info('self.similarity_func: {}'.format(
            self.similarity_function))
        human_state_dim = input_dim - self_state_dim
        self.self_state_dim = self_state_dim
        self.human_state_dim = human_state_dim
        self.num_layer = num_layer
        self.X_dim = X_dim
        self.layerwise_graph = layerwise_graph
        self.skip_connection = skip_connection

        self.w_r = mlp(self_state_dim, wr_dims, last_relu=True)
        self.w_h = mlp(human_state_dim, wh_dims, last_relu=True)

        self.w_a = Parameter(torch.randn(self.X_dim, self.X_dim))

        if num_layer == 1:
            self.w1 = Parameter(torch.randn(self.X_dim, final_state_dim))
        elif num_layer == 2:
            self.w1 = Parameter(torch.randn(self.X_dim, gcn2_w1_dim))
            self.w2 = Parameter(torch.randn(gcn2_w1_dim, final_state_dim))

        self.value_net = mlp(final_state_dim, planning_dims)

        # for visualization
        self.A = None
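
Every constructor in these examples calls an mlp helper that is not part of the excerpts. Below is a minimal sketch of such a helper, assuming it simply stacks nn.Linear layers with ReLU activations and that last_relu decides whether the final layer also gets a ReLU; the later mlp2, conv_mlp2, AFAModule, ATCBasic, and GRU helpers are project-specific and are not reconstructed here.

import torch.nn as nn

def mlp(input_dim, mlp_dims, last_relu=False):
    # Plain feed-forward stack: input_dim -> mlp_dims[0] -> ... -> mlp_dims[-1].
    layers = []
    dims = [input_dim] + list(mlp_dims)
    for i in range(len(dims) - 1):
        layers.append(nn.Linear(dims[i], dims[i + 1]))
        # ReLU after every hidden layer; after the last one only if last_relu is set.
        if i != len(dims) - 2 or last_relu:
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)
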
Example #2
    def __init__(
        self,
        input_dim,
        self_state_dim,
        mlp1_dims,
        mlp2_dims,
        mlp3_dims,
        attention_dims,
        with_global_state,
        cell_size,
        cell_num,
    ):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.global_state_dim = mlp1_dims[-1]
        self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
        self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
        self.with_global_state = with_global_state
        if with_global_state:
            self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
        else:
            self.attention = mlp(mlp1_dims[-1], attention_dims)
        self.cell_size = cell_size
        self.cell_num = cell_num
        mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim
        self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
        self.attention_weights = None
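
For orientation, here is a hypothetical instantiation of the constructor above; the class name ValueNetwork and the concrete dimensions are illustrative assumptions, not values taken from the excerpt.

# Hypothetical setup: 6-dim robot state plus a 7-dim observation per human (13 total).
net = ValueNetwork(input_dim=13, self_state_dim=6,
                   mlp1_dims=[150, 100], mlp2_dims=[100, 50],
                   mlp3_dims=[150, 100, 100, 1], attention_dims=[100, 100, 1],
                   with_global_state=True, cell_size=4, cell_num=4)
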
Example #3
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp_dims, lstm_hidden_dim):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.lstm_hidden_dim = lstm_hidden_dim
        self.mlp1 = mlp(input_dim, mlp1_dims)
        self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
        self.lstm = nn.LSTM(mlp1_dims[-1], lstm_hidden_dim, batch_first=True)
Example #4
    def __init__(self,
                 embedding_dim=64,
                 h_dim=64,
                 mlp_dim=1024,
                 bottleneck_dim=1024,
                 self_state_dim=6,
                 activation='relu',
                 batch_norm=True,
                 dropout=0.0):
        super(PoolHiddenNet, self).__init__()

        self.mlp_dim = mlp_dim
        self.h_dim = h_dim
        self.bottleneck_dim = bottleneck_dim
        self.embedding_dim = embedding_dim
        self.self_state_dim = self_state_dim

        mlp_pre_dim = embedding_dim + h_dim
        mlp_pre_pool_dims = [mlp_pre_dim, 512, bottleneck_dim]

        self.spatial_embedding = nn.Sequential(  # TODO: check whether a ReLU is needed here
            nn.Linear(2, embedding_dim),
            nn.ReLU(),
        )

        self.mlp_pre_pool = mlp(mlp_pre_pool_dims[0],
                                mlp_pre_pool_dims[1:],
                                last_relu=True)
Example #5
    def __init__(self, input_dim, self_state_dim, joint_state_dim, in_mlp_dims,
                 sort_mlp_dims, action_dims, sort_attention_dims, nheads,
                 dropout, with_global_state):
        super().__init__()
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.self_state_dim = self_state_dim
        self.global_state_dim = in_mlp_dims[-1]
        self.sort_mlp_global_state_dim = sort_mlp_dims[-1]
        self.joint_state_dim = joint_state_dim
        self.in_mlp_dims = in_mlp_dims
        self.heads = nheads
        self.dropout = dropout
        self.input_dim = input_dim
        self.lstm_hidden_dim = in_mlp_dims[-1] * 2
        self.with_global_state = with_global_state

        self.in_mlp = mlp(self.input_dim, in_mlp_dims, last_relu=True)
        # avg+mlp1
        if self.with_global_state:
            self.sort_mlp = mlp(
                self.in_mlp_dims[-1] * 2 + self.joint_state_dim, sort_mlp_dims)
        else:
            self.sort_mlp = mlp(self.in_mlp_dims[-1] * 2, sort_mlp_dims)
        # avg + mlp2
        self.sort_mlp_attention = mlp(sort_mlp_dims[-1] * 2,
                                      sort_attention_dims)
        # self.attention = mlp(sort_attention_dims[-1]*2, attention_dims)
        # add a soft_max layer after soft_mlp_attentions

        self.lstm = nn.LSTM(sort_mlp_dims[-1] * 2,
                            self.lstm_hidden_dim,
                            batch_first=True)

        action_input_dim = self.lstm_hidden_dim + self.self_state_dim  # 50 + 6
        self.action_mlp = mlp(action_input_dim, action_dims)  #56,150,100,100,1
        self.attention_weights = None
Example #6
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims, attention_dims, with_global_state,
                 cell_size, cell_num):
        super().__init__()

        ##
        # print('')
        # print('---------- debug: value network ----------')
        # print('class ValueNetwork(nn.Module):   is created')

        ##
        self.self_state_dim = self_state_dim
        self.global_state_dim = mlp1_dims[-1]
        self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
        self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
        self.with_global_state = with_global_state
        if with_global_state:  # True
            self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
        else:
            self.attention = mlp(mlp1_dims[-1], attention_dims)
        self.cell_size = cell_size
        self.cell_num = cell_num
        mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim # =  50  + 6
        self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
        self.attention_weights = None
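
Below is a sketch of how a forward pass could wire these pieces together, assuming the input tensor has shape [batch, num_humans, input_dim], the robot's own state fills the first self_state_dim features of every row, and attention_dims ends in 1 so each human receives a single score. It uses a plain softmax over humans and is only an illustration consistent with the constructor above, not the excerpted project's exact code.

    def forward(self, state):
        # state: [batch, num_humans, input_dim]; the robot state is replicated in each row.
        batch, num_humans, _ = state.shape
        self_state = state[:, 0, :self.self_state_dim]

        mlp1_output = self.mlp1(state.reshape(-1, state.shape[2]))
        mlp2_output = self.mlp2(mlp1_output)

        if self.with_global_state:
            # Mean-pool the per-human embeddings into a global state and tile it back.
            global_state = mlp1_output.view(batch, num_humans, -1).mean(dim=1, keepdim=True)
            global_state = global_state.expand(batch, num_humans, self.global_state_dim) \
                                        .reshape(-1, self.global_state_dim)
            attention_input = torch.cat([mlp1_output, global_state], dim=1)
        else:
            attention_input = mlp1_output

        # One attention score per human, normalized across the crowd.
        scores = self.attention(attention_input).view(batch, num_humans)
        weights = torch.softmax(scores, dim=1).unsqueeze(2)
        self.attention_weights = weights[0, :, 0].detach().cpu().numpy()

        # Weighted sum of pairwise features, then value from robot state + crowd feature.
        features = mlp2_output.view(batch, num_humans, -1)
        weighted_feature = (weights * features).sum(dim=1)
        joint_state = torch.cat([self_state, weighted_feature], dim=1)
        return self.mlp3(joint_state)
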
Example #7
    def __init__(self,
                 generator,
                 obs_len,
                 device,
                 policy_learning,
                 h_dim=64,
                 info_dim=8):
        super().__init__()

        self.policy_learning = policy_learning
        self.obs_len = obs_len
        self.info_dim = info_dim
        self.encoder = generator.encoder
        self.h_dim = h_dim
        self.device = device
        self.generator = generator.to(self.device)
        #self.mlp = mlp(self.encoder.h_dim + self.generator.pool_net.mlp_dim + self.info_dim,[self.h_dim,32,16,1]).to(self.device)
        self.policy_mlp = mlp(
            self.encoder.h_dim + self.generator.pool_net.mlp_dim +
            self.info_dim, [self.h_dim, 32, 16, 2]).to(self.device)
Example #8
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims,
                 mlp3_dims, attention_dims, with_global_state, cell_size,
                 cell_num):
        super().__init__()
        self.self_state_dim = self_state_dim
        #print('self_state_dim = ',self.self_state_dim)
        self.global_state_dim = mlp1_dims[-1]
        self.encoder = Encoder(embedding_dim=64,
                               h_dim=64,
                               self_state_dim=self.self_state_dim)
        self.pooling = PoolHiddenNet(embedding_dim=64,
                                     h_dim=64,
                                     mlp_dim=1024,
                                     bottleneck_dim=1024)

        self.cell_size = cell_size
        self.cell_num = cell_num
        mlp3_input_dim = 1024 + 6  # PM_size + robot(self)_state
        self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
        self.attention_weights = None
Example #9
    def __init__(self,
                 input_dim,
                 self_state_dim,
                 joint_state_dim,
                 in_mlp_dims,
                 ia_mlp_dims,
                 sort_mlp_dims,
                 sort_attention_dims,
                 aggregation_dims,
                 action_dims,
                 with_global_state=True,
                 with_interaction=True,
                 with_om=False):
        super().__init__()
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.input_dim = input_dim
        self.self_state_dim = self_state_dim
        self.joint_state_dim = joint_state_dim
        self.gru_hidden_dim = ia_mlp_dims[-1] * 2
        self.with_global_state = with_global_state
        self.with_om = with_om
        self.with_interaction = with_interaction

        self.input_mlp = mlp(self.input_dim - self.self_state_dim,
                             in_mlp_dims,
                             last_relu=True)  # [B,C,N]

        if self.with_interaction:
            self.mlp = conv_mlp2(ia_mlp_dims[-1], ia_mlp_dims, (3, 3))
            self.afa_mlp = AFAModule(self.mlp, use_softmax=True)  #[B,C,N]
            self.ia_mlp = mlp(ia_mlp_dims[-1], ia_mlp_dims)
            self.sort_mlp = mlp(in_mlp_dims[-1] + self_state_dim,
                                sort_mlp_dims)  #[B,C*2,N]
        else:
            self.sort_mlp = mlp(in_mlp_dims[-1] * 2 + self_state_dim,
                                sort_mlp_dims)  #[B,C*2+13,N]

        # self.h0 = None
        # self.hn = None
        self.gru = GRU(ia_mlp_dims[-1] * 2, self.gru_hidden_dim)
        self.sort_mlp_attention = mlp(sort_mlp_dims[-1] * 2,
                                      sort_attention_dims)
        # self.lstm = nn.LSTM(sort_mlp_dims[-1]*2,  self.lstm_hidden_dim, batch_first=True)
        action_input_dim = self.gru_hidden_dim + self.self_state_dim  # 64 + 6
        # self.action_mlp = conv_mlp(action_input_dim, action_dims) #56,128,64,32,1
        self.action_mlp = mlp(action_input_dim, action_dims)  #56,128,64,64,1
        self.attention_weights = None
Example #10
    def __init__(self,
                 input_dim,
                 self_state_dim,
                 joint_state_dim,
                 in_mlp_dims,
                 action_dims,
                 with_dynamic_net=True,
                 with_global_state=False):
        super().__init__()
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.self_state_dim = self_state_dim
        self.global_state_dim = in_mlp_dims[-1]
        self.joint_state_dim = joint_state_dim
        self.in_mlp_dims = in_mlp_dims
        self.input_dim = input_dim
        self.gru_hidden_dim = in_mlp_dims[-1] * 2 + self.joint_state_dim
        self.with_dynamic_net = with_dynamic_net
        self.with_global_state = with_global_state

        self.in_mlp = mlp2(self.input_dim, in_mlp_dims, last_relu=True)
        if self.with_dynamic_net:
            # self.gru = GRU(self.in_mlp_dims[-1]*2, self.gru_hidden_dim)
            if self.with_global_state:
                self.sort_mlp = ATCBasic(self.joint_state_dim,
                                         self.gru_hidden_dim,
                                         epsilon=0.05)
                action_input_dim = self.gru_hidden_dim + self.self_state_dim  # 64 + 6
            else:
                self.sort_mlp = ATCBasic(self.joint_state_dim,
                                         in_mlp_dims[-1],
                                         epsilon=0.05)
                action_input_dim = in_mlp_dims[
                    -1] + self.self_state_dim  # 64 + 6

        # note: action_input_dim is only assigned inside the with_dynamic_net branch above
        self.action_mlp = mlp(action_input_dim, action_dims)  # 56, 150, 100, 100, 1
        self.attention_weights = None
Example #11
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims,
                 mlp3_dims, attention_dims, with_global_state, cell_size,
                 cell_num):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.global_state_dim = mlp1_dims[-1]
        self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
        self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
        self.with_global_state = with_global_state
        if with_global_state:
            self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
        else:
            self.attention = mlp(mlp1_dims[-1], attention_dims)
        self.cell_size = cell_size
        self.cell_num = cell_num
        mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim
        self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
        self.mlp3a = mlp(mlp3_input_dim, [150, 100, 80])
        self.attention_weights = None

        self.advantage_stream = nn.Sequential(nn.Linear(80, 80), nn.ReLU(),
                                              nn.Linear(80, 1))
Example #12
    def __init__(self, input_dim, self_state_dim, mlp_dims, lstm_hidden_dim):
        super(ValueNetwork1, self).__init__()
        self.self_state_dim = self_state_dim
        self.lstm_hidden_dim = lstm_hidden_dim
        self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
        self.lstm = nn.LSTM(input_dim, lstm_hidden_dim, batch_first=True)
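
A sketch of a forward pass that fits this constructor, assuming state has shape [batch, num_humans, input_dim] and the LSTM's final hidden state serves as a fixed-size summary of the crowd; this is illustrative, not the excerpted project's exact code.

    def forward(self, state):
        # state: [batch, num_humans, input_dim]; robot state is in the first self_state_dim features.
        batch = state.shape[0]
        self_state = state[:, 0, :self.self_state_dim]

        h0 = torch.zeros(1, batch, self.lstm_hidden_dim, device=state.device)
        c0 = torch.zeros(1, batch, self.lstm_hidden_dim, device=state.device)
        # Run the LSTM over the sequence of humans; hn summarizes the whole crowd.
        _, (hn, _) = self.lstm(state, (h0, c0))
        hn = hn.squeeze(0)

        # Value from the robot's own state concatenated with the crowd summary.
        joint_state = torch.cat([self_state, hn], dim=1)
        return self.mlp(joint_state)
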
Example #13
    def __init__(self,
                 input_dim,
                 self_state_dim,
                 joint_state_dim,
                 in_mlp_dims,
                 ia_mlp_dims,
                 sort_mlp_dims,
                 sort_attention_dims,
                 aggregation_dims,
                 action_dims,
                 gamma=0.9,
                 time_step=0.2,
                 v_pref=1.0,
                 with_global_state=True,
                 with_interaction=True,
                 with_om=False):
        super().__init__()
        # self.device = torch.device("cpu")
        self.gpu = True
        self.gamma = gamma
        self.time_step = time_step
        self.v_pref = v_pref
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() and self.gpu else "cpu")
        self.input_dim = input_dim
        self.self_state_dim = self_state_dim
        self.joint_state_dim = joint_state_dim
        self.gru_hidden_dim = ia_mlp_dims[-1] * 2
        self.with_global_state = with_global_state
        self.with_om = with_om
        self.with_interaction = with_interaction

        self.input_mlp = mlp(self.input_dim - self.self_state_dim,
                             in_mlp_dims,
                             last_relu=True)  # [B,C,N]

        if self.with_interaction:
            self.mlp = conv_mlp2(ia_mlp_dims[-1], ia_mlp_dims)
            self.afa_mlp = AFAModule(self.mlp, use_softmax=True)  #[B,C,N]
            self.ia_mlp = mlp(ia_mlp_dims[-1], ia_mlp_dims)

        # avg+mlp1
        if self.with_global_state:
            self.sort_mlp = mlp(in_mlp_dims[-1] * 2,
                                sort_mlp_dims)  #[B,C*2+13,N]
        else:
            self.sort_mlp = mlp(in_mlp_dims[-1] * 2, sort_mlp_dims)  #[B,C*2,N]
        # self.gru = nn.GRU(ia_mlp_dims[-1]*2, self.gru_hidden_dim, batch_first=True)
        # avg + mlp2
        self.sort_mlp_attention = mlp(sort_mlp_dims[-1] * 2,
                                      sort_attention_dims)

        self.gru = GRU(ia_mlp_dims[-1] * 2, self.gru_hidden_dim)
        self.gru2 = GRU(ia_mlp_dims[-1] * 2, self.gru_hidden_dim)
        self.gru3 = GRU(ia_mlp_dims[-1] * 2, self.gru_hidden_dim)
        self.motion_filter = mlp(self.gru_hidden_dim * 2,
                                 aggregation_dims,
                                 last_relu=True)
        self.motion_filter2 = mlp(self.gru_hidden_dim * 2,
                                  aggregation_dims,
                                  last_relu=True)

        action_input_dim = self.gru_hidden_dim + self.self_state_dim  # 64 + 6
        self.action_mlp = mlp(action_input_dim, action_dims)  #56,128,64,64,1
        self.attention_weights = None