Example #1
    def __init__(self, input_dim, hidden_dim, embedding_dim, num_layers,
                 pred_hidden_dims=None, concat=True, bn=True, dropout=0.0, args=None, use_cuda=True, num_aggs=1):
        super(GcnEncoderGraph, self).__init__()
        # Avoid a shared mutable default argument for the prediction-head sizes.
        pred_hidden_dims = pred_hidden_dims if pred_hidden_dims is not None else []
        self.concat = concat
        add_self = not concat  # self-loops are only added when layer outputs are not concatenated
        self.bn = bn
        self.num_layers = num_layers
        self.num_aggs = num_aggs
        self.use_cuda = use_cuda

        if use_cuda:
            self.device = get_torch_device()
        else:
            self.device = torch.device("cpu")

        self.bias = True
        if args is not None:
            self.bias = args.bias

        self.conv_first, self.conv_block, self.conv_last = self.build_conv_layers(
            input_dim, hidden_dim, embedding_dim, num_layers,
            add_self, normalize=True, dropout=dropout)
        self.act = nn.ReLU()

        if concat:
            self.pred_input_dim = hidden_dim * (num_layers - 1) + embedding_dim
        else:
            self.pred_input_dim = embedding_dim

        # Xavier-initialize every GraphConv weight with a ReLU gain; zero the biases.
        for m in self.modules():
            if isinstance(m, GraphConv):
                init.xavier_uniform_(m.weight.data, gain=nn.init.calculate_gain('relu'))
                if m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
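
With concat=True, the prediction head's input width is the sum of every intermediate layer's hidden_dim plus the final embedding_dim. A minimal sketch of that arithmetic and of the in-place Xavier initialization pass, using nn.Linear as a stand-in for the repo's GraphConv (the dimension values here are assumptions for illustration):

import torch.nn as nn
from torch.nn import init

# Stand-in dimensions; hidden_dim / embedding_dim / num_layers are assumptions.
hidden_dim, embedding_dim, num_layers = 20, 30, 3
concat = True

# concat=True stacks the two intermediate layer outputs next to the
# final embedding: 20 * (3 - 1) + 30 = 70. Otherwise just embedding_dim.
pred_input_dim = hidden_dim * (num_layers - 1) + embedding_dim if concat else embedding_dim
assert pred_input_dim == 70

# In-place Xavier initialization with a ReLU gain, as in the loop above.
layer = nn.Linear(10, hidden_dim)
init.xavier_uniform_(layer.weight, gain=nn.init.calculate_gain('relu'))
if layer.bias is not None:
    init.constant_(layer.bias, 0.0)
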
Example #2
    def __init__(self, input_dim, node_embedding_dim, graph_embedding_dim, max_num_nodes=10, gcn_num_layers=2,
                 num_pooling=1, assign_dim=40, num_aggs=1, use_cuda=True):
        super(GraphTD3, self).__init__()
        self.node_embedding_dim = node_embedding_dim
        self.graph_embedding_dim = graph_embedding_dim
        self.input_dim = input_dim
        if use_cuda:
            self.device = utls.get_torch_device()
        else:
            self.device = torch.device("cpu")

        # Graph embedder: encodes an input graph into a fixed-size embedding of
        # size graph_embedding_dim via hierarchical (soft) pooling.
        self.graph_embedder = SoftPoolingGcnEncoder(max_num_nodes=max_num_nodes, input_dim=input_dim,
                                                    hidden_dim=node_embedding_dim,
                                                    embedding_dim=graph_embedding_dim, num_layers=gcn_num_layers,
                                                    num_pooling=num_pooling,
                                                    assign_hidden_dim=assign_dim, num_aggs=num_aggs).to(self.device)

        self.actor = Actor(graph_embedding_dim, node_embedding_dim).to(self.device)
        self.critic = Critic(graph_embedding_dim, node_embedding_dim, 1).to(self.device)
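
Every snippet gates device placement on a use_cuda flag through a get_torch_device() helper. That helper lives elsewhere in the repo (utls); a plausible one-line version is shown below, as an assumption about its behavior rather than the repo's actual code:

import torch

def get_torch_device():
    # Hypothetical stand-in for utls.get_torch_device: prefer CUDA when
    # available, otherwise fall back to the CPU.
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")

use_cuda = True
device = get_torch_device() if use_cuda else torch.device("cpu")
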
Example #3
    def __init__(self, input_dim, state_dim, action_dim, replayBuff, lr=1e-3, gamma=0.99, eta=1e-3,
                 gcn_num_layers=2, num_pooling=1, assign_hidden_dim=40, assign_dim=40, num_aggs=1, use_cuda=True):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.input_dim = input_dim

        self.lr = lr

        self.it = 0  # training-iteration counter
        self.replay = replayBuff
        if use_cuda:
            self.device = utls.get_torch_device()
        else:
            self.device = torch.device("cpu")

        self.actor_critic = GraphTD3(input_dim=input_dim, node_embedding_dim=action_dim, graph_embedding_dim=state_dim,
                                     use_cuda=use_cuda,
                                     gcn_num_layers=gcn_num_layers, num_pooling=num_pooling, assign_dim=assign_dim,
                                     num_aggs=num_aggs, max_num_nodes=assign_hidden_dim).to(self.device)

        self.target_actor_critic = GraphTD3(input_dim=input_dim, node_embedding_dim=action_dim,
                                            graph_embedding_dim=state_dim, use_cuda=use_cuda,
                                            gcn_num_layers=gcn_num_layers, num_pooling=num_pooling,
                                            assign_dim=assign_dim, num_aggs=num_aggs,
                                            max_num_nodes=assign_hidden_dim).to(self.device)

        # self.actor_critic_opt = torch.optim.Adam(self.actor_critic.parameters(), self.lr, weight_decay=0.001)
        # A single Adam instance jointly optimizes the critic and the shared graph embedder.
        self.critic_opt = torch.optim.Adam(
            list(self.actor_critic.critic.parameters()) + list(self.actor_critic.graph_embedder.parameters()), self.lr,
            weight_decay=0.001)
        # self.actor_opt = torch.optim.Adam(self.actor_critic.actor.parameters(), self.lr, weight_decay=0.001)

        self.gamma = gamma
        self.eta = eta

        # Start the target network as an exact copy of the online network.
        utls.copy_parameters(self.target_actor_critic, self.actor_critic)
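
The last line hard-copies the online network's weights into the target network. utls.copy_parameters is repo-internal; a plausible implementation is sketched below, together with the Polyak soft update that TD3-style agents typically apply with a small rate such as eta=1e-3 (both function bodies are assumptions, not the repo's verbatim code):

import torch

def copy_parameters(target: torch.nn.Module, source: torch.nn.Module):
    # Hard copy: target <- source, used once at initialization so both
    # networks start identical.
    target.load_state_dict(source.state_dict())

def soft_update(target: torch.nn.Module, source: torch.nn.Module, eta: float = 1e-3):
    # Polyak averaging: target <- eta * source + (1 - eta) * target,
    # the slow target-tracking step performed after each training update.
    with torch.no_grad():
        for t_param, s_param in zip(target.parameters(), source.parameters()):
            t_param.mul_(1.0 - eta).add_(eta * s_param)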