Example #1
    def __init_softmax_generation(self, node_hidden_size,
                                  class_conditioning_size, num_layers,
                                  hidden_size):
        # Sets up the module to use softmax generation for edge types when
        # forward is called.

        input_size = (2 * node_hidden_size) + class_conditioning_size
        self.choose_x_shift = common.MLP(num_layers,
                                         hidden_size,
                                         input_size,
                                         output_size=self.num_shifts)
        self.choose_z_shift = common.MLP(num_layers,
                                         hidden_size,
                                         input_size,
                                         output_size=self.num_shifts)

        # Uniform weighting across all shifts
        self.x_class_weights = torch.ones(self.num_shifts)
        self.z_class_weights = torch.ones(self.num_shifts)

        self.generate_edge_type = self.generate_edge_type_softmax
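
A sketch of how generate_edge_type_softmax might consume the modules set up above. Only the method name comes from the assignment on the last line; the signature, the decision-input layout, and the use of torch.distributions.Categorical are assumptions.

    # Hypothetical sketch - the actual implementation is not shown in this excerpt
    def generate_edge_type_softmax(self, src_embed, dest_embed,
                                   class_conditioning):
        # Matches input_size = (2 * node_hidden_size) + class_conditioning_size
        decision_input = torch.cat(
            [src_embed, dest_embed, class_conditioning], dim=1)
        # One categorical distribution over shifts per axis
        x_shift = Categorical(logits=self.choose_x_shift(decision_input)).sample()
        z_shift = Categorical(logits=self.choose_z_shift(decision_input)).sample()
        return x_shift, z_shift
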
    def __init__(self, g_ops, **kwargs):
        super(AddEdge, self).__init__()
        self.graph_op = {'embed': g_ops.graph_embed}
        graph_hidden_size = g_ops.graph_hidden_size

        class_conditioning_size = kwargs['class_conditioning_size']
        num_layers = kwargs['num_decision_layers']
        hidden_size = kwargs['decision_layer_hidden_size']
        node_hidden_size = kwargs['node_hidden_size']

        # Output has 3 entries: the decision not to add an edge, and the
        # decisions to add an incoming/outgoing edge to the newly added node
        input_size = graph_hidden_size + node_hidden_size + class_conditioning_size
        self.add_edge = common.MLP(num_layers,
                                   hidden_size,
                                   input_size,
                                   output_size=3)

        # Sample an action index from the predicted probabilities
        self.get_actions = lambda batch_prob, *args, **kwargs: Categorical(
            batch_prob).sample()
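
common.MLP is called throughout these examples but never shown. A minimal stand-in consistent with the call signature MLP(num_layers, hidden_size, input_size, output_size=...) could look like the following; this is an inference from usage, not the project's actual module.

import torch.nn as nn

class MLP(nn.Module):
    # Hypothetical reconstruction of common.MLP, inferred from how it is
    # called above: num_layers hidden layers of width hidden_size followed
    # by a projection to output_size
    def __init__(self, num_layers, hidden_size, input_size, output_size):
        super().__init__()
        layers, in_size = [], input_size
        for _ in range(num_layers):
            layers += [nn.Linear(in_size, hidden_size), nn.ReLU()]
            in_size = hidden_size
        layers.append(nn.Linear(in_size, output_size))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
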
Example #3
    def __init__(self, num_prop_rounds, num_mlp_layers, mlp_hidden_size,
                 node_hidden_size, edge_hidden_size, num_shifts,
                 edge_embedding):
        super(DGMGGraphProp, self).__init__()
        self.num_prop_rounds = num_prop_rounds

        # Setting from the paper
        self.node_activation_hidden_size = 2 * node_hidden_size

        # Overwrite edge_hidden_size unless edge_embedding == 'embedding';
        # the passed-in value is only used in that case
        if edge_embedding == 'one-hot':
            edge_hidden_size = num_shifts * 2
        elif edge_embedding == 'ordinal':
            edge_hidden_size = (num_shifts - 1) * 2
        elif edge_embedding == 'one-hot-big':
            edge_hidden_size = num_shifts**2
        elif edge_embedding == 'scalar':
            edge_hidden_size = 2

        message_funcs = []
        node_update_funcs = []
        self.reduce_funcs = []
        message_func_input_size = (2 * node_hidden_size) + edge_hidden_size

        for t in range(num_prop_rounds):
            # input being [hv, hu, xuv]
            message_funcs.append(
                common.MLP(num_mlp_layers,
                           mlp_hidden_size,
                           message_func_input_size,
                           output_size=self.node_activation_hidden_size))
            self.reduce_funcs.append(partial(self.dgmg_reduce, round=t))

            node_update_funcs.append(
                nn.GRUCell(self.node_activation_hidden_size, node_hidden_size))

        self.message_funcs = nn.ModuleList(message_funcs)
        self.node_update_funcs = nn.ModuleList(node_update_funcs)

        # Dispatch to the GRU cell for the given propagation round
        self.update_node = lambda round, message_vector, node_embed: \
            self.node_update_funcs[round](message_vector, node_embed)
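
The forward pass that drives these modules is not part of this excerpt. In DGL-based DGMG implementations it typically follows the pattern below; dgmg_msg, the 'hv'/'a' feature keys, and the early return are assumptions here.

    # Hypothetical sketch of the propagation loop, assuming a DGLGraph g
    def forward(self, g):
        if g.number_of_edges() == 0:
            return
        for t in range(self.num_prop_rounds):
            # Compute [hv, hu, xuv] messages and reduce them with the
            # round-specific function bound via partial(...) in __init__
            g.update_all(message_func=self.dgmg_msg,
                         reduce_func=self.reduce_funcs[t])
            # GRU update of every node's hidden state for this round
            g.ndata['hv'] = self.update_node(t, g.ndata['a'], g.ndata['hv'])
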
    def __init__(self, g_ops, **kwargs):
        super(AddNode, self).__init__()
        self.graph_op = {'embed': g_ops.graph_embed, 'prop': g_ops.graph_prop}
        graph_hidden_size = g_ops.graph_hidden_size

        class_conditioning_size = kwargs['class_conditioning_size']
        num_layers = kwargs['num_decision_layers']
        hidden_size = kwargs['decision_layer_hidden_size']
        num_node_types = kwargs['num_node_types']
        self.node_hidden_size = kwargs['node_hidden_size']

        self.stop = 0
        # Output has num_node_types + 1 entries: one decision per node type,
        # plus the decision not to add a node at all
        input_size = graph_hidden_size + class_conditioning_size
        self.add_node = common.MLP(num_layers,
                                   hidden_size,
                                   input_size,
                                   output_size=num_node_types + 1)

        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        # If a node is to be added, initialize its hidden representation hv
        self.node_type_embed = nn.Embedding(num_node_types + 1,
                                            self.node_hidden_size)
        self.initialize_hv = nn.Linear(
            self.node_hidden_size + graph_hidden_size,
            self.node_hidden_size)
        self.node_embed_func = lambda node_type, graph_embed: torch.cat(
            [self.node_type_embed(
                torch.LongTensor([node_type - 1]).to(self.device)),
             graph_embed], dim=1)
        self._initialize_node_repr = self.initialize_node_DGMG

        self.get_actions = lambda batch_prob, *args, **kwargs: Categorical(
            batch_prob).sample()

        self.init_node_activation = torch.zeros(1, 2 * self.node_hidden_size)
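
initialize_node_DGMG, bound to _initialize_node_repr above, is also not shown. A plausible body, with the g argument, the 'hv'/'a' feature keys, and the last-node indexing as assumptions:

    # Hypothetical sketch: initialize the representation of the newly added node
    def initialize_node_DGMG(self, g, node_type, graph_embed):
        # Concatenate the node-type embedding with the graph embedding
        # (node_embed_func above), then project down to node_hidden_size
        hv_init = self.initialize_hv(
            self.node_embed_func(node_type, graph_embed))
        last = g.number_of_nodes() - 1
        g.nodes[last].data['hv'] = hv_init
        g.nodes[last].data['a'] = self.init_node_activation
        return hv_init
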
    def __init__(self, **kwargs):
        super().__init__()
        class_conditioning_size = kwargs['class_conditioning_size']
        num_layers = kwargs['num_decision_layers']
        hidden_size = kwargs['decision_layer_hidden_size']
        node_hidden_size = kwargs['node_hidden_size']
        force_valid = kwargs['force_valid']

        input_size = (2 * node_hidden_size) + class_conditioning_size
        self.choose_dest = common.MLP(num_layers,
                                      hidden_size,
                                      input_size,
                                      output_size=1)

        if force_valid:
            # Mask out invalid destination choices before sampling
            self.apply_softmax_temperature = self.remove_invalid_decisions
        else:
            # No-op: leave the distribution untouched
            self.apply_softmax_temperature = lambda *args, **kwargs: None

        self.get_actions = lambda batch_prob, *args, **kwargs: Categorical(
            torch.squeeze(batch_prob, dim=2)).sample().flatten()

        self.total_sum_dist = torch.zeros(1)
        self.class_sum_dist = torch.zeros(12, 1)
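
A standalone illustration of the sampling path: choose_dest emits one logit per candidate destination, and get_actions squeezes the trailing dimension before building the categorical distribution. The batch and candidate counts below are arbitrary.

import torch
from torch.distributions import Categorical

# 4 graphs in the batch, 7 candidate destinations each, 1 score per candidate
batch_prob = torch.softmax(torch.randn(4, 7, 1), dim=1)
dest = Categorical(torch.squeeze(batch_prob, dim=2)).sample().flatten()
print(dest.shape)  # torch.Size([4]) - one sampled destination per graph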