def __init__(self, model_id, name="CNNMLPDecoderGraphIndependent"):
        super(CNNMLPDecoderGraphIndependent, self).__init__(name=name)
        with self._enter_variable_scope():
            self.model_id = model_id

            self.visual_decoder = get_model_from_config(
                model_id=self.model_id,
                model_type="visual_decoder")(is_training=False,
                                             name="visual_decoder")

            # --------------- SKIP CONNECTION --------------- #
            #self.visual_decoder.skip1 = skip1
            #self.visual_decoder.skip2 = skip2
            #self.visual_decoder.skip3 = skip3

            self._network = modules.GraphIndependent(
                edge_model_fn=lambda: get_model_from_config(
                    model_id=self.model_id, model_type="mlp")
                (n_neurons=EncodeProcessDecode_v5_no_skip_no_train_flags.
                 n_neurons_edges,
                 n_layers=EncodeProcessDecode_v5_no_skip_no_train_flags.
                 n_layers_edges,
                 output_size=EncodeProcessDecode_v5_no_skip_no_train_flags.
                 edge_output_size,
                 typ="mlp_transform",
                 activation_final=False,
                 name="mlp_decoder_edge"),
                node_model_fn=lambda: get_model_from_config(
                    model_id=self.model_id,
                    model_type="visual_and_latent_decoder")
                (self.visual_decoder, name="visual_and_latent_node_decoder"),
                global_model_fn=lambda: get_model_from_config(
                    model_id=self.model_id, model_type="mlp")
                (n_neurons=EncodeProcessDecode_v5_no_skip_no_train_flags.
                 n_neurons_globals,
                 n_layers=EncodeProcessDecode_v5_no_skip_no_train_flags.
                 n_layers_globals,
                 output_size=EncodeProcessDecode_v5_no_skip_no_train_flags.
                 global_output_size,
                 typ="mlp_transform",
                 activation_final=False,
                 name="mlp_decoder_global"))
Code Example #2
 def __init__(self,
              num_cores=3,
              edge_output_size=None,
              node_output_size=None,
              global_output_size=None,
              global_block=True,
              latent_size=16,
              num_layers=2,
              concat_encoder=True,
              name="EncodeProcessDecodeNonRecurrent"):
     super(EncodeProcessDecodeNonRecurrent, self).__init__(name=name)
     # support_modes = snt.mixed_precision.modes([tf.float32, tf.float16])
     # snt.Linear.__call__ = support_modes(snt.Linear.__call__)
     # snt.mixed_precision.enable(tf.float16)
     self._encoder = MLPGraphIndependent(latent_size=latent_size,
                                         num_layers=num_layers)
     self._cores = [
         MLPGraphNetwork(latent_size=latent_size,
                         num_layers=num_layers,
                         global_block=global_block)
         for _ in range(num_cores)
     ]
     self._decoder = MLPGraphIndependent(latent_size=latent_size,
                                         num_layers=num_layers)
     self.concat_encoder = concat_encoder
     # Transforms the outputs into the appropriate shapes.
     if edge_output_size is None:
         edge_fn = None
     else:
         edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
     if node_output_size is None:
         node_fn = None
     else:
         node_fn = lambda: snt.Linear(node_output_size, name="node_output")
     if global_output_size is None:
         global_fn = None
     else:
         global_fn = lambda: snt.Linear(global_output_size,
                                        name="global_output")
     with self.name_scope:
         self._output_transform = modules.GraphIndependent(
             edge_fn, node_fn, global_fn)
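The forward pass for this constructor is not part of the excerpt. A minimal sketch of how such a non-recurrent stack is typically wired, assuming a Sonnet 2 style __call__ and graph_nets' utils_tf.concat helper (the method name and the concatenation detail are assumptions, not taken from the project):

 # Sketch only (assumed wiring, not the project's actual code).
 from graph_nets import utils_tf

 def __call__(self, input_graph):
     latent = self._encoder(input_graph)
     latent0 = latent
     for core in self._cores:
         if self.concat_encoder:
             # Feed the encoder output alongside the current latent graph.
             latent = core(utils_tf.concat([latent0, latent], axis=1))
         else:
             latent = core(latent)
     return self._output_transform(self._decoder(latent))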
Code Example #3
    def _build(self, inputs, is_training, verbose=VERBOSITY):
        """" re-initializing _network because it is currently not possible to pass the is_training flag at init() time """
        visual_encoder = get_model_from_config(model_id=self.model_id,
                                               model_type="visual_encoder")(
                                                   is_training=is_training,
                                                   name="visual_encoder")
        self._network = modules.GraphIndependent(
            edge_model_fn=lambda: get_model_from_config(self.model_id,
                                                        model_type="mlp")
            (n_neurons=EncodeProcessDecode_v3_172_latent_dim.n_neurons_edges,
             n_layers=EncodeProcessDecode_v3_172_latent_dim.n_layers_edges,
             output_size=None,
             typ="mlp_layer_norm",
             name="mlp_encoder_edge"),
            node_model_fn=lambda: get_model_from_config(
                self.model_id, model_type="visual_and_latent_encoder")
            (visual_encoder, name="visual_and_latent_node_encoder"),
            global_model_fn=None)

        return self._network(inputs)
Code Example #4
    def __init__(self, name="SegmentClassifier"):
        super(SegmentClassifier, self).__init__(name=name)

        self._edge_block = blocks.EdgeBlock(
            edge_model_fn=lambda : snt.nets.MLP([LATENT_SIZE]*2,
                                                activation=tf.nn.relu,
                                                activate_final=True,
                                                use_dropout=True
                                               ),
            #edge_model_fn=make_mlp_model,
            use_edges=False,
            use_receiver_nodes=True,
            use_sender_nodes=True,
            use_globals=False,
            name='edge_encoder_block'
        )
        self._node_encoder_block = blocks.NodeBlock(
            node_model_fn=make_mlp_model,
            use_received_edges=False,
            use_sent_edges=False,
            use_nodes=True,
            use_globals=False,
            name='node_encoder_block'
        )

        self._core = InteractionNetwork(
            edge_model_fn=make_mlp_model,
            node_model_fn=make_mlp_model,
            reducer=tf.unsorted_segment_sum
        )

        # Transforms the outputs into appropriate shapes.
        edge_output_size = 1
        edge_fn = lambda: snt.Sequential([
            snt.nets.MLP([edge_output_size],
                         activation=tf.nn.relu, # default is relu
                         name='edge_output'),
            tf.sigmoid])

        with self._enter_variable_scope():
            self._output_transform = modules.GraphIndependent(edge_fn, None, None)
Code Example #5
 def __init__(self,
              edge_output_size=None,
              node_output_size=None,
              global_output_size=None,
              name="EncodeProcessDecode"):
     super(EncodeProcessDecode, self).__init__(name=name)
     if edge_output_size is None:
         edge_fn = None
     else:
         edge_fn = make_mlp_model(size=edge_output_size, activation=tf.nn.relu)
     if node_output_size is None:
         node_fn = None
     else:
         node_fn = make_mlp_model(size=node_output_size, activation=tf.nn.relu)
     if global_output_size is None:
         global_fn = None
     else:
         global_fn = make_mlp_model(size=global_output_size, activation=tf.nn.relu)
     self._encoder = FullyGraphIndependent()
     self._core = RecurrentGraphNetwork()
     self._decoder = modules.GraphIndependent(edge_fn, node_fn, global_fn)
Code Example #6
    def __init__(self, model_id, name="EncoderGlobalsGraphIndependent"):
        super(EncoderGlobalsGraphIndependent, self).__init__(name=name)
        self.model_id = model_id

        with self._enter_variable_scope():
            self._network = modules.GraphIndependent(
                edge_model_fn=None,
                node_model_fn=None,
                global_model_fn=lambda: get_model_from_config(self.model_id,
                                                              model_type="mlp")
                (n_neurons=
                 EncodeProcessDecode_v5_no_skip_no_core_no_training_flags.
                 n_neurons_globals,
                 n_layers=
                 EncodeProcessDecode_v5_no_skip_no_core_no_training_flags.
                 n_layers_globals,
                 output_size=None,
                 activation_final=False,
                 typ="mlp_layer_norm",
                 name="mlp_encoder_global"),
            )
Code Example #7
 def __init__(self, name="Encoder", edge_one_hot=False, edge_vocab=None):
     """
     Predefined trainable ELMo embeddings for the nodes and edges separately and
     a linear layer for the global features.
     :param name: The name of the encoding layer
     :param edge_one_hot: If True and edge_vocab is given, edges are encoded as one-hot vectors instead of ELMo embeddings
     :param edge_vocab: The edge vocabulary used for the one-hot encoding
     """
     super(Encoder, self).__init__(name=name)
     self.word_elmo = hub.Module("https://tfhub.dev/google/elmo/2",
                                 trainable=True)
     if not edge_one_hot or edge_vocab is None:
         self.edge_elmo = hub.Module("https://tfhub.dev/google/elmo/2",
                                     trainable=True)
         edge_model = self.elmo_edge_model_fn
     else:
         self.edge_vocab = edge_vocab
         edge_model = self.one_hot_edge_model_fn
     with self._enter_variable_scope():
         self._encode = graph_net_modules.GraphIndependent(
             edge_model_fn=edge_model,
             node_model_fn=self.node_model_fn,
             global_model_fn=self.global_model_fn)
Code Example #8
    def init_transform(self):
        # Transforms the outputs into the appropriate shapes.
        if self.edge_output_size is None:
            edge_fn = None
        else:
            edge_fn = lambda: snt.Linear(self.edge_output_size,
                                         name="edge_output")
        if self.node_output_size is None:
            node_fn = None
        else:
            node_fn = lambda: snt.Linear(self.node_output_size,
                                         name="node_output")
        if self.global_output_size is None:
            global_fn = None
        else:
            global_fn = lambda: snt.Linear(self.global_output_size,
                                           name="global_output")

        with self._enter_variable_scope():
            self._output_transform = modules.GraphIndependent(
                edge_fn, node_fn, global_fn)
Code Example #9
  def __init__(self, name="SegmentClassifier"):
    super(SegmentClassifier, self).__init__(name=name)

    self._encoder = MLPGraphIndependent()
    self._core = modules.InteractionNetwork(
        edge_model_fn=make_mlp_model,
        node_model_fn=make_mlp_model,
        reducer=tf.unsorted_segment_sum
    )
    self._decoder = MLPGraphIndependent()

    # Transforms the outputs into appropriate shapes.
    edge_output_size = 1
    edge_fn = lambda: snt.Sequential([
        snt.nets.MLP([LATENT_SIZE // 2, edge_output_size],
                     activation=tf.nn.relu,  # default activation function
                     name='edge_output'),
        tf.sigmoid])

    with self._enter_variable_scope():
      self._output_transform = modules.GraphIndependent(edge_fn, None, None)
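make_mlp_model and MLPGraphIndependent are defined elsewhere in that project; for reference, the graph_nets demos define the MLP factory roughly as below (a sketch under that assumption; LATENT_SIZE and NUM_LAYERS are placeholder values and the project's own definition may differ):

 import sonnet as snt

 LATENT_SIZE = 128  # assumed value, not shown in the excerpt
 NUM_LAYERS = 2     # assumed value

 def make_mlp_model():
     # Two-layer MLP followed by layer normalization, as in the graph_nets demos.
     return snt.Sequential([
         snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),
         snt.LayerNorm()
     ])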
Code Example #10
 def __init__(self,
              edge_output_size=None,
              node_output_size=None,
              global_output_size=None,
              name="EncodeProcessDecode"):
     super(EncodeProcessDecode, self).__init__(name=name)
     self._encoder = MLPGraphIndependent()
     self._core = MLPGraphNetwork()
     self._decoder = MLPGraphIndependent()
     # Transforms the outputs into the appropriate shapes.
     if edge_output_size is None:
         edge_fn = None
     else:
         edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
     if node_output_size is None:
         node_fn = None
     else:
         node_fn = lambda: snt.Linear(node_output_size, name="node_output")
     with self._enter_variable_scope():
         self._output_transform = \
             modules.GraphIndependent(edge_fn, node_fn)
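The processing loop that pairs with this constructor is not shown. In the graph_nets demos, the matching forward pass concatenates the encoder output with the current latent graph at every step; a sketch assuming that demo-style _build signature and graph_nets' utils_tf.concat:

 def _build(self, input_op, num_processing_steps):
     # Sketch (assumes `from graph_nets import utils_tf`).
     latent = self._encoder(input_op)
     latent0 = latent
     output_ops = []
     for _ in range(num_processing_steps):
         core_input = utils_tf.concat([latent0, latent], axis=1)
         latent = self._core(core_input)
         decoded_op = self._decoder(latent)
         # One output graph per processing step, e.g. for per-step supervision.
         output_ops.append(self._output_transform(decoded_op))
     return output_ops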
Code Example #11
    def _build(self, inputs, is_training, verbose=VERBOSITY):
        """ we want to re-use the cnn encoder for both nodes and global attributes """
        visual_encoder = get_model_from_config(self.model_id,
                                               model_type="visual_encoder")(
                                                   is_training=None,
                                                   name="visual_encoder")
        """ we use a visual AND latent decoder for the nodes since it is necessary to entangle position / velocity and visual data """
        self._network = modules.GraphIndependent(
            edge_model_fn=lambda: get_model_from_config(self.model_id,
                                                        model_type="mlp")
            (n_neurons=EncodeProcessDecode_v3_1082_latent_dim.n_neurons_edges,
             n_layers=EncodeProcessDecode_v3_1082_latent_dim.n_layers_edges,
             output_size=None,
             typ="mlp_layer_norm",
             name="mlp_encoder_edge"),
            node_model_fn=lambda: get_model_from_config(
                self.model_id, model_type="visual_and_latent_encoder")
            (visual_encoder, name="visual_and_latent_node_encoder"),
            global_model_fn=None)

        return self._network(inputs)
Code Example #12
    def _build(self, inputs, is_training, verbose=VERBOSITY):
        self._network = modules.GraphIndependent(
            edge_model_fn=None,
            node_model_fn=lambda:
            VisualAndLatentDecoderSonnet(name="visual_and_latent_node_decoder",
                                         is_training=is_training),
            global_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="mlp")
            (n_neurons=
             EncodeProcessDecode_v7_edge_segmentation_no_edges_dropout.
             n_neurons_globals,
             n_layers=EncodeProcessDecode_v7_edge_segmentation_no_edges_dropout
             .n_layers_globals,
             output_size=
             EncodeProcessDecode_v7_edge_segmentation_no_edges_dropout.
             global_output_size,
             typ="mlp_transform",
             activation_final=False,
             name="mlp_decoder_global"))

        return self._network(inputs)
Code Example #13
    def _build(self,
               inputs,
               is_training,
               skip1,
               skip2,
               skip3,
               verbose=VERBOSITY):
        self.visual_decoder = get_model_from_config(
            model_id=self.model_id,
            model_type="visual_decoder")(is_training=is_training,
                                         name="visual_decoder")

        # --------------- SKIP CONNECTION --------------- #
        self.visual_decoder.skip1 = skip1
        self.visual_decoder.skip2 = skip2
        self.visual_decoder.skip3 = skip3

        self._network = modules.GraphIndependent(
            edge_model_fn=lambda: get_model_from_config(model_id=self.model_id,
                                                        model_type="mlp")
            (n_neurons=EncodeProcessDecode_v4_172.n_neurons_edges,
             n_layers=EncodeProcessDecode_v4_172.n_layers_edges,
             output_size=EncodeProcessDecode_v4_172.edge_output_size,
             typ="mlp_transform",
             activation_final=False,
             name="mlp_decoder_edge"),
            node_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="visual_and_latent_decoder")
            (self.visual_decoder, name="visual_and_latent_node_decoder"),
            global_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="mlp")
            (n_neurons=EncodeProcessDecode_v4_172.n_neurons_globals,
             n_layers=EncodeProcessDecode_v4_172.n_layers_globals,
             output_size=EncodeProcessDecode_v4_172.global_output_size,
             typ="mlp_transform",
             activation_final=False,
             name="mlp_decoder_global"))

        return self._network(inputs)
Code Example #14
    def _build(self, inputs, is_training, verbose=VERBOSITY):
        visual_decoder = get_model_from_config(
            self.model_id, model_type="visual_decoder")(
                is_training=is_training, name="visual_decoder")

        self._network = modules.GraphIndependent(
            edge_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="mlp")(
                    n_neurons=EncodeProcessDecode_v3_172_visual_latent_dim.n_neurons_edges,
                    n_layers=EncodeProcessDecode_v3_172_visual_latent_dim.n_layers_edges,
                    output_size=EncodeProcessDecode_v3_172_visual_latent_dim.edge_output_size,
                    typ="mlp_transform",
                    activation_final=False,
                    name="mlp_decoder_edge"),
            node_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="visual_and_latent_decoder")(
                    visual_decoder, name="visual_and_latent_node_decoder"),
            global_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="mlp")(
                    n_neurons=EncodeProcessDecode_v3_172_visual_latent_dim.n_neurons_globals,
                    n_layers=EncodeProcessDecode_v3_172_visual_latent_dim.n_layers_globals,
                    output_size=EncodeProcessDecode_v3_172_visual_latent_dim.global_output_size,
                    typ="mlp_transform",
                    activation_final=False,
                    name="mlp_decoder_global"),
        )
        return self._network(inputs)
Code Example #15
    def __init__(self, model_id, name="CNNMLPDecoderGraphIndependent"):
        super(CNNMLPDecoderGraphIndependent, self).__init__(name=name)
        self.model_id = model_id

        with self._enter_variable_scope():
            visual_decoder = get_model_from_config(
                model_id=self.model_id,
                model_type="visual_decoder")(is_training=True,
                                             name="visual_decoder")

            self._network = modules.GraphIndependent(
                edge_model_fn=lambda: get_model_from_config(
                    model_id=self.model_id, model_type="mlp")
                (n_neurons=EncodeProcessDecode_v3_312_latent_dim_no_train_flags
                 .n_neurons_edges,
                 n_layers=EncodeProcessDecode_v3_312_latent_dim_no_train_flags.
                 n_layers_edges,
                 output_size=
                 EncodeProcessDecode_v3_312_latent_dim_no_train_flags.
                 edge_output_size,
                 typ="mlp_transform",
                 name="mlp_decoder_edge"),
                node_model_fn=lambda: get_model_from_config(
                    model_id=self.model_id,
                    model_type="visual_and_latent_decoder")
                (visual_decoder, name="visual_and_latent_node_decoder"),
                global_model_fn=lambda: get_model_from_config(
                    model_id=self.model_id, model_type="mlp")
                (n_neurons=EncodeProcessDecode_v3_312_latent_dim_no_train_flags
                 .n_neurons_globals,
                 n_layers=EncodeProcessDecode_v3_312_latent_dim_no_train_flags.
                 n_layers_globals,
                 output_size=
                 EncodeProcessDecode_v3_312_latent_dim_no_train_flags.
                 global_output_size,
                 typ="mlp_transform",
                 name="mlp_decoder_global"),
            )
Code Example #16
File: utils.py Project: ttvand/Molecular-Properties
 def __init__(self,
              edge_output_size=None,
              node_output_size=None,
              global_output_size=None,
              latent_size=16,
              num_layers=2,
              separate_edge_output=False,
              edge_output_layer_norm=False,
              skip_encoder_decoder=False,
              name="MyEncodeProcessDecode"):
     super(MyEncodeProcessDecode, self).__init__(name=name)
     self._encoder = MLPGraphIndependent(latent_size, num_layers)
     self._core = MLPGraphNetwork(latent_size, num_layers)
     self._edge_type_concat = separate_edge_output
     self._decoder = MLPGraphIndependent(
         latent_size,
         num_layers,
         output_independent=True,
         separate_edge_output=separate_edge_output,
         edge_output_layer_norm=edge_output_layer_norm)
     self._skip_encoder_decoder = skip_encoder_decoder
     # Transforms the outputs into the appropriate shapes.
     if edge_output_size is None:
         edge_fn = None
     else:
         edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
     if node_output_size is None:
         node_fn = None
     else:
         node_fn = lambda: snt.Linear(node_output_size, name="node_output")
     if global_output_size is None:
         global_fn = None
     else:
         global_fn = lambda: snt.Linear(global_output_size,
                                        name="global_output")
     with self._enter_variable_scope():
         self._output_transform = modules.GraphIndependent(
             edge_fn, node_fn, global_fn)
Code Example #17
    def __init__(self, name="EdgeGlobalClassifier"):
        super(EdgeGlobalClassifier, self).__init__(name=name)

        self._encoder = MLPGraphIndependent()
        self._core = MLPGraphNetwork()
        self._decoder = MLPGraphIndependent()

        # Transforms the outputs into appropriate shapes
        # assuming the target of the outputs of
        # global and edge is binary.
        global_output_size = 1
        global_fn = lambda: snt.Sequential([
            snt.nets.MLP([LATENT_SIZE, global_output_size],
                         name='global_output'), tf.sigmoid
        ])
        edge_output_size = 1
        edge_fn = lambda: snt.Sequential([
            snt.nets.MLP([LATENT_SIZE, edge_output_size], name='edge_output'),
            tf.sigmoid
        ])

        self._output_transform = modules.GraphIndependent(
            edge_fn, None, global_fn)
Code Example #18
    def _build(self, inputs, is_training, skip1, skip2, verbose=VERBOSITY):
        """Re-initializing _network because it is currently not possible to pass the is_training flag at init() time."""
        self.visual_decoder = get_model_from_config(
            self.model_id, model_type="visual_decoder")(
                is_training=is_training, name="visual_decoder")
        self.visual_decoder.skip1 = skip1
        self.visual_decoder.skip2 = skip2

        self._network = modules.GraphIndependent(
            edge_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="mlp")(
                    n_neurons=EncodeProcessDecode_v3_172_visual_latent_dim_skip_connection.n_neurons_edges,
                    n_layers=EncodeProcessDecode_v3_172_visual_latent_dim_skip_connection.n_layers_edges,
                    output_size=EncodeProcessDecode_v3_172_visual_latent_dim_skip_connection.edge_output_size,
                    typ="mlp_transform",
                    name="mlp_decoder_edge"),
            node_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="visual_and_latent_decoder")(
                    self.visual_decoder, name="visual_and_latent_node_decoder"),
            global_model_fn=lambda: get_model_from_config(
                model_id=self.model_id, model_type="mlp")(
                    n_neurons=EncodeProcessDecode_v3_172_visual_latent_dim_skip_connection.n_neurons_globals,
                    n_layers=EncodeProcessDecode_v3_172_visual_latent_dim_skip_connection.n_layers_globals,
                    output_size=EncodeProcessDecode_v3_172_visual_latent_dim_skip_connection.global_output_size,
                    typ="mlp_transform",
                    name="mlp_decoder_global"))
        return self._network(inputs)
Code Example #19
    def __init__(self, model_id, name="CNNMLPEncoderGraphIndependent"):
        super(CNNMLPEncoderGraphIndependent, self).__init__(name=name)
        self.model_id = model_id

        with self._enter_variable_scope():
            """ we use a visual AND latent decoder for the nodes since it is necessary to entangle position / velocity and visual data """
            self._network = modules.GraphIndependent(
                edge_model_fn=lambda: get_model_from_config(self.model_id,
                                                            model_type="mlp")
                (n_neurons=
                 EncodeProcessDecode_v5_no_skip_no_core_no_training_flags.
                 n_neurons_edges,
                 n_layers=
                 EncodeProcessDecode_v5_no_skip_no_core_no_training_flags.
                 n_layers_edges,
                 output_size=None,
                 typ="mlp_layer_norm",
                 activation_final=False,
                 name="mlp_encoder_edge"),
                node_model_fn=lambda: get_model_from_config(
                    self.model_id, model_type="visual_and_latent_encoder")
                (name="visual_and_latent_node_encoder"),
                global_model_fn=None)
Code Example #20
File: core.py Project: voice-displays/kglib
    def __init__(self,
                 num_node_types,
                 num_edge_types,
                 type_embedding_dim,
                 attr_embedding_dim,
                 attr_embedders,
                 edge_output_size=3,
                 node_output_size=3,
                 latent_size=16,
                 num_layers=2,
                 name="KGCN"):
        super(KGCN, self).__init__(name=name)

        self._num_node_types = num_node_types
        self._num_edge_types = num_edge_types
        self._type_embedding_dim = type_embedding_dim
        self._attr_embedding_dim = attr_embedding_dim
        self._attr_embedders = attr_embedders
        self._latent_size = latent_size
        self._num_layers = num_layers

        # Transforms the outputs into the appropriate shapes.
        if edge_output_size is None:
            edge_fn = None
        else:
            edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
        if node_output_size is None:
            node_fn = None
        else:
            node_fn = lambda: snt.Linear(node_output_size, name="node_output")
        with self._enter_variable_scope():
            self._encoder = self._kg_encoder()
            self._core = MLPGraphNetwork()
            self._decoder = MLPGraphIndependent()
            self._output_transform = modules.GraphIndependent(
                edge_fn, node_fn, None)
Code Example #21
 def __init__(self, name="MLPGraphIndependent"):
     super(MLPGraphIndependent, self).__init__(name=name)
     self._network = modules.GraphIndependent(
         edge_model_fn=make_mlp_model,
         node_model_fn=make_mlp_model,
         global_model_fn=make_mlp_model)
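For context, a GraphIndependent module like this transforms edge, node and global features separately, without exchanging information between them. A minimal usage sketch (assuming the class also defines the usual _build(self, inputs) that returns self._network(inputs), as in the graph_nets demos):

 import numpy as np
 from graph_nets import utils_tf

 # Toy graph: 4 nodes with 5 features, 3 edges with 6 features, 7 global features.
 data_dict = {
     "nodes": np.random.rand(4, 5).astype(np.float32),
     "edges": np.random.rand(3, 6).astype(np.float32),
     "senders": np.array([0, 1, 2]),
     "receivers": np.array([1, 2, 3]),
     "globals": np.random.rand(7).astype(np.float32),
 }
 input_graphs = utils_tf.data_dicts_to_graphs_tuple([data_dict])

 # Features are transformed independently; the graph connectivity is untouched.
 output_graphs = MLPGraphIndependent()(input_graphs)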
Code Example #22
File: models.py Project: utkuoguzman/graph_rl
    def __init__(self,
                 num_processing_steps=None,
                 latent_size=None,
                 n_layers=None,
                 edge_output_size=None,
                 node_output_size=None,
                 global_output_size=None,
                 reducer=None,
                 out_init_scale=5.0,
                 name="AggregationNet"):
        super(AggregationDiffNet, self).__init__(name=name)

        if num_processing_steps is None:
            self._proc_hops = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        else:
            self._proc_hops = num_processing_steps

        if reducer is None or reducer == 'max':
            reducer = unsorted_segment_max_or_zero
        elif reducer == 'logsumexp':
            reducer = segment_logsumexp
        elif reducer == 'softmax':
            reducer = segment_transformer
        elif reducer == 'sum':
            reducer = tf.math.unsorted_segment_sum
        else:
            raise ValueError('Unknown reducer!')

        if latent_size is None:
            latent_size = 16

        if n_layers is None:
            n_layers = 2

        self._num_processing_steps = len(self._proc_hops)
        self._n_stacked = latent_size * self._num_processing_steps

        def make_mlp():
            return snt.nets.MLP([latent_size] * n_layers, activate_final=True)

        # def make_linear():
        #     return snt.nets.MLP([latent_size], activate_final=False)

        self._core = modules.GraphNetwork(
            edge_model_fn=make_mlp,
            node_model_fn=make_mlp,
            global_model_fn=make_mlp,
            edge_block_opt={'use_globals': False},
            node_block_opt={
                'use_globals': False,
                'use_sent_edges': False
            },
            name="graph_net",
            reducer=reducer)

        self._encoder = modules.GraphIndependent(make_mlp,
                                                 make_mlp,
                                                 make_mlp,
                                                 name="encoder")
        self._decoder = modules.GraphIndependent(make_mlp,
                                                 make_mlp,
                                                 make_mlp,
                                                 name="decoder")

        inits = {
            'w': ortho_init(out_init_scale),
            'b': tf.constant_initializer(0.0)
        }

        # Transforms the outputs into the appropriate shapes.
        edge_fn = None if edge_output_size is None else lambda: snt.Linear(
            edge_output_size, initializers=inits, name="edge_output")
        node_fn = None if node_output_size is None else lambda: snt.Linear(
            node_output_size, initializers=inits, name="node_output")
        global_fn = None if global_output_size is None else lambda: snt.Linear(
            global_output_size, initializers=inits, name="global_output")

        with self._enter_variable_scope():
            self._output_transform = modules.GraphIndependent(edge_fn,
                                                              node_fn,
                                                              global_fn,
                                                              name="output")
Code Example #23
    def __init__(self,
                 num_processing_steps=None,
                 latent_size=None,
                 n_layers=None,
                 edge_output_size=None,
                 node_output_size=None,
                 global_output_size=None,
                 reducer=None,
                 out_init_scale=5.0,
                 name="AggregationNet"):
        super(AggregationNet, self).__init__(name=name)

        if num_processing_steps is None:
            self._num_processing_steps = 5
        else:
            self._num_processing_steps = num_processing_steps

        if reducer is None or reducer == 'max':
            reducer = unsorted_segment_max_or_zero
        elif reducer == 'mean':
            reducer = tf.math.unsorted_segment_mean
        elif reducer == 'sum':
            reducer = tf.math.unsorted_segment_sum
        else:
            raise ValueError('Unknown reducer!')

        if latent_size is None:
            latent_size = 16

        if n_layers is None:
            n_layers = 2

        def make_mlp():
            return snt.nets.MLP([latent_size] * n_layers, activate_final=True)

        if self._num_processing_steps > 0:
            # Edge block copies the node features onto the edges.
            core_a = blocks.EdgeBlock(edge_model_fn=lambda: Identity(),
                                      use_edges=False,
                                      use_receiver_nodes=False,
                                      use_sender_nodes=True,
                                      use_globals=False,
                                      name='LinearNodeAggGCN_core_a')

            # Then, edge data is aggregated onto the node by the reducer function.
            core_b = blocks.NodeBlock(node_model_fn=lambda: Identity(),
                                      use_received_edges=True,
                                      use_sent_edges=False,
                                      use_nodes=False,
                                      use_globals=False,
                                      received_edges_reducer=reducer,
                                      name='LinearNodeAggGCN_core_b')

            self._cores = [core_a, core_b]

        self._encoder = modules.GraphIndependent(make_mlp,
                                                 make_mlp,
                                                 make_mlp,
                                                 name="encoder")
        self._decoder = modules.GraphIndependent(make_mlp,
                                                 make_mlp,
                                                 make_mlp,
                                                 name="decoder")

        inits = {
            'w': ortho_init(out_init_scale),
            'b': tf.constant_initializer(0.0)
        }

        # Transforms the outputs into the appropriate shapes.
        edge_fn = None if edge_output_size is None else lambda: snt.Linear(
            edge_output_size, initializers=inits, name="edge_output")
        node_fn = None if node_output_size is None else lambda: snt.Linear(
            node_output_size, initializers=inits, name="node_output")
        global_fn = None if global_output_size is None else lambda: snt.Linear(
            global_output_size, initializers=inits, name="global_output")
        with self._enter_variable_scope():
            self._output_transform = modules.GraphIndependent(edge_fn,
                                                              node_fn,
                                                              global_fn,
                                                              name="output")
Code Example #24
    def __init__(self,
                 num_processing_steps=None,
                 latent_size=None,
                 n_layers=None,
                 edge_output_size=None,
                 node_output_size=None,
                 global_output_size=None,
                 reducer=None,
                 out_init_scale=5.0,
                 name="AggregationNet"):
        super(NonLinearGraphNet, self).__init__(name=name)

        if num_processing_steps is None:
            self._num_processing_steps = 5
        else:
            self._num_processing_steps = num_processing_steps

        if reducer is None or reducer == 'max':
            reducer = unsorted_segment_max_or_zero
        elif reducer == 'mean':
            reducer = tf.math.unsorted_segment_mean
        elif reducer == 'sum':
            reducer = tf.math.unsorted_segment_sum
        else:
            raise ValueError('Unknown reducer!')

        if latent_size is None:
            latent_size = 16

        if n_layers is None:
            n_layers = 2

        def make_mlp():
            return snt.nets.MLP([latent_size] * n_layers, activate_final=False)

        if self._num_processing_steps > 0:
            # Edge model f^e(v_sender, v_receiver, e) - in the linear model, f^e = v_sender.
            # Average over all the received edge features to get e'.
            # Node model f^v(v, e') - in the linear model, it was just f^v = e'.
            self._core = modules.GraphNetwork(
                edge_model_fn=make_mlp,
                node_model_fn=make_mlp,
                global_model_fn=make_mlp,
                edge_block_opt={'use_globals': False},
                node_block_opt={
                    'use_globals': False,
                    'use_sent_edges': False
                },
                name="graph_net",
                reducer=reducer)

        self._encoder = modules.GraphIndependent(make_mlp,
                                                 make_mlp,
                                                 make_mlp,
                                                 name="encoder")
        self._decoder = modules.GraphIndependent(make_mlp,
                                                 make_mlp,
                                                 make_mlp,
                                                 name="decoder")

        inits = {
            'w': ortho_init(out_init_scale),
            'b': tf.constant_initializer(0.0)
        }

        # Transforms the outputs into the appropriate shapes.
        edge_fn = None if edge_output_size is None else lambda: snt.Linear(
            edge_output_size, initializers=inits, name="edge_output")
        node_fn = None if node_output_size is None else lambda: snt.Linear(
            node_output_size, initializers=inits, name="node_output")
        global_fn = None if global_output_size is None else lambda: snt.Linear(
            global_output_size, initializers=inits, name="global_output")
        with self._enter_variable_scope():
            self._output_transform = modules.GraphIndependent(edge_fn,
                                                              node_fn,
                                                              global_fn,
                                                              name="output")
Code Example #25
    def __init__(self,
                 edge_output_size=None,
                 node_output_size=None,
                 global_output_size=None,
                 edge_layer_activation=tf.nn.relu,
                 node_layer_activation=tf.nn.relu,
                 global_layer_activation=tf.nn.relu,
                 last_edge_layer_activation=tf.nn.softmax,
                 last_node_layer_activation=tf.nn.softmax,
                 last_global_layer_activation=tf.keras.activations.linear,
                 edge_vocab_size=20,
                 edge_embed_dim=100,
                 node_vocab_size=1000,
                 node_embed_dim=100,
                 name="GraphAttention"):
        """
        This network structure is designed to handle NLP problems.
        :param edge_output_size: The size of the output vector corresponding to each edge
        :param node_output_size: The size of the output vector corresponding to each node
        :param global_output_size: The size of the output vector corresponding to the global feature
        :param edge_layer_activation: The activation used in each layer considering the edges. ReLU by default.
        :param node_layer_activation: The activation used in each layer considering the nodes. ReLU by default.
        :param global_layer_activation: The activation used in each layer considering the global feature.
                                        ReLU by default.
        :param last_edge_layer_activation: The activation function of the output layer corresponding to the edges.
                                           SoftMax by default.
        :param last_node_layer_activation: The activation function of the output layer corresponding to the nodes.
                                           SoftMax by default.
        :param last_global_layer_activation: The activation function of the output layer corresponding to
                                             the global features. Linear by default.
        :param edge_vocab_size: The size of the vocabulary containing the edges, if we use a non-pretrained embedding.
        :param edge_embed_dim: The dimension of the edge embedding, if we use a non-pretrained embedding.
        :param node_vocab_size: The size of the vocabulary containing the nodes, if we use a non-pretrained embedding.
        :param node_embed_dim: The dimension of the node embedding, if we use a non-pretrained embedding.
        :param name: The name of the network
        """

        super(SimpleGraphAttention, self).__init__(name=name)

        self.edge_layer_activation = edge_layer_activation
        self.node_layer_activation = node_layer_activation
        self.global_layer_activation = global_layer_activation
        self.edge_vocab_size = edge_vocab_size
        self.edge_embed_dim = edge_embed_dim
        self.node_vocab_size = node_vocab_size
        self.node_embed_dim = node_embed_dim

        self._encoder = Encoder()

        self._network = graph_net_modules.GraphNetwork(
            edge_model_fn=self.edge_model_fn,
            node_model_fn=self.node_model_fn,
            global_model_fn=self.global_model_fn,
            reducer=tf.unsorted_segment_sum)

        # Transforms the outputs into the appropriate shapes.
        edge_fn = None if edge_output_size is None else \
            lambda: sonnet_nets.ActivatedLinear(edge_output_size, last_edge_layer_activation)
        node_fn = None if node_output_size is None else \
            lambda: sonnet_nets.ActivatedLinear(node_output_size, last_node_layer_activation)
        global_fn = None if global_output_size is None else \
            lambda: sonnet_nets.ActivatedLinear(global_output_size, last_global_layer_activation)
        with self._enter_variable_scope():
            self._output_transform = graph_net_modules.GraphIndependent(
                edge_fn, node_fn, global_fn)