Example #1
import tensorflow as tf

import inits  # initializer helpers used below: glorot / zeros / ones


# `Layer` is assumed to be the repo's GCN-style base class, providing
# self.vars, self.name, self.logging and the __call__ plumbing.
class GraphConvolution(Layer):
    """Graph convolution layer with optional normalization and residuals."""

    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholders,
                 dropout=0.,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 bias=False,
                 featureless=False,
                 norm=False,
                 precalc=False,
                 residual=False,
                 **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        self.norm = norm
        self.precalc = precalc
        self.residual = residual

        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = inits.glorot([input_dim, output_dim],
                                                name='weights')
            if self.bias:
                self.vars['bias'] = inits.zeros([output_dim], name='bias')

            if self.norm:
                # affine (offset/scale) parameters applied after normalization
                self.vars['offset'] = inits.zeros([1, output_dim],
                                                  name='offset')
                self.vars['scale'] = inits.ones([1, output_dim], name='scale')

        if self.logging:
            self._log_vars()
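
The layer's parameters come from an inits helper module. As a minimal sketch (an assumption about that module, not this repo's actual file), the three factories used above would look like this in TF 1.x, with glorot being the standard Glorot/Xavier-uniform initializer:

import numpy as np
import tensorflow as tf


def glorot(shape, name=None):
    # Glorot & Bengio (2010): uniform in [-limit, limit],
    # limit = sqrt(6 / (fan_in + fan_out)).
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    initial = tf.random_uniform(shape, minval=-limit, maxval=limit,
                                dtype=tf.float32)
    return tf.Variable(initial, name=name)


def zeros(shape, name=None):
    # all-zeros variable (used for biases and the norm offset)
    return tf.Variable(tf.zeros(shape, dtype=tf.float32), name=name)


def ones(shape, name=None):
    # all-ones variable (used for the norm scale)
    return tf.Variable(tf.ones(shape, dtype=tf.float32), name=name)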
Example #2
    # _build of the enclosing model; FLAGS, np (numpy), the layer classes
    # (GraphConvolution_GCN, SplitAndAttentionPooling, Dense), and the
    # ones() initializer are assumed to be imported in this module.
    def _build(self):

        conv_layers = []
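        # three stacked GCN layers: input_dim -> hidden1 -> hidden2 -> hidden3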
        conv_layers.append(
            GraphConvolution_GCN(input_dim=self.input_dim,
                                 output_dim=FLAGS.hidden1,
                                 placeholders=self.placeholders,
                                 act=tf.nn.relu,
                                 dropout=FLAGS.dropout > 0,
                                 sparse_inputs=True,
                                 logging=self.logging))

        conv_layers.append(
            GraphConvolution_GCN(input_dim=FLAGS.hidden1,
                                 output_dim=FLAGS.hidden2,
                                 placeholders=self.placeholders,
                                 act=tf.nn.relu,
                                 dropout=FLAGS.dropout > 0,
                                 logging=self.logging))

        conv_layers.append(
            GraphConvolution_GCN(input_dim=FLAGS.hidden2,
                                 output_dim=FLAGS.hidden3,
                                 placeholders=self.placeholders,
                                 act=tf.nn.relu,
                                 dropout=FLAGS.dropout > 0,
                                 logging=self.logging))

        pool_layers = []
        pool_layers.append(
            SplitAndAttentionPooling(input_dim=FLAGS.hidden1,
                                     placeholders=self.placeholders,
                                     act=tf.nn.sigmoid,
                                     dropout=FLAGS.dropout > 0,
                                     logging=self.logging))

        pool_layers.append(
            SplitAndAttentionPooling(input_dim=FLAGS.hidden2,
                                     placeholders=self.placeholders,
                                     act=tf.nn.sigmoid,
                                     dropout=FLAGS.dropout > 0,
                                     logging=self.logging))

        pool_layers.append(
            SplitAndAttentionPooling(input_dim=FLAGS.hidden3,
                                     placeholders=self.placeholders,
                                     act=tf.nn.sigmoid,
                                     dropout=FLAGS.dropout > 0,
                                     logging=self.logging))

        mlp_layers = []
        mlp_layers.append(
            Dense(input_dim=FLAGS.hidden1 + FLAGS.hidden2 + FLAGS.hidden3,
                  output_dim=FLAGS.hidden4,
                  placeholders=self.placeholders,
                  act=tf.nn.relu,
                  bias=True,
                  dropout=FLAGS.dropout > 0,
                  logging=self.logging))

        mlp_layers.append(
            Dense(input_dim=FLAGS.hidden4,
                  output_dim=FLAGS.hidden5,
                  placeholders=self.placeholders,
                  act=tf.nn.relu,
                  bias=True,
                  dropout=FLAGS.dropout > 0,
                  logging=self.logging))

        mlp_layers.append(
            Dense(input_dim=FLAGS.hidden5,
                  output_dim=self.embedding_dim,
                  placeholders=self.placeholders,
                  act=lambda x: x,
                  bias=True,
                  dropout=FLAGS.dropout > 0,
                  logging=self.logging))

        discrete_layers = []
        discrete_layers.append(
            Dense(input_dim=self.embedding_dim,
                  output_dim=FLAGS.hidden6,
                  placeholders=self.placeholders,
                  act=tf.nn.relu,
                  bias=True,
                  dropout=FLAGS.dropout > 0,
                  logging=self.logging))

        discrete_layers.append(
            Dense(
                input_dim=FLAGS.hidden6,
                output_dim=self.output_dim,
                placeholders=self.placeholders,
                # alternative: act=lambda x: x,
                act=lambda x: 0.5 * tf.nn.tanh(x),
                bias=True,
                dropout=FLAGS.dropout > 0,
                logging=self.logging))

        self.layers = [conv_layers, pool_layers, mlp_layers, discrete_layers]

        # Initialize Bit Weights
        if FLAGS.bit_weight_type == 'var':
            with tf.variable_scope(self.name):
                with tf.variable_scope(self.name + '_bit_weights'):
                    self.bit_weights = ones([FLAGS.hash_code_len],
                                            name='bit_weights')
        elif FLAGS.bit_weight_type == 'log':
            self.bit_weights = tf.constant(
                [np.log(i + 1) + 1 for i in range(FLAGS.hash_code_len)],
                dtype=tf.float32)
        elif FLAGS.bit_weight_type == 'exp':
            self.bit_weights = tf.constant(
                [2**(i - 1) for i in range(FLAGS.hash_code_len)],
                dtype=tf.float32)
        elif FLAGS.bit_weight_type == 'const':
            # self.bit_weights = None
            self.bit_weights = tf.constant(
                [1 for i in range(FLAGS.hash_code_len)], dtype=tf.float32)
        else:
            raise RuntimeError('Unrecognized Bit Weight Type: ' +
                               FLAGS.bit_weight_type)
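
For intuition, the three fixed weighting schemes above grow at very different rates per bit (the 'var' scheme instead makes the weights trainable). A small NumPy sketch mirroring the tf.constant branches, where hash_code_len = 8 is a hypothetical value:

import numpy as np

hash_code_len = 8  # hypothetical value, for illustration only

# 'log': slowly growing weights, 1 + ln(i + 1)
log_w = np.array([np.log(i + 1) + 1 for i in range(hash_code_len)],
                 dtype=np.float32)
# 'exp': weights doubling per bit, starting at 2**(0 - 1) = 0.5
exp_w = np.array([2 ** (i - 1) for i in range(hash_code_len)],
                 dtype=np.float32)
# 'const': every bit weighted equally
const_w = np.ones(hash_code_len, dtype=np.float32)

print(log_w)    # [1.    1.693 2.099 2.386 2.609 2.792 2.946 3.079]
print(exp_w)    # [ 0.5   1.    2.    4.    8.   16.   32.   64. ]
print(const_w)  # [1. 1. 1. 1. 1. 1. 1. 1.]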