Example #1
    def forward(self, X):
        hidden_size = self.hidden_size
        input_dim = self.input_dim
        num_classes = self.num_classes

        # TODO: consider using slim.stack instead of stacking layers manually
        # Verbose way:
        #x = slim.fully_connected(x, 32, scope='fc/fc_1')
        #x = slim.fully_connected(x, 64, scope='fc/fc_2')
        #x = slim.fully_connected(x, 128, scope='fc/fc_3')
        ## Equivalent, TF-Slim way using slim.stack:
        #slim.stack(x, slim.fully_connected, [32, 64, 128], scope='fc')

        # TODO: these should not be per-call locals; use tf.get_variable or class member variables so the weights can be shared
        if isinstance(X, tf.Tensor):
            w_h = melt.get_weights('w_h', [input_dim, hidden_size])
        else:
            # Non-Tensor input: place the weight matrix on the CPU
            with tf.device('/cpu:0'):
                w_h = melt.get_weights('w_h', [input_dim, hidden_size])
        b_h = melt.get_bias('b_h', [hidden_size])
        w_o = melt.get_weights('w_o', [hidden_size, num_classes])
        b_o = melt.get_bias('b_o', [num_classes])
        py_x = melt.mlp_forward(X,
                                w_h,
                                b_h,
                                w_o,
                                b_o,
                                activation=self.activation)

        return py_x
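
The TODO above about sharing: one way to avoid re-creating w_h/b_h/w_o/b_o on every call is to run forward() under a reusing variable scope. A minimal sketch, assuming TF 1.x graph mode and that melt.get_weights / melt.get_bias go through tf.get_variable (model, train_X and eval_X are hypothetical names):

import tensorflow as tf

def shared_forward(model, X, scope='mlp'):
    # AUTO_REUSE returns the existing variables on the second and later calls,
    # so the train and eval graphs end up sharing the same weights.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        return model.forward(X)

train_logits = shared_forward(model, train_X)   # creates w_h, b_h, w_o, b_o
eval_logits = shared_forward(model, eval_X)     # reuses the same variables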
Example #2
def mlp_nobias(x, hidden_size, output_size, activation=tf.nn.relu, scope=None):
    scope = 'mlp_nobias' if scope is None else scope
    with tf.variable_scope(scope):
        # Infer input_dim from the static shape (requires rank >= 2)
        input_dim = utils.last_dimension(x.get_shape(), min_rank=2)
        if isinstance(x, tf.Tensor):
            w_h = melt.get_weights('w_h', [input_dim, hidden_size])
        else:
            with tf.device('/cpu:0'):
                w_h = melt.get_weights('w_h', [input_dim, hidden_size])
        w_o = melt.get_weights('w_o', [hidden_size, output_size])
        return melt.mlp_forward_nobias(x, w_h, w_o, activation)
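
A short usage sketch for mlp_nobias, assuming TF 1.x graph mode and a dense float input (the placeholder shape and scope name are illustrative); melt.mlp_forward_nobias is not shown here, but given the weights it receives it presumably computes activation(x·w_h)·w_o:

import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 100])   # [batch, input_dim]
# Two matmuls, no bias terms: hidden layer of size 64, single output score.
score = mlp_nobias(features, hidden_size=64, output_size=1, scope='ranker')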
Example #3
def mlp(x, hidden_size, output_size, activation=tf.nn.relu, scope=None):
    scope = 'mlp' if scope is None else scope
    with tf.variable_scope(scope):
        hidden = fully_connected(x, hidden_size, activation)   # hidden layer
        w_o = melt.get_weights('w_o', [hidden_size, output_size])
        b_o = melt.get_bias('b_o', [output_size])
        return tf.nn.xw_plus_b(hidden, w_o, b_o)                # linear output (logits)
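
A usage sketch for mlp as a single-hidden-layer classifier head, assuming TF 1.x graph mode (the 784/128/10 sizes and the placeholder names are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])   # flattened input features
y = tf.placeholder(tf.int64, [None])          # integer class ids
logits = mlp(x, hidden_size=128, output_size=10, scope='classifier')
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))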
Example #4
def fully_connected(x, output_size, activation=tf.nn.relu, scope=None):
    # TODO: how to get the input dim?
    #   tf.shape(x)[-1] raises NotImplementedError("Negative indices are currently unsupported")
    #   tf.shape(x)[1] returns a Tensor, which fails as a variable shape:
    #   "int() argument must be a string or a number, not 'Tensor'"
    # slim.fully_connected reads the static shape instead (see contrib/layers/python/layers/layers.py);
    # utils.last_dimension below does the same.
    scope = 'fc' if scope is None else scope
    with tf.variable_scope(scope):
        input_dim = utils.last_dimension(x.get_shape(), min_rank=2)
        if isinstance(x, tf.Tensor):
            w_h = melt.get_weights('w_h', [input_dim, output_size])
        else:
            with tf.device('/cpu:0'):
                w_h = melt.get_weights('w_h', [input_dim, output_size])
        b_h = melt.get_bias('b_h', [output_size])
        return activation(melt.matmul(x, w_h) + b_h)
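
The commented-out attempts above come down to static vs. dynamic shapes: tf.shape(x) yields a Tensor whose value is only known at run time, which cannot size a variable, while x.get_shape() is the static shape available at graph-construction time (what utils.last_dimension reads). A small sketch of the distinction, assuming a rank-2 input whose last dimension is known:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 300])

static_dim = x.get_shape()[-1].value   # plain Python int (300)
dynamic_dim = tf.shape(x)[1]           # Tensor, value known only at run time

w = tf.get_variable('w', [static_dim, 128])    # ok: variable shapes need ints
# tf.get_variable('w2', [dynamic_dim, 128])    # would fail: shape can't be a Tensor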