Example #1
def model(self, X, w_h1, w_h2, w_o):
    # A basic MLP: think of it as two stacked logistic regressions.
    h1 = tf.nn.sigmoid(melt.matmul(X, w_h1))
    h2 = tf.nn.sigmoid(melt.matmul(h1, w_h2))
    # Note that we don't take the softmax at the end because our cost fn does that for us.
    return tf.matmul(h2, w_o)
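The comment about skipping the softmax matters for how the returned logits are consumed. A minimal sketch (not from the source repo; `trainer`, `X`, `Y`, and the weight tensors are hypothetical, and the usual `import tensorflow as tf` is assumed) of feeding the raw logits into a cost function that applies the softmax itself:

logits = trainer.model(X, w_h1, w_h2, w_o)  # raw, un-normalized scores
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)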
Example #2
def model(self, X, w_h, b_h, w_o, b_o, gpu):
    with tf.device('/gpu:%d' % gpu):
        # A basic MLP: think of it as two stacked logistic regressions.
        h = tf.nn.sigmoid(melt.matmul(X, w_h) + b_h)
        # Note that we don't take the softmax at the end because our cost fn does that for us.
        return tf.matmul(h, w_o) + b_o
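Because the weights are passed in explicitly rather than created inside the method, the same parameters can be shared across one tower per GPU. A minimal sketch of that pattern (an assumption, not from the source repo; `trainer` and `num_gpus` are hypothetical):

tower_logits = [trainer.model(X, w_h, b_h, w_o, b_o, gpu) for gpu in range(num_gpus)]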
Example #3
def fully_connected(x, output_size, activation=tf.nn.relu, scope=None):
    # @TODO -1 or the last dim? tf.shape(x)[-1] raises
    # NotImplementedError("Negative indices are currently unsupported"), and
    # tf.shape(x)[1] will not work either: "int() argument must be a string or a
    # number, not 'Tensor'" when building [input_dim, output_size].
    # @TODO check how slim.fully_connected obtains and uses the input dim,
    # see contrib/layers/python/layers/layers.py.
    scope = 'fc' if scope is None else scope
    with tf.variable_scope(scope):
        input_dim = utils.last_dimension(x.get_shape(), min_rank=2)
        if isinstance(x, tf.Tensor):
            # Dense input: create the weights on the default device.
            w_h = melt.get_weights('w_h', [input_dim, output_size])
        else:
            # Non-dense (sparse) input: keep the weight matrix on the CPU.
            with tf.device('/cpu:0'):
                w_h = melt.get_weights('w_h', [input_dim, output_size])
        b_h = melt.get_bias('b_h', [output_size])
        return activation(melt.matmul(x, w_h) + b_h)
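A hedged usage sketch (not from the source repo) of stacking this helper into a small MLP; the 784/10 sizes and the `x` placeholder are made up for illustration, and distinct scopes keep the variables of each layer separate:

x = tf.placeholder(tf.float32, [None, 784])
h = fully_connected(x, 256, activation=tf.nn.relu, scope='fc1')
logits = fully_connected(h, 10, activation=tf.identity, scope='fc2')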
Example #4
def model(self, X, w_h, b_h, w_h2, b_h2, w_o, b_o):
    # A basic MLP: think of it as two stacked logistic regressions.
    h = self.activation(melt.matmul(X, w_h) + b_h)
    h2 = self.activation(tf.matmul(h, w_h2) + b_h2)
    # Note that we don't take the softmax at the end because our cost fn does that for us.
    return tf.matmul(h2, w_o) + b_o
Example #5
def model(self, X, w, b):
    return melt.matmul(X, w) + b
Example #6
def model(self, X, w_h, w_o):
    # A basic MLP: think of it as two stacked logistic regressions.
    h = tf.nn.sigmoid(melt.matmul(X, w_h))
    # Note that we don't take the softmax at the end because our cost fn does that for us.
    return tf.matmul(h, w_o)
Example #7
def model(self, X, w):
    return melt.matmul(X, w)
Example #8
def fully_connected(inputs,
                    num_outputs,
                    input_dim=None,
                    activation_fn=nn.relu,
                    normalizer_fn=None,
                    normalizer_params=None,
                    weights_initializer=initializers.xavier_initializer(),
                    weights_regularizer=None,
                    biases_initializer=init_ops.zeros_initializer(),
                    biases_regularizer=None,
                    reuse=None,
                    variables_collections=None,
                    outputs_collections=None,
                    trainable=True,
                    scope=None):

    use_bias = biases_initializer is not None

    #--------TODO: if the commented code below is used (as in layers.fully_connected), app code
    #--------must manually pass a scope like 'mlp'; otherwise evaluation will fail, creating Mlp_1
    #--------instead of reusing the variables via reuse=True / tf.variable_scope.reuse_variables(),
    #--------see http://stackoverflow.com/questions/40536665/tensorflow-varscope-reuse-variables
    #with variable_scope.variable_scope(
    #  scope, 'Mlp', [inputs],
    #  reuse=reuse) as vs:
    scope = 'fully_connected' if scope is None else scope
    with tf.variable_scope(scope):
        # Dense input is a plain tf.Tensor; sparse input is expected as an (ids, values) pair.
        is_dense_input = isinstance(inputs, tf.Tensor)
        dtype = (inputs.dtype.base_dtype if is_dense_input
                 else inputs[1].values.dtype.base_dtype)
        # Sparse input cannot infer its own width, so input_dim must be supplied.
        assert is_dense_input or input_dim is not None
        if is_dense_input:
            shape = inputs.get_shape().as_list()
            input_dim = shape[-1]  # as_list() already yields Python ints
            assert len(shape) == 2, \
                "only rank-2 X is handled for now, TODO: support rank > 2 like layers.fully_connected"

        #----------- deal with the first hidden layer
        if is_dense_input:
            w_h = tf.get_variable('weight_hidden',
                                  shape=[input_dim, num_outputs],
                                  initializer=weights_initializer,
                                  regularizer=weights_regularizer,
                                  dtype=dtype,
                                  trainable=trainable)
        else:
            # Sparse input: keep the (potentially large) weight matrix on the CPU.
            with tf.device('/cpu:0'):
                w_h = tf.get_variable('weight_hidden',
                                      shape=[input_dim, num_outputs],
                                      initializer=weights_initializer,
                                      regularizer=weights_regularizer,
                                      dtype=dtype,
                                      trainable=trainable)

        if use_bias:
            b_h = tf.get_variable('bias_hidden',
                                  shape=[
                                      num_outputs,
                                  ],
                                  initializer=biases_initializer,
                                  regularizer=biases_regularizer,
                                  dtype=dtype,
                                  trainable=trainable)

        outputs = melt.matmul(inputs, w_h)
        if use_bias:
            outputs = nn.bias_add(outputs, b_h)
        if activation_fn is not None:
            outputs = activation_fn(outputs)  # pylint: disable=not-callable

        return outputs
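melt.matmul itself is not shown in these examples, but the dense/sparse branching above suggests what it does: for a plain tf.Tensor it is an ordinary matmul, while for the sparse (ids, values) pair it multiplies against the weight matrix via a sparse lookup. The following is a speculative sketch only, assuming that interface:

def matmul_sketch(X, w):
    if isinstance(X, tf.Tensor):
        return tf.matmul(X, w)  # dense path
    ids, values = X             # sparse path: a pair of SparseTensors
    return tf.nn.embedding_lookup_sparse(w, ids, values, combiner='sum')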
Example #9
def model(X, w):
    return melt.matmul(X, w)