def testWhileContext(self):
  with self.test_session() as sess:
    i = tf.constant(0)
    c = lambda i: tf.less(i, 10)
    b = lambda i: tf.add(i, 1)
    tf.while_loop(c, b, [i])
    for op in sess.graph.get_operations():
      c = op._get_control_flow_context()
      if c:
        compare.ProtoEq(
            c.to_proto(),
            control_flow_ops.WhileContext.from_proto(c.to_proto()).to_proto())
def testCondContext(self):
  with self.test_session() as sess:
    x = tf.constant(2)
    y = tf.constant(5)
    control_flow_ops.cond(tf.less(x, y),
                          lambda: tf.multiply(x, 17),
                          lambda: tf.add(y, 23))
    for op in sess.graph.get_operations():
      c = op._get_control_flow_context()
      if c:
        compare.ProtoEq(
            c.to_proto(),
            control_flow_ops.CondContext.from_proto(c.to_proto()).to_proto())
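For reference, a minimal sketch (assuming the TensorFlow 1.x graph API) of what the two graphs above compute when evaluated; the tests themselves never run them and only round-trip the generated control-flow contexts through to_proto()/from_proto():

import tensorflow as tf

with tf.Session() as sess:
    loop = tf.while_loop(lambda i: tf.less(i, 10),
                         lambda i: tf.add(i, 1), [tf.constant(0)])
    branch = tf.cond(tf.less(tf.constant(2), tf.constant(5)),
                     lambda: tf.multiply(tf.constant(2), 17),
                     lambda: tf.add(tf.constant(5), 23))
    # [10, 34]: the loop increments i until the condition fails, and the true branch is taken.
    print(sess.run([loop, branch]))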
Example #3
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    if len(shape) > 1:
        # Broadcasting is required for the inputs.
        outputs1 = []
        outputss = []
        outputst = standard_ops.tensordot(inputs, self.smatrix,
                                          [[len(shape) - 1], [0]])
        outputs1.append(
            standard_ops.scalar_mul(self.kernel[0][0], outputst))
        outputs2 = standard_ops.scalar_mul(1.0 - self.kernel[0][0], inputs)
        outputss.append(standard_ops.add(outputs1[0], outputs2))
        for i in range(1, self.smooth_num):
            # Repeatedly smooth the previous output and mix it with the raw inputs.
            outputst = standard_ops.tensordot(outputss[i - 1],
                                              self.smatrix,
                                              [[len(shape) - 1], [0]])
            outputs1.append(
                standard_ops.scalar_mul(self.kernel[0][0], outputst))
            outputss.append(standard_ops.add(outputs1[i], outputs2))
        outputs = outputss[self.smooth_num - 1]
        # Reshape the output back to the original ndim of the input.
        if not context.executing_eagerly():
            output_shape = shape[:-1] + [self.units]
            outputs.set_shape(output_shape)
    else:
        outputs2 = gen_math_ops.mat_mul(1.0 - self.kernel[0][0], inputs)
        outputs = gen_math_ops.mat_mul(inputs, self.smatrix)
        outputs = gen_math_ops.mat_mul(outputs, self.kernel)
        outputs = gen_math_ops.add(outputs, outputs2)
        for i in range(1, self.smooth_num):
            outputs = gen_math_ops.mat_mul(outputs, self.smatrix)
            outputs = gen_math_ops.mat_mul(outputs, self.kernel)
            outputs = gen_math_ops.add(outputs, outputs2)
    if self.use_bias:
        outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
        return self.activation(outputs)  # pylint: disable=not-callable
    return outputs
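As a reading aid, here is a minimal NumPy sketch (not part of the layer) of the recurrence the graph-mode branch above builds, where k, S and x stand for self.kernel[0][0], self.smatrix and the inputs:

import numpy as np

def smooth(x, S, k, smooth_num):
    # First pass: mix the smoothed inputs with the raw inputs.
    out = k * (x @ S) + (1.0 - k) * x
    # Each further pass smooths the previous output and mixes it with the raw inputs again.
    for _ in range(1, smooth_num):
        out = k * (out @ S) + (1.0 - k) * x
    return out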
Example #4
def testWhileContext(self):
    with self.test_session() as sess:
        i = tf.constant(0)
        c = lambda i: tf.less(i, 10)
        b = lambda i: tf.add(i, 1)
        tf.while_loop(c, b, [i])
        for op in sess.graph.get_operations():
            c = op._get_control_flow_context()
            if c:
                compare.ProtoEq(
                    c.to_proto(),
                    control_flow_ops.WhileContext.from_proto(
                        c.to_proto()).to_proto())
Example #5
def testCondContext(self):
    with self.test_session() as sess:
        x = tf.constant(2)
        y = tf.constant(5)
        control_flow_ops.cond(tf.less(x, y), lambda: tf.multiply(x, 17),
                              lambda: tf.add(y, 23))
        for op in sess.graph.get_operations():
            c = op._get_control_flow_context()
            if c:
                compare.ProtoEq(
                    c.to_proto(),
                    control_flow_ops.CondContext.from_proto(
                        c.to_proto()).to_proto())
Example #6
def dense_layer_ot(x,
                   in_size,
                   out_size,
                   sequence_length,
                   scope_name,
                   activation_fn=tf.nn.elu,
                   batch_norm=fu.create_BNParams()):
    '''
    Apply a dense layer over every timestep of the input.
    This is used for filtering the time series.
    :param x: input data of shape [batch, sequence_length, in_size]
    :param in_size: input size, i.e. the number of features
    :param out_size: output size
    :param sequence_length: length of the sequence, i.e. the number of timesteps to iterate over
    :param scope_name: scope name of this transformation
    :param activation_fn: activation function
    :param batch_norm: namedtuple indicating whether to apply batch normalization and the phase (True if training, False if testing)
    :return: tensor of shape [batch, sequence_length, out_size] with the dense layer applied at every timestep
    '''
    layers_output = []
    with tf.variable_scope(scope_name) as vs:
        W = tf.get_variable(
            'weight_filter',
            shape=[in_size, out_size],
            initializer=tf.contrib.layers.xavier_initializer(),
            collections=[GraphKeys.WEIGHTS, GraphKeys.GLOBAL_VARIABLES],
            trainable=True)

        if not batch_norm.apply:
            b = tf.get_variable(
                'bias_filter',
                shape=[out_size],
                initializer=tf.constant_initializer(0.),
                collections=[GraphKeys.BIASES, GraphKeys.GLOBAL_VARIABLES],
                trainable=True)

        for t in range(0, sequence_length):
            layer_output = standard_ops.matmul(x[:, t, :], W)

            if batch_norm.apply:
                layer_output = tf.contrib.layers.batch_norm(
                    layer_output,
                    center=batch_norm.center,
                    scale=batch_norm.scale,
                    is_training=batch_norm.phase,
                    scope=vs.name + '_bn')
            else:
                # No batch norm was applied, so add the bias instead.
                layer_output = standard_ops.add(layer_output, b)

            if activation_fn:
                layer_output = activation_fn(layer_output)

            layers_output.append(tf.expand_dims(
                layer_output,
                1))  # add the timestep dimension back to allow concatenation

        # The same weight matrix W is reused at every timestep (verified to be identical).
        s.add_hidden_layer_summary(layers_output[-1], vs.name, weight=W)
        if not batch_norm.apply:
            tf.summary.histogram(vs.name + '_bias', b)

    return tf.concat(layers_output, axis=1)
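A minimal usage sketch, assuming TensorFlow 1.x and that the fu and s helper modules this function relies on are importable; the shapes below are illustrative:

import tensorflow as tf

# x holds a batch of time series: [batch, sequence_length, in_size].
x = tf.placeholder(tf.float32, [None, 20, 8])
filtered = dense_layer_ot(x,
                          in_size=8,
                          out_size=4,
                          sequence_length=20,
                          scope_name='filter')
# filtered: [batch, 20, 4] -- the same dense transform applied at every timestep.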