Example #1
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=global_tf_float_precision,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False):
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w = tf.get_variable(
            'matrix', [shape[1], outputs_size],
            precision,
            tf.random_normal_initializer(stddev=stddev /
                                         np.sqrt(shape[1] + outputs_size),
                                         seed=seed),
            trainable=trainable)
        b = tf.get_variable('bias', [outputs_size],
                            precision,
                            tf.random_normal_initializer(stddev=stddev,
                                                         mean=bavg,
                                                         seed=seed),
                            trainable=trainable)
        hidden = tf.matmul(inputs, w) + b
        if activation_fn is not None and use_timestep:
            idt = tf.get_variable('idt', [outputs_size],
                                  precision,
                                  tf.random_normal_initializer(stddev=0.001,
                                                               mean=0.1,
                                                               seed=seed),
                                  trainable=trainable)
        if activation_fn is not None:
            if useBN:
                # Batch normalization is not implemented; this branch falls through.
                # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                # return activation_fn(hidden_bn)
                pass
            else:
                if use_timestep:
                    return tf.reshape(activation_fn(hidden),
                                      [-1, outputs_size]) * idt
                else:
                    return tf.reshape(activation_fn(hidden),
                                      [-1, outputs_size])
        else:
            if useBN:
                # Batch normalization is not implemented; this branch falls through.
                # return self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                pass
            else:
                return hidden
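
Below is a minimal usage sketch (not part of the original source). It assumes one_layer is defined as above in a module where tf is the TensorFlow 1.x API (tf.compat.v1 also works) and np is NumPy; precision is passed explicitly so the module-level default global_tf_float_precision is not needed.

# Hypothetical usage sketch for one_layer(); assumptions as stated above.
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A fake batch of 8 input rows with 16 features each.
inputs = tf.constant(np.random.randn(8, 16), dtype=tf.float64)

# One fully connected layer of width 32 with tanh activation and a
# resnet-style timestep variable.
out = one_layer(inputs, 32,
                precision=tf.float64,
                use_timestep=True,
                name='layer_0',
                seed=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out).shape)  # (8, 32)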
Example #2
    def _filter_type_ext(self,
                         inputs,
                         natoms,
                         activation_fn=tf.nn.tanh,
                         stddev=1.0,
                         bavg=0.0,
                         name='linear',
                         reuse=None,
                         seed=None,
                         trainable=True):
        # natom x (nei x 4)
        outputs_size = [1] + self.filter_neuron
        outputs_size_2 = self.n_axis_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            result_all = []
            xyz_scatter_1_all = []
            xyz_scatter_2_all = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x (nei_type_i x 4)
                inputs_i = tf.slice(inputs, [0, start_index * 4],
                                    [-1, self.sel_a[type_i] * 4])
                start_index += self.sel_a[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 4
                inputs_reshape = tf.reshape(inputs_i, [-1, 4])
                xyz_scatter = tf.reshape(
                    tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
                for ii in range(1, len(outputs_size)):
                    w = tf.get_variable(
                        'matrix_' + str(ii) + '_' + str(type_i),
                        [outputs_size[ii - 1], outputs_size[ii]],
                        self.filter_precision,
                        tf.random_normal_initializer(
                            stddev=stddev /
                            np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
                            seed=seed),
                        trainable=trainable)
                    b = tf.get_variable('bias_' + str(ii) + '_' + str(type_i),
                                        [1, outputs_size[ii]],
                                        self.filter_precision,
                                        tf.random_normal_initializer(
                                            stddev=stddev,
                                            mean=bavg,
                                            seed=seed),
                                        trainable=trainable)
                    if self.filter_resnet_dt:
                        idt = tf.get_variable(
                            'idt_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=0.001,
                                                         mean=1.0,
                                                         seed=seed),
                            trainable=trainable)
                    if outputs_size[ii] == outputs_size[ii - 1]:
                        if self.filter_resnet_dt:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter += activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                    elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                        if self.filter_resnet_dt:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                        else:
                            xyz_scatter = tf.concat(
                                [xyz_scatter, xyz_scatter], 1) + activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                    else:
                        xyz_scatter = activation_fn(
                            tf.matmul(xyz_scatter, w) + b)
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(
                    xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1]))
                # natom x nei_type_i x 4
                inputs_i_reshape = tf.reshape(inputs_i,
                                              [-1, shape_i[1] // 4, 4])
                # natom x 4 x outputs_size
                xyz_scatter_1 = tf.matmul(inputs_i_reshape,
                                          xyz_scatter,
                                          transpose_a=True)
                xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape_i[1])
                # natom x 4 x outputs_size_2
                xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                         [-1, -1, outputs_size_2])
                xyz_scatter_1_all.append(xyz_scatter_1)
                xyz_scatter_2_all.append(xyz_scatter_2)

            # for type_i in range(self.ntypes):
            #   for type_j in range(type_i, self.ntypes):
            #     # natom x outputs_size x outputs_size_2
            #     result = tf.matmul(xyz_scatter_1_all[type_i], xyz_scatter_2_all[type_j], transpose_a = True)
            #     # natom x (outputs_size x outputs_size_2)
            #     result = tf.reshape(result, [-1, outputs_size_2 * outputs_size[-1]])
            #     result_all.append(tf.identity(result))
            xyz_scatter_2_coll = tf.concat(xyz_scatter_2_all, axis=2)
            for type_i in range(self.ntypes):
                # natom x outputs_size x (outputs_size_2 x ntypes)
                result = tf.matmul(xyz_scatter_1_all[type_i],
                                   xyz_scatter_2_coll,
                                   transpose_a=True)
                # natom x (outputs_size x outputs_size_2 x ntypes)
                result = tf.reshape(
                    result,
                    [-1, outputs_size_2 * self.ntypes * outputs_size[-1]])
                result_all.append(tf.identity(result))

            # natom x (ntypes x outputs_size x outputs_size_2 x ntypes)
            result_all = tf.concat(result_all, axis=1)

        return result_all
Example #3
    def _filter(self,
                inputs,
                type_input,
                natoms,
                activation_fn=tf.nn.tanh,
                stddev=1.0,
                bavg=0.0,
                name='linear',
                reuse=None,
                seed=None,
                trainable=True):
        # natom x (nei x 4)
        shape = inputs.get_shape().as_list()
        outputs_size = [1] + self.filter_neuron
        outputs_size_2 = self.n_axis_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            xyz_scatter_total = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x (nei_type_i x 4)
                inputs_i = tf.slice(inputs, [0, start_index * 4],
                                    [-1, self.sel_a[type_i] * 4])
                start_index += self.sel_a[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 4
                inputs_reshape = tf.reshape(inputs_i, [-1, 4])
                xyz_scatter = tf.reshape(
                    tf.slice(inputs_reshape, [0, 0], [-1, 1]), [-1, 1])
                if (type_input, type_i) not in self.exclude_types:
                    for ii in range(1, len(outputs_size)):
                        w = tf.get_variable(
                            'matrix_' + str(ii) + '_' + str(type_i),
                            [outputs_size[ii - 1], outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(
                                stddev=stddev / np.sqrt(outputs_size[ii] +
                                                        outputs_size[ii - 1]),
                                seed=seed),
                            trainable=trainable)
                        b = tf.get_variable(
                            'bias_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=stddev,
                                                         mean=bavg,
                                                         seed=seed),
                            trainable=trainable)
                        if self.filter_resnet_dt:
                            idt = tf.get_variable(
                                'idt_' + str(ii) + '_' + str(type_i),
                                [1, outputs_size[ii]],
                                self.filter_precision,
                                tf.random_normal_initializer(stddev=0.001,
                                                             mean=1.0,
                                                             seed=seed),
                                trainable=trainable)
                        if outputs_size[ii] == outputs_size[ii - 1]:
                            if self.filter_resnet_dt:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                            if self.filter_resnet_dt:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b)
                        else:
                            xyz_scatter = activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                else:
                    w = tf.zeros((outputs_size[0], outputs_size[-1]),
                                 dtype=global_tf_float_precision)
                    xyz_scatter = tf.matmul(xyz_scatter, w)
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(
                    xyz_scatter, (-1, shape_i[1] // 4, outputs_size[-1]))
                xyz_scatter_total.append(xyz_scatter)

            # natom x nei x outputs_size
            xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
            # natom x nei x 4
            inputs_reshape = tf.reshape(inputs, [-1, shape[1] // 4, 4])
            # natom x 4 x outputs_size
            xyz_scatter_1 = tf.matmul(inputs_reshape,
                                      xyz_scatter,
                                      transpose_a=True)
            xyz_scatter_1 = xyz_scatter_1 * (4.0 / shape[1])
            # natom x 4 x outputs_size_2
            xyz_scatter_2 = tf.slice(xyz_scatter_1, [0, 0, 0],
                                     [-1, -1, outputs_size_2])
            # # natom x 3 x outputs_size_2
            # qmat = tf.slice(xyz_scatter_2, [0,1,0], [-1, 3, -1])
            # natom x 3 x outputs_size
            qmat = tf.slice(xyz_scatter_1, [0, 1, 0], [-1, 3, -1])
            # natom x outputs_size x 3
            qmat = tf.transpose(qmat, perm=[0, 2, 1])
            # natom x outputs_size x outputs_size_2
            result = tf.matmul(xyz_scatter_1, xyz_scatter_2, transpose_a=True)
            # natom x (outputs_size x outputs_size_2)
            result = tf.reshape(result,
                                [-1, outputs_size_2 * outputs_size[-1]])

        return result, qmat
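
The tensor contractions at the end of _filter can be checked with plain NumPy. The sketch below is purely illustrative (not from the original source): G stands for the concatenated per-neighbour embedding xyz_scatter, R for the natom x nei x 4 matrix obtained by reshaping inputs, M1 for filter_neuron[-1] and M2 for n_axis_neuron.

# NumPy-only shape check (hypothetical) of the final contractions in _filter():
#   xyz_scatter_1 = R^T G / nei                    -> natom x 4 x M1
#   xyz_scatter_2 = xyz_scatter_1[..., :M2]        -> natom x 4 x M2
#   result        = xyz_scatter_1^T xyz_scatter_2  -> natom x M1 x M2
import numpy as np

natom, nei, M1, M2 = 5, 10, 8, 4
G = np.random.randn(natom, nei, M1)   # embedded neighbours
R = np.random.randn(natom, nei, 4)    # rows (s(r), s(r)x/r, s(r)y/r, s(r)z/r)

xyz_scatter_1 = np.matmul(np.transpose(R, (0, 2, 1)), G) * (4.0 / (nei * 4))
xyz_scatter_2 = xyz_scatter_1[:, :, :M2]
qmat = np.transpose(xyz_scatter_1[:, 1:4, :], (0, 2, 1))
result = np.matmul(np.transpose(xyz_scatter_1, (0, 2, 1)), xyz_scatter_2)

print(result.reshape(natom, M1 * M2).shape)  # (5, 32): natom x (M1 * M2)
print(qmat.shape)                            # (5, 8, 3): natom x M1 x 3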
Example #4
    def _filter_r(self,
                  inputs,
                  type_input,
                  natoms,
                  activation_fn=tf.nn.tanh,
                  stddev=1.0,
                  bavg=0.0,
                  name='linear',
                  reuse=None,
                  seed=None,
                  trainable=True):
        # natom x nei
        outputs_size = [1] + self.filter_neuron
        with tf.variable_scope(name, reuse=reuse):
            start_index = 0
            xyz_scatter_total = []
            for type_i in range(self.ntypes):
                # cut-out inputs
                # with natom x nei_type_i
                inputs_i = tf.slice(inputs, [0, start_index],
                                    [-1, self.sel_r[type_i]])
                start_index += self.sel_r[type_i]
                shape_i = inputs_i.get_shape().as_list()
                # with (natom x nei_type_i) x 1
                xyz_scatter = tf.reshape(inputs_i, [-1, 1])
                if (type_input, type_i) not in self.exclude_types:
                    for ii in range(1, len(outputs_size)):
                        w = tf.get_variable(
                            'matrix_' + str(ii) + '_' + str(type_i),
                            [outputs_size[ii - 1], outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(
                                stddev=stddev / np.sqrt(outputs_size[ii] +
                                                        outputs_size[ii - 1]),
                                seed=seed),
                            trainable=trainable)
                        b = tf.get_variable(
                            'bias_' + str(ii) + '_' + str(type_i),
                            [1, outputs_size[ii]],
                            self.filter_precision,
                            tf.random_normal_initializer(stddev=stddev,
                                                         mean=bavg,
                                                         seed=seed),
                            trainable=trainable)
                        if self.filter_resnet_dt:
                            idt = tf.get_variable(
                                'idt_' + str(ii) + '_' + str(type_i),
                                [1, outputs_size[ii]],
                                self.filter_precision,
                                tf.random_normal_initializer(stddev=0.001,
                                                             mean=1.0,
                                                             seed=seed),
                                trainable=trainable)
                        if outputs_size[ii] == outputs_size[ii - 1]:
                            if self.filter_resnet_dt:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter += activation_fn(
                                    tf.matmul(xyz_scatter, w) + b)
                        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
                            if self.filter_resnet_dt:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b) * idt
                            else:
                                xyz_scatter = tf.concat(
                                    [xyz_scatter, xyz_scatter],
                                    1) + activation_fn(
                                        tf.matmul(xyz_scatter, w) + b)
                        else:
                            xyz_scatter = activation_fn(
                                tf.matmul(xyz_scatter, w) + b)
                else:
                    w = tf.zeros((outputs_size[0], outputs_size[-1]),
                                 dtype=global_tf_float_precision)
                    xyz_scatter = tf.matmul(xyz_scatter, w)
                # natom x nei_type_i x out_size
                xyz_scatter = tf.reshape(xyz_scatter,
                                         (-1, shape_i[1], outputs_size[-1]))
                xyz_scatter_total.append(xyz_scatter)

            # natom x nei x outputs_size
            xyz_scatter = tf.concat(xyz_scatter_total, axis=1)
            # natom x outputs_size
            res_rescale = 1. / 5.
            result = tf.reduce_mean(xyz_scatter, axis=1) * res_rescale

        return result
Example #5
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=GLOBAL_TF_FLOAT_PRECISION,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False,
              uniform_seed=False,
              initial_variables=None,
              mixed_prec=None,
              final_layer=False):
    # For good accuracy, the last layer of the fitting network uses a higher-precision neural network.
    if mixed_prec is not None and final_layer:
        inputs = tf.cast(inputs, get_precision(mixed_prec['output_prec']))
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(shape[1] + outputs_size),
            seed=seed if (seed is None or uniform_seed) else seed + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 1)
        if initial_variables is not None:
            w_initializer = tf.constant_initializer(
                initial_variables[name + '/matrix'])
            b_initializer = tf.constant_initializer(initial_variables[name +
                                                                      '/bias'])
        w = tf.get_variable('matrix', [shape[1], outputs_size],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix')
        b = tf.get_variable('bias', [outputs_size],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias')

        if mixed_prec is not None and not final_layer:
            inputs = tf.cast(inputs, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))

        hidden = tf.nn.bias_add(tf.matmul(inputs, w), b)
        if activation_fn is not None and use_timestep:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=0.1,
                seed=seed if (seed is None or uniform_seed) else seed + 2)
            if initial_variables is not None:
                idt_initializer = tf.constant_initializer(
                    initial_variables[name + '/idt'])
            idt = tf.get_variable('idt', [outputs_size],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt')
        if activation_fn is not None:
            if useBN:
                # Batch normalization is not implemented; hidden is passed on unactivated.
                # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                # return activation_fn(hidden_bn)
                pass
            else:
                if use_timestep:
                    if mixed_prec is not None and not final_layer:
                        idt = tf.cast(
                            idt, get_precision(mixed_prec['compute_prec']))
                    hidden = tf.reshape(activation_fn(hidden),
                                        [-1, outputs_size]) * idt
                else:
                    hidden = tf.reshape(activation_fn(hidden),
                                        [-1, outputs_size])

        if mixed_prec is not None:
            hidden = tf.cast(hidden, get_precision(mixed_prec['output_prec']))
        return hidden
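
The mixed_prec argument is expected to be a dict with 'compute_prec' and 'output_prec' entries that get_precision can resolve to TF dtypes. The snippet below is a hypothetical illustration of that calling convention only (not code from the source); inputs stands for a float tensor coming from the surrounding network, and the string dtype names are an assumption about what get_precision accepts.

# Hypothetical sketch of the mixed_prec convention read by one_layer() above.
mixed_prec = {'compute_prec': 'float16',  # assumed dtype name for matmul/bias_add
              'output_prec': 'float32'}   # assumed dtype name for layer outputs

# Hidden layers keep final_layer=False, so inputs, w and b are cast down to the
# compute precision; the last fitting layer passes final_layer=True and runs
# entirely in the higher output precision.
hidden = one_layer(inputs, 240, name='layer_0', seed=1, mixed_prec=mixed_prec)
energy = one_layer(hidden, 1, activation_fn=None, name='final_layer', seed=1,
                   mixed_prec=mixed_prec, final_layer=True)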
Example #6
def embedding_net(xx,
                  network_size,
                  precision,
                  activation_fn=tf.nn.tanh,
                  resnet_dt=False,
                  name_suffix='',
                  stddev=1.0,
                  bavg=0.0,
                  seed=None,
                  trainable=True,
                  uniform_seed=False,
                  initial_variables=None,
                  mixed_prec=None):
    r"""The embedding network.

    The embedding network function :math:`\mathcal{N}` is the composition of
    multiple layers :math:`\mathcal{L}^{(i)}`:

    .. math::
        \mathcal{N} = \mathcal{L}^{(n)} \circ \mathcal{L}^{(n-1)}
        \circ \cdots \circ \mathcal{L}^{(1)}

    A layer :math:`\mathcal{L}` is given by one of the following forms,
    depending on the number of nodes: [1]_

    .. math::
        \mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
        \begin{cases}
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + \mathbf{x}, & N_2=N_1 \\
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}) + (\mathbf{x}, \mathbf{x}), & N_2 = 2N_1\\
            \boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b}), & \text{otherwise} \\
        \end{cases}

    where :math:`\mathbf{x} \in \mathbb{R}^{N_1}` is the input vector and :math:`\mathbf{y} \in \mathbb{R}^{N_2}`
    is the output vector. :math:`\mathbf{w} \in \mathbb{R}^{N_1 \times N_2}` and
    :math:`\mathbf{b} \in \mathbb{R}^{N_2}` are weights and biases, respectively,
    both of which are trainable if `trainable` is `True`. :math:`\boldsymbol{\phi}`
    is the activation function.

    Parameters
    ----------
    xx : Tensor
        Input tensor :math:`\mathbf{x}` of shape [-1,1]
    network_size : list of int
        Size of the embedding network. For example [16,32,64]
    precision :
        Precision of network weights. For example, tf.float64
    activation_fn :
        Activation function :math:`\boldsymbol{\phi}`
    resnet_dt : boolean
        Using time-step in the ResNet construction
    name_suffix : str
        The name suffix appended to each variable.
    stddev : float
        Standard deviation of initializing network parameters
    bavg : float
        Mean of network initial bias
    seed : int
        Random seed for initializing network parameters
    trainable : boolean
        If the network is trainable
    uniform_seed : boolean
        Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed
    initial_variables : dict
        The input dict which stores the embedding net variables
    mixed_prec : dict
        The input dict which stores the mixed precision setting for the embedding net


    References
    ----------
    .. [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity mappings
       in deep residual networks. In Computer Vision – ECCV 2016, pages 630–645.
       Springer International Publishing, 2016.
    """
    input_shape = xx.get_shape().as_list()
    outputs_size = [input_shape[1]] + network_size

    for ii in range(1, len(outputs_size)):
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(outputs_size[ii] + outputs_size[ii - 1]),
            seed=seed if (seed is None or uniform_seed) else seed + ii * 3 + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 3 * ii + 1)
        if initial_variables is not None:
            scope = tf.get_variable_scope().name
            w_initializer = tf.constant_initializer(
                initial_variables[scope + '/matrix_' + str(ii) + name_suffix])
            b_initializer = tf.constant_initializer(
                initial_variables[scope + '/bias_' + str(ii) + name_suffix])
        w = tf.get_variable('matrix_' + str(ii) + name_suffix,
                            [outputs_size[ii - 1], outputs_size[ii]],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix_' + str(ii) + name_suffix)

        b = tf.get_variable('bias_' + str(ii) + name_suffix,
                            [outputs_size[ii]],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias_' + str(ii) + name_suffix)

        if mixed_prec is not None:
            xx = tf.cast(xx, get_precision(mixed_prec['compute_prec']))
            w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
            b = tf.cast(b, get_precision(mixed_prec['compute_prec']))
        hidden = tf.reshape(activation_fn(tf.nn.bias_add(tf.matmul(xx, w), b)),
                            [-1, outputs_size[ii]])
        if resnet_dt:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=1.0,
                seed=seed if
                (seed is None or uniform_seed) else seed + 3 * ii + 2)
            if initial_variables is not None:
                scope = tf.get_variable_scope().name
                idt_initializer = tf.constant_initializer(
                    initial_variables[scope + '/idt_' + str(ii) + name_suffix])
            idt = tf.get_variable('idt_' + str(ii) + name_suffix,
                                  [1, outputs_size[ii]],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt_' + str(ii) + name_suffix)
            if mixed_prec is not None:
                idt = tf.cast(idt, get_precision(mixed_prec['compute_prec']))

        if outputs_size[ii] == outputs_size[ii - 1]:
            if resnet_dt:
                xx += hidden * idt
            else:
                xx += hidden
        elif outputs_size[ii] == outputs_size[ii - 1] * 2:
            if resnet_dt:
                xx = tf.concat([xx, xx], 1) + hidden * idt
            else:
                xx = tf.concat([xx, xx], 1) + hidden
        else:
            xx = hidden
    if mixed_prec is not None:
        xx = tf.cast(xx, get_precision(mixed_prec['output_prec']))
    return xx
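
A minimal usage sketch for embedding_net (not part of the original source): it assumes the function is defined as above in a module where tf is the TensorFlow 1.x API, np is NumPy and the variable_summaries helper is available; mixed_prec is left as None so get_precision is never called.

# Hypothetical usage sketch for embedding_net(); assumptions as stated above.
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Per-neighbour scalar inputs s(r_ij), flattened to shape [-1, 1].
xx = tf.constant(np.random.rand(100, 1), dtype=tf.float64)

with tf.variable_scope('filter_type_0'):
    yy = embedding_net(xx, [16, 32, 64], tf.float64, seed=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(yy).shape)  # (100, 64): widths grow 1 -> 16 -> 32 -> 64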
Example #7
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=GLOBAL_TF_FLOAT_PRECISION,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False,
              uniform_seed=False,
              initial_variables=None):
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(shape[1] + outputs_size),
            seed=seed if (seed is None or uniform_seed) else seed + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 1)
        if initial_variables is not None:
            w_initializer = tf.constant_initializer(initial_variables[name + '/matrix'])
            b_initializer = tf.constant_initializer(initial_variables[name + '/bias'])
        w = tf.get_variable('matrix',
                            [shape[1], outputs_size],
                            precision,
                            w_initializer,
                            trainable=trainable)
        variable_summaries(w, 'matrix')
        b = tf.get_variable('bias',
                            [outputs_size],
                            precision,
                            b_initializer,
                            trainable=trainable)
        variable_summaries(b, 'bias')
        hidden = tf.matmul(inputs, w) + b
        if activation_fn is not None and use_timestep:
            idt_initializer = tf.random_normal_initializer(
                stddev=0.001,
                mean=0.1,
                seed=seed if (seed is None or uniform_seed) else seed + 2)
            if initial_variables is not None:
                idt_initializer = tf.constant_initializer(initial_variables[name + '/idt'])
            idt = tf.get_variable('idt',
                                  [outputs_size],
                                  precision,
                                  idt_initializer,
                                  trainable=trainable)
            variable_summaries(idt, 'idt')
        if activation_fn is not None:
            if useBN:
                # Batch normalization is not implemented; this branch falls through.
                # hidden_bn = self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                # return activation_fn(hidden_bn)
                pass
            else:
                if use_timestep:
                    return tf.reshape(activation_fn(hidden), [-1, outputs_size]) * idt
                else:
                    return tf.reshape(activation_fn(hidden), [-1, outputs_size])
        else:
            if useBN:
                # Batch normalization is not implemented; this branch falls through.
                # return self._batch_norm(hidden, name=name+'_normalization', reuse=reuse)
                pass
            else:
                return hidden