Example 1
def unstack_layer(prev_layer, num=None, axis=0, name='unstack'):
    """
    A layer that unstacks the given dimension of a rank-R tensor into rank-(R-1) tensors; see `tf.unstack() <https://www.tensorflow.org/api_docs/python/tf/unstack>`__.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer
    num : int or None
        The length of the dimension axis. Automatically inferred if None (the default).
    axis : int
        The axis along which to unstack.
    name : str
        A unique layer name.

    Returns
    -------
    list of :class:`Layer`
        The list of layer objects unstacked from the input.

    """
    inputs = prev_layer.outputs
    with tf.variable_scope(name):
        outputs = tf.unstack(inputs, num=num, axis=axis)

    logging.info("UnStackLayer %s: num: %s axis: %d, n_outputs: %d" %
                 (name, num, axis, len(outputs)))

    net_new = []
    scope_name = tf.get_variable_scope().name
    if scope_name:
        full_name = scope_name + '/' + name
    else:
        full_name = name

    for i, _v in enumerate(outputs):
        n = Layer(prev_layer=prev_layer, name=full_name + str(i))
        n.outputs = outputs[i]
        # n.all_layers = list(layer.all_layers)
        # n.all_params = list(layer.all_params)
        # n.all_drop = dict(layer.all_drop)
        # n.all_layers.append(inputs)

        net_new.append(n)

    return net_new
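
A minimal usage sketch for the function above, assuming TensorFlow 1.x graph mode and a TensorLayer-style `InputLayer`; the placeholder shape and names below are illustrative, not taken from the source:

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=(None, 10, 32))  # batch x steps x features
net = tl.layers.InputLayer(x, name='input')
steps = unstack_layer(net, axis=1, name='unstack')    # list of 10 Layer objects
print(len(steps), steps[0].outputs.get_shape())       # -> 10 (?, 32)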
Example 2
    def __init__(self, prev_layer, num=None, axis=0, name='unstack'):

        super(UnStackLayer, self).__init__(prev_layer=prev_layer, name=name)

        outputs = tf.unstack(self.inputs, num=num, axis=axis, name=name)

        logging.info("UnStackLayer %s: num: %s axis: %d, n_outputs: %d" % (self.name, num, axis, len(outputs)))

        net_new = []

        for i, unstacked_dim in enumerate(outputs):
            layer = Layer(prev_layer=self, name=name + str(i))
            layer.outputs = unstacked_dim

            net_new.append(layer)

        self.outputs = net_new

        self._add_layers(net_new)
Example 3
    def __init__(
        self,
        prev_layer,
        init_scale=0.05,
        name='scale',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        logging.info("ScaleLayer  %s: init_scale: %f" %
                     (self.name, init_scale))
        with tf.variable_scope(name):
            # scale = tf.get_variable(name='scale_factor', init, trainable=True, )
            scale = tf.get_variable(
                "scale",
                shape=[1],
                initializer=tf.constant_initializer(value=init_scale),
                trainable=False)
            self.outputs = self.inputs * scale

        self.all_layers.append(self.outputs)
        self.all_params.append(scale)
Example 4
    def __init__(self,
                 prev_layer,
                 n_units=100,
                 is_train=False,
                 bn=False,
                 W_init=tf.truncated_normal_initializer(stddev=0.1,
                                                        seed=global_seed),
                 b_init=tf.constant_initializer(value=0.0),
                 name='Dense_with_bn'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        self.is_train = is_train

        n_in = int(
            self.inputs.get_shape()[-1])  # number of input units from the previous layer
        with tf.variable_scope(name):
            W = tf.get_variable(name='W',
                                shape=(n_in, n_units),
                                initializer=W_init,
                                dtype=tf.float32)
            b = tf.get_variable(name='b',
                                shape=n_units,
                                initializer=b_init,
                                dtype=tf.float32)
            w_x_b = tf.matmul(self.inputs, W) + b
            if bn:
                print("DenseLayer(bn)  %s: %d %s" % (self.name, n_units, "bn"))
                w_x_b = tf.layers.batch_normalization(w_x_b,
                                                      training=self.is_train,
                                                      name='norm')
            else:
                print("DenseLayer  %s: %d" % (self.name, n_units))
            self.outputs = tf.nn.relu(w_x_b)

        # update layer paras
        self.all_layers.append(self.outputs)
        self.all_params.extend([W, b])
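
One caveat worth illustrating: `tf.layers.batch_normalization` registers its moving-average updates in `tf.GraphKeys.UPDATE_OPS`, so a training step built on top of this layer should depend on those ops, otherwise the `is_train=False` path will use stale statistics. A minimal sketch of that wiring (TF 1.x; the tensors and loss below are illustrative, not part of the source):

import tensorflow as tf

x = tf.placeholder(tf.float32, (None, 8))
h = tf.layers.batch_normalization(tf.layers.dense(x, 4), training=True, name='norm')
loss = tf.reduce_mean(tf.square(h))

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):  # run the BN updates with every train step
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)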
Example 5
    def __init__(
        self,
        prev_layer,
        act=None,
        shape=(5, 5, 1, 100),
        strides=(1, 1, 1, 1),
        padding='SAME',
        W_init=tf.truncated_normal_initializer(stddev=0.02),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        use_cudnn_on_gpu=None,
        use_sn=False,
        update_collection=None,
        data_format=None,
        name='cnn_layer',
    ):
        super(SpectralConv2dLayer, self).__init__(prev_layer=prev_layer,
                                                  act=act,
                                                  W_init_args=W_init_args,
                                                  b_init_args=b_init_args,
                                                  name=name)

        logging.info(
            "SpectralConv2dLayer %s: shape: %s strides: %s pad: %s act: %s spectral: %s"
            % (self.name, str(shape), str(strides), padding, self.act.__name__
               if self.act is not None else 'No Activation', use_sn))
        # check layer name (fixed)
        Layer.__init__(self, prev_layer=prev_layer, name=name)

        # the input of this layer is the output of previous layer (fixed)
        self.inputs = prev_layer.outputs

        with tf.variable_scope(name) as scope:
            if self._scope_has_variables(scope):
                scope.reuse_variables()
            W = tf.get_variable(name='W_conv2d',
                                shape=shape,
                                initializer=W_init,
                                dtype=LayersConfig.tf_dtype,
                                **self.W_init_args)
            if use_sn:
                self.outputs = tf.nn.conv2d(
                    self.inputs,
                    self._spectral_normed_weight(
                        W, update_collection=update_collection),
                    strides=strides,
                    padding=padding,
                    use_cudnn_on_gpu=use_cudnn_on_gpu,
                    data_format=data_format)

            else:
                self.outputs = tf.nn.conv2d(self.inputs,
                                            W,
                                            strides=strides,
                                            padding=padding,
                                            use_cudnn_on_gpu=use_cudnn_on_gpu,
                                            data_format=data_format)
            if b_init:
                b = tf.get_variable(name='b_conv2d',
                                    shape=(shape[-1]),
                                    initializer=b_init,
                                    dtype=LayersConfig.tf_dtype,
                                    **self.b_init_args)

                self.outputs = tf.nn.bias_add(self.outputs, b, name='bias_add')

            self.outputs = self._apply_activation(self.outputs)

        self._add_layers(self.outputs)

        if b_init:
            self._add_params([W, b])
        else:
            self._add_params(W)
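
The `_spectral_normed_weight` helper used above is not shown in this snippet. Such helpers typically run one or more steps of power iteration to estimate the largest singular value of the flattened kernel and divide the weight by it (Miyato et al., 2018). The sketch below is an assumption about what that helper does, not its actual implementation; the names `spectral_normed_weight_sketch` and `u_sketch` are made up:

def _l2_normalize(v, eps=1e-12):
    return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)

def spectral_normed_weight_sketch(W, num_iters=1):
    out_dim = W.shape.as_list()[-1]
    w_mat = tf.reshape(W, [-1, out_dim])  # flatten kernel to (everything else, out channels)
    u = tf.get_variable('u_sketch', shape=(1, out_dim),
                        initializer=tf.truncated_normal_initializer(),
                        trainable=False)
    u_hat = u
    for _ in range(num_iters):  # power iteration towards the top singular vectors
        v_hat = _l2_normalize(tf.matmul(u_hat, tf.transpose(w_mat)))
        u_hat = _l2_normalize(tf.matmul(v_hat, w_mat))
    sigma = tf.squeeze(tf.matmul(tf.matmul(v_hat, w_mat), tf.transpose(u_hat)))
    # Real implementations also assign u_hat back to u, often gated by update_collection.
    return W / sigma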
Example 6
    def __init__(
        self,
        prev_layer,
        n_units=100,
        act=None,
        W_init=tf.truncated_normal_initializer(stddev=0.1),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        use_sn=False,
        update_collection=None,
        name='dense',
    ):

        super(SpectralDenseLayer, self).__init__(prev_layer=prev_layer,
                                                 act=act,
                                                 W_init_args=W_init_args,
                                                 b_init_args=b_init_args,
                                                 name=name)

        logging.info("DenseLayer  %s: %d %s spectral: %s" %
                     (self.name, n_units, self.act.__name__
                      if self.act is not None else 'No Activation', use_sn))
        # check layer name (fixed)
        Layer.__init__(self, prev_layer=prev_layer, name=name)

        # the input of this layer is the output of previous layer (fixed)
        self.inputs = prev_layer.outputs

        self.n_units = n_units

        if self.inputs.get_shape().ndims != 2:
            raise AssertionError(
                "The input dimension must be rank 2, please reshape or flatten it"
            )

        n_in = int(self.inputs.get_shape()[-1])

        with tf.variable_scope(name) as scope:
            if self._scope_has_variables(scope):
                scope.reuse_variables()
            W = tf.get_variable(name='W',
                                shape=(n_in, n_units),
                                initializer=W_init,
                                dtype=LayersConfig.tf_dtype,
                                **self.W_init_args)

            if use_sn:  # if set spectral norm True
                self.outputs = tf.matmul(
                    self.inputs,
                    self._spectral_normed_weight(
                        W, update_collection=update_collection))

            else:
                self.outputs = tf.matmul(self.inputs, W)

            if b_init is not None:
                try:
                    b = tf.get_variable(name='b',
                                        shape=(n_units),
                                        initializer=b_init,
                                        dtype=LayersConfig.tf_dtype,
                                        **self.b_init_args)
                except Exception:  # If initializer is a constant, do not specify shape.
                    b = tf.get_variable(name='b',
                                        initializer=b_init,
                                        dtype=LayersConfig.tf_dtype,
                                        **self.b_init_args)

                self.outputs = tf.nn.bias_add(self.outputs, b, name='bias_add')

            self.outputs = self._apply_activation(self.outputs)

        self._add_layers(self.outputs)
        if b_init is not None:
            self._add_params([W, b])
        else:
            self._add_params(W)
Example 7
    def __init__(
        self,
        prev_layer,
        decay=0.9,
        # de_decay=0.1,
        epsilon=0.00001,
        act=tf.identity,
        is_train=False,
        is_act=1.0,  # blending factor used in the experiment of turning BN up or down
        # alpha_init=1.0,
        beta_init=tf.zeros_initializer,
        gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002),
        name='modify_batchnorm_layer',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.epsilon = epsilon
        self.inputs = prev_layer.outputs
        x_shape = self.inputs.get_shape()
        params_shape = x_shape[-1:]

        from tensorflow.python.training import moving_averages

        with tf.variable_scope(name):
            axis = list(range(len(x_shape) - 1))
            # 1. beta, gamma
            variables = []
            if beta_init:
                if tf.__version__ > '0.12.1' and beta_init == tf.zeros_initializer:
                    beta_init = beta_init()
                beta = tf.get_variable('beta',
                                       shape=params_shape,
                                       initializer=beta_init,
                                       dtype=LayersConfig.tf_dtype,
                                       trainable=is_train)
                variables.append(beta)
            else:
                beta = None

            if gamma_init:
                gamma = tf.get_variable(
                    'gamma',
                    shape=params_shape,
                    initializer=gamma_init,
                    dtype=LayersConfig.tf_dtype,
                    trainable=is_train,
                )
                variables.append(gamma)
            else:
                gamma = None

            # 2.
            if tf.__version__ > '0.12.1':
                moving_mean_init = tf.zeros_initializer()
            else:
                moving_mean_init = tf.zeros_initializer
            moving_mean = tf.get_variable('moving_mean',
                                          params_shape,
                                          initializer=moving_mean_init,
                                          dtype=LayersConfig.tf_dtype,
                                          trainable=False)
            moving_variance = tf.get_variable(
                'moving_variance',
                params_shape,
                initializer=tf.constant_initializer(1.),
                dtype=LayersConfig.tf_dtype,
                trainable=False)

            # 3.
            # These ops will only be performed when training.
            mean, variance = tf.nn.moments(self.inputs, axis)
            self.mean = mean
            self.variance = variance
            try:  # TF12
                update_moving_mean = moving_averages.assign_moving_average(
                    moving_mean, mean, decay,
                    zero_debias=False)  # if zero_debias=True, has bias
                update_moving_variance = moving_averages.assign_moving_average(
                    moving_variance, variance, decay,
                    zero_debias=False)  # if zero_debias=True, has bias
            # logging.info("TF12 moving")
            except Exception:  # TF11
                update_moving_mean = moving_averages.assign_moving_average(
                    moving_mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(
                    moving_variance, variance, decay)

            # logging.info("TF11 moving")

            def mean_var_with_update():
                with tf.control_dependencies(
                    [update_moving_mean, update_moving_variance]):
                    return tf.identity(mean), tf.identity(variance)

            if is_train:
                avg, var = mean_var_with_update()
                avg_ = tf.add(tf.multiply(is_act, avg),
                              tf.multiply((1.0 - is_act), moving_mean))
                var_ = tf.add(tf.multiply(is_act, var),
                              tf.multiply((1.0 - is_act), moving_variance))
                self.outputs = act(
                    tf.nn.batch_normalization(self.inputs, avg_, var_, beta,
                                              gamma, epsilon))
            else:
                self.outputs = act(
                    tf.nn.batch_normalization(self.inputs, moving_mean,
                                              moving_variance, beta, gamma,
                                              epsilon))

        self.all_layers.append(self.outputs)
        self.all_params.extend(variables)
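
The non-standard part of this layer is the `is_act` blend: during training, the statistics used for normalization are a convex combination of the fresh batch statistics and the stored moving averages. A small arithmetic sketch of that blend with made-up numbers:

batch_mean, moving_mean = 0.8, 0.1
for is_act in (1.0, 0.5, 0.0):
    blended = is_act * batch_mean + (1.0 - is_act) * moving_mean
    print(is_act, blended)  # 1.0 -> 0.8, 0.5 -> 0.45, 0.0 -> 0.1 (pure moving average)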
Example 8
    def __init__(
        self,
        prev_layer,
        epsilon=0.001,
        act=tf.identity,
        group_num=16,
        is_train=True,
        # alpha_init=1.0,
        beta_init=tf.zeros_initializer,
        gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002),
        name='modify_batchnorm_layer',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        x_shape = self.inputs.get_shape()
        params_shape = x_shape[-1:]
        # tf.reshape allows at most one -1, so keep the width dimension explicit.
        self.inputs = tf.reshape(
            self.inputs,
            [-1, x_shape[1].value, x_shape[2].value // group_num, group_num])
        self.epsilon = epsilon

        with tf.variable_scope(name):
            axis = list(range(1, (len(x_shape) - 1)))  # shape:[x, o, o, x]
            # 1. beta, gamma
            variables = []
            if beta_init:
                if tf.__version__ > '0.12.1' and beta_init == tf.zeros_initializer:
                    beta_init = beta_init()
                beta = tf.get_variable('beta',
                                       shape=params_shape,
                                       initializer=beta_init,
                                       dtype=LayersConfig.tf_dtype,
                                       trainable=is_train)
                variables.append(beta)
            else:
                beta = None

            if gamma_init:
                gamma = tf.get_variable(
                    'gamma',
                    shape=params_shape,
                    initializer=gamma_init,
                    dtype=LayersConfig.tf_dtype,
                    trainable=is_train,
                )
                variables.append(gamma)
            else:
                gamma = None

            # These ops will only be performed when training.
            mean, variance = tf.nn.moments(self.inputs, axis)
            self.mean = mean
            self.variance = variance

            with ops.name_scope(name, "groupnorm",
                                [self.inputs, mean, variance, gamma, beta]):
                x = (self.inputs - mean) / tf.sqrt(variance + epsilon)
                x = tf.reshape(x, [-1, x_shape[1].value, x_shape[2].value])
                self.outputs = act(x * gamma + beta)

        self.all_layers.append(self.outputs)
        self.all_params.extend(variables)
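
A conceptual NumPy sketch of the grouping idea behind this layer: the channel dimension is split into `group_num` groups and each group is normalized with its own mean and variance. It illustrates group normalization in general under made-up shapes; it is not a line-by-line replica of the reshape and moment axes used above:

import numpy as np

x = np.random.randn(2, 7, 16).astype(np.float32)   # (batch, width, channels)
group_num, eps = 4, 1e-3
g = x.reshape(2, 7, 16 // group_num, group_num)     # split channels into groups
mean = g.mean(axis=(1, 2), keepdims=True)           # statistics per (sample, group)
var = g.var(axis=(1, 2), keepdims=True)
y = ((g - mean) / np.sqrt(var + eps)).reshape(2, 7, 16)
print(y.shape)  # (2, 7, 16)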