Example #1
File: base.py  Project: zfxu/Dragon
    def add_variable(self,
                     name,
                     shape,
                     dtype=None,
                     trainable=True,
                     initializer=None,
                     regularizer=None):
        if dtype is None:
            dtype = self.dtype
        existing_variables = set(tf_variables.global_variables())

        with vs.variable_scope(self._scope,
                               reuse=self.built or self._reuse) as scope:
            with ops.name_scope(scope.original_name_scope):

                # Note: `full_name` is computed but never used below.
                full_name = get_tensor_scope() + name
                variable = vs.get_variable(name,
                                           shape=shape,
                                           initializer=initializer,
                                           dtype=dtypes.as_dtype(dtype),
                                           trainable=trainable and self.trainable)
                if variable in existing_variables:
                    # The variable already exists, which can only happen
                    # after the layer is built: return it without re-tracking.
                    return variable
                if regularizer:
                    raise NotImplementedError()

        if trainable:
            self._trainable_weights.append(variable)
        else:
            self._non_trainable_weights.append(variable)
        return variable
Example #2
File: base.py  Project: neopenx/Dragon
    def add_variable(self, name, shape, dtype=None, trainable=True,
                     initializer=None, regularizer=None):
        if dtype is None:
            dtype = self.dtype
        existing_variables = set(tf_variables.global_variables())

        with vs.variable_scope(self._scope,
                               reuse=self.built or self._reuse) as scope:
            with ops.name_scope(scope.original_name_scope):

                # Note: `full_name` is computed but never used below.
                full_name = get_tensor_scope() + name
                variable = vs.get_variable(name,
                                           shape=shape,
                                           initializer=initializer,
                                           dtype=dtypes.as_dtype(dtype),
                                           trainable=trainable and self.trainable)
                if variable in existing_variables:
                    # The variable already exists, which can only happen
                    # after the layer is built: return it without re-tracking.
                    return variable
                if regularizer:
                    raise NotImplementedError()

        if trainable:
            self._trainable_weights.append(variable)
        else:
            self._non_trainable_weights.append(variable)
        return variable
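
Examples #1 and #2 show the same add_variable() implementation from two forks of Dragon: it falls back to the layer's default dtype, enters the layer's variable scope (with reuse enabled once the layer is built or explicitly marked reusable), gets or creates the variable, returns early if the variable already existed, and otherwise tracks it in _trainable_weights or _non_trainable_weights. Below is a minimal sketch of the calling pattern from a subclass's build(); the class MyDense, its units argument, and the math_ops import are illustrative assumptions, not part of the Dragon source.

# Hypothetical subclass sketch: `MyDense` and `units` are assumptions.
# Assumes `Layer` is the base class defined in this base.py and that
# math_ops comes from tensorflow.python.ops.
class MyDense(Layer):
    def __init__(self, units, **kwargs):
        super(MyDense, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # add_variable() creates 'kernel' under this layer's variable
        # scope and, since trainable=True, appends it to
        # self._trainable_weights. __call__ flips self.built afterwards.
        self.kernel = self.add_variable(
            'kernel',
            shape=[int(input_shape[-1]), self.units],
            trainable=True)

    def call(self, inputs):
        return math_ops.matmul(inputs, self.kernel)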
Example #3
File: base.py  Project: zfxu/Dragon
    def __call__(self, inputs, *args, **kwargs):
        with vs.variable_scope(self._scope,
                               reuse=self.built or self._reuse) as scope:
            with ops.name_scope(scope.original_name_scope):
                if not self.built:
                    # Build lazily on the first call, from the input shape(s).
                    input_shapes = [x.get_shape()
                                    for x in nest.flatten(inputs)]
                    if len(input_shapes) == 1:
                        self.build(input_shapes[0])
                    else:
                        self.build(input_shapes)
                outputs = self.call(inputs, *args, **kwargs)
                # # Apply activity regularization.
                # # Note that it should be applied every time the layer creates a new
                # # output, since it is output-specific.
                # if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
                #     output_list = _to_list(outputs)
                #     for output in output_list:
                #         with ops.name_scope('ActivityRegularizer'):
                #             activity_regularization = self.activity_regularizer(output)
                #         self.add_loss(activity_regularization)
                #         _add_elements_to_collection(
                #             activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
        # Update global default collections.
        _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
        self.built = True
        return outputs
Example #4
File: base.py  Project: neopenx/Dragon
    def __call__(self, inputs, *args, **kwargs):
        with vs.variable_scope(self._scope,
                               reuse=self.built or self._reuse) as scope:
            with ops.name_scope(scope.original_name_scope):
                if not self.built:
                    # Build lazily on the first call, from the input shape(s).
                    input_shapes = [x.get_shape()
                                    for x in nest.flatten(inputs)]
                    if len(input_shapes) == 1:
                        self.build(input_shapes[0])
                    else:
                        self.build(input_shapes)
                outputs = self.call(inputs, *args, **kwargs)
                # # Apply activity regularization.
                # # Note that it should be applied every time the layer creates a new
                # # output, since it is output-specific.
                # if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
                #     output_list = _to_list(outputs)
                #     for output in output_list:
                #         with ops.name_scope('ActivityRegularizer'):
                #             activity_regularization = self.activity_regularizer(output)
                #         self.add_loss(activity_regularization)
                #         _add_elements_to_collection(
                #             activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
        # Update global default collections.
        _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
        self.built = True
        return outputs
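
Examples #3 and #4 show the matching __call__ wrapper: on the first invocation it flattens the inputs, collects their shapes, runs build(), then delegates to call(); afterwards it registers any pending updates in the UPDATE_OPS collection and flags the layer as built so that later calls reuse the same variable scope. A minimal usage sketch, assuming the hypothetical MyDense layer above and a TF-1.x style graph with array_ops and dtypes from tensorflow.python:

# Assumes the MyDense sketch above; placeholder() and float32 come from
# tensorflow.python's array_ops and dtypes modules.
x = array_ops.placeholder(dtypes.float32, shape=[None, 8])
layer = MyDense(units=4, name='dense')

y1 = layer(x)  # first call: build() runs and 'dense/kernel' is created
y2 = layer(x)  # second call: self.built is True, so the scope is reused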