Example #1
    def __init__(self, name, start, dtype):
        super().__init__(name=name)
        self.device = Params.DEVICE

        with tf.device(self.device), self.name_scope:
            self.start = start
            self.counter = tf.Variable(start, dtype=dtype, name=name)
            self.counter_cs = tf.CriticalSection(name=name + "_cs")
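The counter above is paired with a `tf.CriticalSection` so that concurrent updates are serialized. A minimal sketch of the usual pattern (the `increment` method below is assumed, not part of the original snippet):

    def increment(self, amount=1):
        def assign_add():
            # Runs with exclusive access to self.counter.
            return self.counter.assign_add(amount).value()

        # execute() serializes concurrent callers, so two simultaneous
        # increments can never interleave their read-modify-write.
        return self.counter_cs.execute(assign_add)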
Example #2
    def __init__(self, layer, data_init=True, **kwargs):
        super(WeightNormalization, self).__init__(layer, **kwargs)
        self.data_init = data_init
        self._track_trackable(layer, name='layer')
        self._init_critical_section = tf.CriticalSection(name='init_mutex')
        self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN)

        if self.data_init and self.is_rnn:
            logging.warning(
                "WeightNormalization: Using `data_init=True` with RNNs "
                "is advised against by the paper. Use `data_init=False`.")
Example #3
    def __init__(self, buffer_size=1000, scope='replay_buffer'):
        """Circular buffer of list of tensors.

    Args:
      buffer_size: (integer) maximum number of tensor lists the buffer can hold.
      scope: (string) variable scope for creating the variables.
    """
        self._buffer_size = np.int64(buffer_size)
        self._scope = scope
        self._tensors = collections.OrderedDict()
        with tf.compat.v1.variable_scope(self._scope):
            self._num_adds = tf.Variable(0, dtype=tf.int64, name='num_adds')
        self._num_adds_cs = tf.CriticalSection(name='num_adds')
Example #4
    def __init__(self,
                 data_spec,
                 batch_size,
                 max_length=1000,
                 scope='TFUniformReplayBuffer',
                 device='cpu:*',
                 table_fn=table.Table):
        """Creates a TFUniformReplayBuffer.

    Args:
      data_spec: A TensorSpec or a list/tuple/nest of TensorSpecs describing a
        single item that can be stored in this buffer.
      batch_size: Batch dimension of tensors when adding to buffer.
      max_length: The maximum number of items that can be stored in a single
        batch segment of the buffer.
      scope: Scope prefix for variables and ops created by this class.
      device: A TensorFlow device to place the Variables and ops.
      table_fn: Function to create tables `table_fn(data_spec, capacity)` that
        can read/write nested tensors.

    Raises:
      ValueError: If batch_size does not evenly divide capacity.
    """
        self._batch_size = batch_size
        self._max_length = max_length
        capacity = self._batch_size * self._max_length
        super(TFUniformReplayBuffer, self).__init__(data_spec, capacity)

        self._id_spec = tensor_spec.TensorSpec([], dtype=tf.int64, name='id')
        self._capacity_value = np.int64(self._capacity)
        self._batch_offsets = (tf.range(self._batch_size, dtype=tf.int64) *
                               self._max_length)
        self._scope = scope
        self._device = device
        self._table_fn = table_fn
        # TODO(sguada) move to create_variables function so we can use make_template
        # to handle this.
        with tf.device(self._device), tf.compat.v1.variable_scope(self._scope):
            self._capacity = tf.constant(capacity, dtype=tf.int64)
            self._data_table = table_fn(self._data_spec, self._capacity_value)
            self._id_table = table_fn(self._id_spec, self._capacity_value)
            self._last_id = tf.compat.v1.get_variable(
                name='last_id',
                shape=[],
                dtype=tf.int64,
                initializer=tf.compat.v1.initializers.constant(-1,
                                                               dtype=tf.int64),
                use_resource=True,
                trainable=False)
            self._last_id_cs = tf.CriticalSection(name='last_id')
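Because the table is flat with capacity `B * L`, `_batch_offsets` places each batch entry in its own block of `max_length` rows. A sketch of the id-to-rows mapping (the helper name `_get_rows_for_id` is assumed):

    def _get_rows_for_id(self, id_):
        # The item id is reduced modulo max_length, then offset into each
        # block: batch entry b occupies rows [b * L, (b + 1) * L).
        return self._batch_offsets + tf.math.mod(id_, self._max_length)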
Example #5
    def __init__(self, population_size, selection_probability):
        """Initialize the PrivacyLedger.

    Args:
      population_size: An integer (may be variable) specifying the size of the
        population, i.e. size of the training data used in each epoch.
      selection_probability: A float (may be variable) specifying the
        probability each record is included in a sample.

    Raises:
      ValueError: If selection_probability is 0.
    """
        self._population_size = population_size
        self._selection_probability = selection_probability

        if tf.executing_eagerly():
            if tf.equal(selection_probability, 0):
                raise ValueError('Selection probability cannot be 0.')
            init_capacity = tf.cast(tf.math.ceil(1 / selection_probability),
                                    tf.int32)
        else:
            if selection_probability == 0:
                raise ValueError('Selection probability cannot be 0.')
            init_capacity = int(np.ceil(1 / selection_probability))

        # The query buffer stores rows corresponding to GaussianSumQueryEntries.
        self._query_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [3], tf.float32, 'query')
        self._sample_var = tf.Variable(initial_value=tf.zeros([3]),
                                       trainable=False,
                                       name='sample')

        # The sample buffer stores rows corresponding to SampleEntries.
        self._sample_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [3], tf.float32, 'sample')
        self._sample_count = tf.Variable(initial_value=0.0,
                                         trainable=False,
                                         name='sample_count')
        self._query_count = tf.Variable(initial_value=0.0,
                                        trainable=False,
                                        name='query_count')
        try:
            # Newer versions of TF
            self._cs = tf.CriticalSection()
        except AttributeError:
            # Older versions of TF
            self._cs = tf.contrib.framework.CriticalSection()
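For concreteness, the initial capacity works out to one buffer row per expected sampling period. With an illustrative sampling rate of 0.01 (not a value from the original source):

selection_probability = 0.01
init_capacity = int(np.ceil(1 / selection_probability))  # == 100 buffer rows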
Example #6
    def __init__(self,
                 population_size,
                 selection_probability=None,
                 max_samples=None,
                 max_queries=None):
        """Initialize the PrivacyLedger.

    Args:
      population_size: An integer (may be variable) specifying the size of the
        population, i.e. size of the training data used in each epoch.
      selection_probability: A float (may be variable) specifying the
        probability each record is included in a sample.
      max_samples: The maximum number of samples. An exception is thrown if more
        than this many samples are recorded.
      max_queries: The maximum number of queries. An exception is thrown if more
        than this many queries are recorded.
    """
        self._population_size = population_size
        self._selection_probability = selection_probability
        if max_samples is None:
            max_samples = 1000 * population_size
        if max_queries is None:
            max_queries = 1000 * population_size

        # The query buffer stores rows corresponding to GaussianSumQueryEntries.
        self._query_buffer = tensor_buffer.TensorBuffer(
            max_queries, [3], tf.float32, 'query')
        self._sample_var = tf.Variable(initial_value=tf.zeros([3]),
                                       trainable=False,
                                       name='sample')

        # The sample buffer stores rows corresponding to SampleEntries.
        self._sample_buffer = tensor_buffer.TensorBuffer(
            max_samples, [3], tf.float32, 'sample')
        self._sample_count = tf.Variable(initial_value=0.0,
                                         trainable=False,
                                         name='sample_count')
        self._query_count = tf.Variable(initial_value=0.0,
                                        trainable=False,
                                        name='query_count')
        try:
            # Newer versions of TF
            self._cs = tf.CriticalSection()
        except AttributeError:
            # Older versions of TF
            self._cs = tf.contrib.framework.CriticalSection()
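This variant sizes the buffers from explicit caps instead of the sampling rate, defaulting to `1000 * population_size` when a cap is omitted. A hypothetical construction (all values below are illustrative, not from the original source):

ledger = PrivacyLedger(
    population_size=60000,      # records per epoch
    selection_probability=None,
    max_samples=50000,          # an exception is thrown past this count
    max_queries=200000)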
Example #7
    def __init__(self):
        super().__init__(name="UniformReplayBuffer")
        self.device = Params.DEVICE

        with tf.device(self.device), self.name_scope:

            self.data = tf.nest.map_structure(lambda spec: tf.Variable(
                initial_value=tf.zeros(
                    (Params.BUFFER_SIZE, spec.shape[0]), dtype=spec.dtype),
                trainable=False,
                validate_shape=False,
                dtype=spec.dtype,
                shape=(Params.BUFFER_SIZE, spec.shape[0])),
                                              Params.BUFFER_DATA_SPEC,
                                              check_types=False)
            self.capacity = Params.BUFFER_SIZE
            self.last_id = tf.Variable(-1, dtype=tf.int32, name="last_id")
            self.last_id_cs = tf.CriticalSection(name='last_id')
Example #8
    def __init__(self, population_size, selection_probability):
        """Initialize the PrivacyLedger.

    Args:
      population_size: An integer (may be variable) specifying the size of the
        population, i.e. size of the training data used in each epoch.
      selection_probability: A float (may be variable) specifying the
        probability each record is included in a sample.

    Raises:
      ValueError: If selection_probability is 0.
    """
        self._population_size = population_size
        self._selection_probability = selection_probability

        if tf.executing_eagerly():
            if tf.equal(selection_probability, 0):
                raise ValueError('Selection probability cannot be 0.')
            init_capacity = tf.cast(tf.math.ceil(1 / selection_probability),
                                    tf.int32)
        else:
            if selection_probability == 0:
                raise ValueError('Selection probability cannot be 0.')
            init_capacity = int(np.ceil(1 / selection_probability))

        # The query buffer stores rows corresponding to P_exponentialSumQueryEntries.
        # Each query row holds p_exponentialsumquery = [exponents, sample_counts,
        # clipping bound, dimension, beta], hence the row size of 5 below.
        self._query_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [5], tf.float32, 'query')
        # Note the difference among _sample_var, _sample_buffer and _sample_count:
        # the latter two are self-explanatory; _sample_var holds
        # [exponents, population_size, selection_probability, query_count].

        self._sample_var = tf.Variable(initial_value=tf.zeros([3]),
                                       trainable=False,
                                       name='sample')
        # The sample buffer stores rows corresponding to SampleEntries.
        self._sample_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [3], tf.float32, 'sample')
        self._sample_count = tf.Variable(initial_value=0.0,
                                         trainable=False,
                                         name='sample_count')
        self._query_count = tf.Variable(initial_value=0.0,
                                        trainable=False,
                                        name='query_count')
        self._cs = tf.CriticalSection()
Example #9
  def __init__(self,
               population_size,
               selection_probability):
    """Initialize the PrivacyLedger.

    Args:
      population_size: An integer (may be variable) specifying the size of the
        population, i.e. size of the training data used in each epoch.
      selection_probability: A float (may be variable) specifying the
        probability each record is included in a sample.

    Raises:
      ValueError: If selection_probability is 0.
    """
    self._population_size = population_size
    self._selection_probability = selection_probability

    # init_capacity is computed differently depending on whether TensorFlow is
    # executing eagerly or building a graph; it is the initial buffer capacity,
    # ceil(1 / selection_probability), i.e. one row per expected sample.
    if tf.executing_eagerly():
      if tf.equal(selection_probability, 0):
        raise ValueError('Selection probability cannot be 0.')
      init_capacity = tf.cast(tf.math.ceil(1 / selection_probability), tf.int32)
    else:
      if selection_probability == 0:
        raise ValueError('Selection probability cannot be 0.')
      init_capacity = int(np.ceil(1 / selection_probability))

    # The query buffer stores rows corresponding to GaussianSumQueryEntries.
    self._query_buffer = tensor_buffer.TensorBuffer(
        init_capacity, [3], tf.float32, 'query')
    self._sample_var = tf.Variable(
        initial_value=tf.zeros([3]), trainable=False, name='sample')
    # _sample_var = [population_size, selection_probability, self._query_count]
    # The sample buffer stores rows corresponding to SampleEntries.
    self._sample_buffer = tensor_buffer.TensorBuffer(
        init_capacity, [3], tf.float32, 'sample')
    self._sample_count = tf.Variable(
        initial_value=0.0, trainable=False, name='sample_count')
    self._query_count = tf.Variable(
        initial_value=0.0, trainable=False, name='query_count')
    self._cs = tf.CriticalSection()  # Ensures serialized reads/writes of the buffers and atomic updates to the counters above.
Example #10
    def __init__(self,
                 data_spec,
                 batch_size,
                 max_length=1000,
                 scope='TFUniformReplayBuffer',
                 device='cpu:*',
                 table_fn=table.Table,
                 dataset_drop_remainder=False,
                 dataset_window_shift=None,
                 stateful_dataset=False):
        """Creates a TFUniformReplayBuffer.

    The TFUniformReplayBuffer stores episodes in `B == batch_size` blocks of
    size `L == max_length`, with total frame capacity
    `C == L * B`.  Storage looks like:

    ```
    block1 ep1 frame1
               frame2
           ...
           ep2 frame1
               frame2
           ...
           <L frames total>
    block2 ep1 frame1
               frame2
           ...
           ep2 frame1
               frame2
           ...
           <L frames total>
    ...
    blockB ep1 frame1
               frame2
           ...
           ep2 frame1
               frame2
           ...
           <L frames total>
    ```
    Multiple episodes may be stored within a given block, up to `max_length`
    frames total.  In practice, new episodes will overwrite old ones as the
    block rolls over its `max_length`.

    Args:
      data_spec: A TensorSpec or a list/tuple/nest of TensorSpecs describing a
        single item that can be stored in this buffer.
      batch_size: Batch dimension of tensors when adding to buffer.
      max_length: The maximum number of items that can be stored in a single
        batch segment of the buffer.
      scope: Scope prefix for variables and ops created by this class.
      device: A TensorFlow device to place the Variables and ops.
      table_fn: Function to create tables `table_fn(data_spec, capacity)` that
        can read/write nested tensors.
      dataset_drop_remainder: If `True`, then when calling
        `as_dataset` with arguments `single_deterministic_pass=True` and
        `sample_batch_size is not None`, the final batch will be dropped if it
        does not contain exactly `sample_batch_size` items.  This is helpful for
        static shape inference as the resulting tensors will always have
        leading dimension `sample_batch_size` instead of `None`.
      dataset_window_shift: Window shift used when calling `as_dataset` with
        arguments `single_deterministic_pass=True` and `num_steps is not None`.
        This determines how the resulting frames are windowed.  If `None`, then
        there is no overlap created between frames and each frame is seen
        exactly once.  For example, if `max_length=5`, `num_steps=2`,
        `sample_batch_size=None`, and `dataset_window_shift=None`, then the
        datasets returned will have frames `{[0, 1], [2, 3], [4]}`.

        If `dataset_window_shift is not None`, then windows are created with a
        window overlap of `dataset_window_shift` and you will see each frame up
        to `num_steps` times.  For example, if `max_length=5`, `num_steps=2`,
        `sample_batch_size=None`, and `dataset_window_shift=1`, then the
        datasets returned will have windows of shifted repeated frames:
        `{[0, 1], [1, 2], [2, 3], [3, 4], [4]}`.

        For more details, see the documentation of `tf.data.Dataset.window`,
        specifically for the `shift` argument.

        The default behavior is to not overlap frames
        (`dataset_window_shift=None`) but users often want to see all
        combinations of frame sequences, in which case `dataset_window_shift=1`
        is the appropriate value.
      stateful_dataset: whether the dataset contains stateful ops or not.
    """
        self._batch_size = batch_size
        self._max_length = max_length
        capacity = self._batch_size * self._max_length
        super(TFUniformReplayBuffer, self).__init__(data_spec, capacity,
                                                    stateful_dataset)

        self._id_spec = tensor_spec.TensorSpec([], dtype=tf.int64, name='id')
        self._capacity_value = np.int64(self._capacity)
        self._batch_offsets = (tf.range(self._batch_size, dtype=tf.int64) *
                               self._max_length)
        self._scope = scope
        self._device = device
        self._table_fn = table_fn
        self._dataset_drop_remainder = dataset_drop_remainder
        self._dataset_window_shift = dataset_window_shift
        with tf.device(self._device), tf.compat.v1.variable_scope(self._scope):
            self._capacity = tf.constant(capacity, dtype=tf.int64)
            self._data_table = table_fn(self._data_spec, self._capacity_value)
            self._id_table = table_fn(self._id_spec, self._capacity_value)
            self._last_id = common.create_variable('last_id', -1)
            self._last_id_cs = tf.CriticalSection(name='last_id')
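Writers obtain a fresh slot by bumping `_last_id` inside the critical section, so concurrent `add_batch` calls never receive the same id. A sketch of that helper, following the TF-Agents pattern (the name `_increment_last_id` is assumed here):

    def _increment_last_id(self, increment=1):
        def _assign_add():
            return self._last_id.assign_add(increment).value()

        # Serialized: two concurrent writers cannot read the same old id
        # and then clobber the same table rows.
        return self._last_id_cs.execute(_assign_add)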
Example #11
    def __init__(self, layer, data_init=True, **kwargs):
        super(WeightNormalization, self).__init__(layer, **kwargs)
        self.data_init = data_init
        self._track_trackable(layer, name='layer')
        self._init_critical_section = tf.CriticalSection(name='init_mutex')
        self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN)
Example #12
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for user-facing generators of tfq ops."""
import enum

import cirq
import tensorflow as tf

from tensorflow_quantum.core.ops import (cirq_ops, tfq_simulate_ops,
                                         tfq_utility_ops)
from tensorflow_quantum.python import quantum_context

_GLOBAL_OP_LOCK = tf.CriticalSection()


class TFQWavefunctionSimulator(enum.Enum):
    """Enum to make specifying TFQ simulators user-friendly."""
    expectation = tfq_simulate_ops.tfq_simulate_expectation
    samples = tfq_simulate_ops.tfq_simulate_samples
    state = tfq_simulate_ops.tfq_simulate_state
    sampled_expectation = tfq_simulate_ops.tfq_simulate_sampled_expectation


def _check_quantum_concurrent(quantum_concurrent):
    if not isinstance(quantum_concurrent, bool):
        raise TypeError("quantum_concurrent must be type bool."
                        " Given: {}".format(str(type(quantum_concurrent))))
Example #13
            runtime_coef = he_std * self.lrmul
        else:
            runtime_coef = self.lrmul
        return self.next_layer(inputs * runtime_coef, training=training)

    def get_config(self):
        config = {
            'lrmul': self.lrmul,
            'gain': self.gain,
            'next_layer': serialize(self.next_layer)
        }
        base_config = super(WscaleNormalizer, self).get_config()
        return {**base_config, **config}


cs = tf.CriticalSection(name='init_mutex')


@tf.keras.utils.register_keras_serializable(package='Dynastes')
class WeightNormalizer(tfkl.Layer):

    def __init__(self,
                 weight_initializer,
                 next_layer=tfkl.Activation('linear'),
                 **kwargs):
        super(WeightNormalizer, self).__init__(**kwargs)
        self.orig_weight_initializer = tfki.get(weight_initializer)
        self.next_layer = get(next_layer)
        self._init_critical_section = cs
        self.g = None
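Note that `cs` is created once at module scope, so every `WeightNormalizer` instance shares the same mutex. A sketch of a one-time initialization of `self.g` guarded by it (the `_init_g` helper and its arguments are assumed, not part of the original source):

    def _init_g(self, shape):
        def _build_g():
            if self.g is None:
                self.g = self.add_weight(
                    name='g', shape=shape, initializer='ones', trainable=True)
            return self.g

        # Shared mutex: at most one layer at a time runs its init block.
        return self._init_critical_section.execute(_build_g)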