Example No. 1
    def testReadPartialSlots(self):
        spec = [
            specs.TensorSpec([3], tf.float32, 'action'),
            [
                specs.TensorSpec([5], tf.float32, 'camera'),
                specs.TensorSpec([3, 2], tf.float32, 'lidar')
            ]
        ]
        replay_table = table.Table(spec, capacity=4)

        batch_size = 2
        action = 1 * np.ones([batch_size] + spec[0].shape.as_list())
        camera = 2 * np.ones([batch_size] + spec[1][0].shape.as_list())
        lidar = 3 * np.ones([batch_size] + spec[1][1].shape.as_list())

        values = [action, [camera, lidar]]
        tensors = nest.map_structure(
            lambda x: tf.convert_to_tensor(x, dtype=tf.float32), values)

        write_op = replay_table.write(list(range(batch_size)), tensors)
        read_op = replay_table.read(list(range(batch_size)),
                                    slots=['lidar', ['action']])
        self.evaluate(tf.global_variables_initializer())
        self.evaluate(write_op)
        read_value_ = self.evaluate(read_op)
        expected_values = [lidar, [action]]
        nest.map_structure(self.assertAllClose, read_value_, expected_values)
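For orientation, `table.Table` here is presumably `tf_agents.replay_buffers.table.Table` (the imports are not shown in these snippets): it keeps one variable per named slot and is addressed by row index. A minimal eager-mode sketch of the write/read round trip, under that assumption:

import tensorflow as tf
from tf_agents import specs
from tf_agents.replay_buffers import table

# Single-slot table with room for 4 rows.
spec = specs.TensorSpec([3], tf.float32, 'action')
replay_table = table.Table(spec, capacity=4)

# Write one row, then read it back (eager execution assumed).
replay_table.write(0, tf.ones([3], dtype=tf.float32))
print(replay_table.read(0).numpy())  # expected: [1. 1. 1.]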
Example No. 2
    def testReadWriteString(self):
        spec = [
            specs.TensorSpec([3], tf.float32, 'action'),
            [
                specs.TensorSpec([], tf.string, 'camera'),
                specs.TensorSpec([3, 2], tf.float32, 'lidar')
            ]
        ]
        replay_table = table.Table(spec, capacity=3)
        variables = replay_table.variables()
        self.assertEqual(3, len(variables))
        self.assertAllEqual(
            ['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],
            [v.name for v in variables])

        expected_values = [
            1 * np.ones(spec[0].shape.as_list()),
            [b'foo', 3 * np.ones(spec[1][1].shape.as_list())]
        ]
        tensors = nest.map_structure(
            lambda x, dtype: tf.convert_to_tensor(x, dtype=dtype),
            expected_values, [tf.float32, [tf.string, tf.float32]])

        write_op = replay_table.write(0, tensors)
        read_op = replay_table.read(0)
        self.evaluate(tf.global_variables_initializer())
        self.evaluate(write_op)
        read_value_ = self.evaluate(read_op)
        self.assertAllClose(read_value_[0], expected_values[0])
        self.assertEqual(read_value_[1][0], expected_values[1][0])
        self.assertAllClose(read_value_[1][1], expected_values[1][1])
Example No. 3
 def _quantum_circuit_spec(self):
     spec = {
         'alpha': specs.TensorSpec(shape=[1], dtype=tf.float32),
         'phi_g': specs.TensorSpec(shape=[1], dtype=tf.float32),
         'phi_e': specs.TensorSpec(shape=[1], dtype=tf.float32)
     }
     return spec
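As a side note, a nested spec like the one above can be turned into matching concrete tensors with `tf.nest.map_structure`. A minimal sketch, assuming TF2 and that `specs.TensorSpec` behaves like `tf.TensorSpec` (which is what tf-agents aliases it to):

import tensorflow as tf

spec = {
    'alpha': tf.TensorSpec(shape=[1], dtype=tf.float32),
    'phi_g': tf.TensorSpec(shape=[1], dtype=tf.float32),
    'phi_e': tf.TensorSpec(shape=[1], dtype=tf.float32),
}

# Build a nest of zero tensors with the same structure, shapes, and dtypes.
zeros = tf.nest.map_structure(lambda s: tf.zeros(s.shape, s.dtype), spec)
print(zeros['alpha'])  # tf.Tensor([0.], shape=(1,), dtype=float32)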
Example No. 4
    def testReadWriteBatch(self):
        spec = [
            specs.TensorSpec([3], tf.float32, 'action'),
            [
                specs.TensorSpec([5], tf.float32, 'camera'),
                specs.TensorSpec([3, 2], tf.float32, 'lidar')
            ]
        ]
        replay_table = table.Table(spec, capacity=4)

        batch_size = 2
        expected_values = [
            1 * np.ones([batch_size] + spec[0].shape.as_list()),
            [
                2 * np.ones([batch_size] + spec[1][0].shape.as_list()),
                3 * np.ones([batch_size] + spec[1][1].shape.as_list())
            ]
        ]
        tensors = tf.nest.map_structure(
            lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32),
            expected_values)

        write_op = replay_table.write(list(range(batch_size)), tensors)
        read_op = replay_table.read(list(range(batch_size)))
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.evaluate(write_op)
        read_value_ = self.evaluate(read_op)
        tf.nest.map_structure(self.assertAllClose, read_value_,
                              expected_values)
Example No. 5
    def testReadWriteDict(self):
        spec = {
            'action': specs.TensorSpec([3], tf.float32, 'action'),
            'camera': specs.TensorSpec([5], tf.float32, 'camera'),
            'lidar': specs.TensorSpec([3, 2], tf.float32, 'lidar')
        }
        replay_table = table.Table(spec, capacity=3)

        variables = replay_table.variables()
        self.assertEqual(3, len(variables))
        self.assertAllEqual(
            ['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],
            [v.name for v in variables])

        expected_values = {
            'action': 1 * np.ones(spec['action'].shape.as_list()),
            'camera': 2 * np.ones(spec['camera'].shape.as_list()),
            'lidar': 3 * np.ones(spec['lidar'].shape.as_list())
        }
        tensors = nest.map_structure(
            lambda x: tf.convert_to_tensor(x, dtype=tf.float32),
            expected_values)

        write_op = replay_table.write(0, tensors)
        read_op = replay_table.read(0)
        self.evaluate(tf.global_variables_initializer())
        self.evaluate(write_op)
        read_value_ = self.evaluate(read_op)
        nest.map_structure(self.assertAllClose, read_value_, expected_values)
Example No. 6
 def _quantum_circuit_spec(self):
     spec = {
         'beta': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'epsilon': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'phi': specs.TensorSpec(shape=[1], dtype=tf.float32)
     }
     return spec
Example No. 7
    def testWritePartialSlots(self):
        spec = [
            specs.TensorSpec([3], tf.float32, 'action'),
            [
                specs.TensorSpec([5], tf.float32, 'camera'),
                specs.TensorSpec([3, 2], tf.float32, 'lidar')
            ]
        ]
        replay_table = table.Table(spec, capacity=4)

        batch_size = 2

        action1 = 1 * np.ones([batch_size] + spec[0].shape.as_list())
        camera1 = 2 * np.ones([batch_size] + spec[1][0].shape.as_list())
        lidar1 = 3 * np.ones([batch_size] + spec[1][1].shape.as_list())
        write_op1 = replay_table.write(list(range(batch_size)),
                                       [action1, [camera1, lidar1]])

        lidar2 = 10 * np.ones([batch_size] + spec[1][1].shape.as_list())
        action2 = 20 * np.ones([batch_size] + spec[0].shape.as_list())
        write_op2 = replay_table.write(list(range(batch_size)),
                                       [lidar2, [action2]],
                                       ['lidar', ['action']])
        read_op = replay_table.read(list(range(batch_size)))
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.evaluate(write_op1)
        self.evaluate(write_op2)
        read_value_ = self.evaluate(read_op)
        expected_values = [action2, [camera1, lidar2]]
        tf.nest.map_structure(self.assertAllClose, read_value_,
                              expected_values)
Example No. 8
    def testReadWriteNamedTuple(self):
        # pylint: disable=invalid-name
        Observation = collections.namedtuple('Observation',
                                             ['action', 'camera', 'lidar'])
        # pylint: enable=invalid-name
        spec = Observation(action=specs.TensorSpec([3], tf.float32, 'action'),
                           camera=specs.TensorSpec([5], tf.float32, 'camera'),
                           lidar=specs.TensorSpec([3, 2], tf.float32, 'lidar'))
        replay_table = table.Table(spec, capacity=3)

        variables = replay_table.variables()
        self.assertEqual(3, len(variables))
        self.assertAllEqual(
            ['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],
            [v.name for v in variables])

        expected_values = Observation(
            action=1 * np.ones(spec.action.shape.as_list()),
            camera=2 * np.ones(spec.camera.shape.as_list()),
            lidar=3 * np.ones(spec.lidar.shape.as_list()))
        tensors = tf.nest.map_structure(
            lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32),
            expected_values)

        write_op = replay_table.write(0, tensors)
        read_op = replay_table.read(0)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.evaluate(write_op)
        read_value_ = self.evaluate(read_op)
        tf.nest.map_structure(self.assertAllClose, read_value_,
                              expected_values)
Example No. 9
 def test_summary_no_exception(self):
   """Tests that Network.summary() does not throw an exception."""
   observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
   action_spec = specs.TensorSpec([2], tf.float32, 'action')
   net = MockNetwork(observation_spec, action_spec)
   net.create_variables()
   net.summary()
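The `MockNetwork` used throughout these network tests is not shown. A hypothetical minimal stand-in built on `tf_agents.networks.network.Network` could look like the sketch below (the layer choice and names are assumptions, not the original class); with it, `create_variables()` and `summary()` can be called as in the test above:

import tensorflow as tf
from tf_agents.networks import network

class TinyNetwork(network.Network):
    """Hypothetical stand-in for MockNetwork: a single Dense layer over observations."""

    def __init__(self, observation_spec, action_spec, name='TinyNetwork'):
        super(TinyNetwork, self).__init__(
            input_tensor_spec=observation_spec, state_spec=(), name=name)
        # One output unit per action dimension.
        self._dense = tf.keras.layers.Dense(action_spec.shape.num_elements())

    def call(self, observations, step_type=None, network_state=(), training=False):
        return self._dense(observations), network_state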
Example No. 10
 def _control_circuit_spec(self):
     spec = {
         'alpha': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'theta': specs.TensorSpec(shape=[7], dtype=tf.float32),
         'phi': specs.TensorSpec(shape=[7], dtype=tf.float32)
     }
     return spec
Example No. 11
    def testReadWriteSingle(self):
        spec = [
            specs.TensorSpec([3], tf.float32, 'action'),
            [
                specs.TensorSpec([5], tf.float32, 'camera'),
                specs.TensorSpec([3, 2], tf.float32, 'lidar')
            ]
        ]
        replay_table = table.Table(spec, capacity=3)
        variables = replay_table.variables()
        self.assertEqual(3, len(variables))
        self.assertAllEqual(
            ['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],
            [v.name for v in variables])

        expected_values = [
            1 * np.ones(spec[0].shape.as_list()),
            [
                2 * np.ones(spec[1][0].shape.as_list()),
                3 * np.ones(spec[1][1].shape.as_list())
            ]
        ]
        tensors = tf.nest.map_structure(
            lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32),
            expected_values)

        write_op = replay_table.write(0, tensors)
        read_op = replay_table.read(0)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.evaluate(write_op)
        read_value_ = self.evaluate(read_op)
        tf.nest.map_structure(self.assertAllClose, read_value_,
                              expected_values)
Example No. 12
 def _control_circuit_spec(self):
     spec = {
         'alpha': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'beta': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'epsilon': specs.TensorSpec(shape=[2], dtype=tf.float32)
     }
     return spec
Example No. 13
    def __init__(self, time_step_spec, action_script):
        """
        Input:
            time_step_spec -- see tf-agents docs

            action_script -- module or class with attribute 'period' and a
                             'script' dict mapping action names (e.g. 'alpha',
                             'beta', 'epsilon', 'phi') to sequences of values.
        """
        self.period = action_script.period  # periodicity of the protocol

        # load the script of actions and convert to tensors
        self.script = action_script.script
        for a, val in self.script.items():
            self.script[a] = tf.constant(val, dtype=tf.float32)

        # Calculate specs and call init of parent class
        action_spec = {
            a: specs.TensorSpec(shape=C.shape[1:], dtype=tf.float32)
            for a, C in self.script.items()
        }

        policy_state_spec = specs.TensorSpec(shape=[], dtype=tf.int32)

        super(ScriptedPolicy, self).__init__(time_step_spec,
                                             action_spec,
                                             policy_state_spec,
                                             automatic_state_reset=True)
        self._policy_info = ()
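Note that this constructor only touches `action_script.period` and the `action_script.script` dict, so a minimal stand-in consistent with it could look as follows (shapes and values are purely illustrative assumptions):

import numpy as np

class action_script:
    """Hypothetical stub: a 4-step protocol with one scripted array per action name."""
    period = 4
    script = {
        'alpha':   np.zeros([4, 2], dtype=np.float32),
        'beta':    np.ones([4, 2], dtype=np.float32),
        'epsilon': np.zeros([4, 2], dtype=np.float32),
        'phi':     np.zeros([4, 1], dtype=np.float32),
    }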
Example No. 14
 def _data_spec(self):
     return [
         specs.TensorSpec([3], tf.float32, 'action'),
         [
             specs.TensorSpec([5], tf.float32, 'lidar'),
             specs.TensorSpec([3, 2], tf.float32, 'camera')
         ]
     ]
Example No. 15
 def test_variables_calls_build(self):
     observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
     action_spec = specs.TensorSpec([2], tf.float32, 'action')
     net = MockNetwork(observation_spec, action_spec)
     self.assertFalse(net.built)
     variables = net.variables
     self.assertTrue(net.built)
     self.assertLen(variables, 2)
Example No. 16
    def __init__(
            self,
            *args,
            # Optional kwargs
            H=1,
            T=4,
            attn_step=1,
            episode_length=20,
            batch_size=50,
            init="vac",
            reward_kwargs={'reward_mode': 'zero'},
            encoding='square',
            phase_space_rep='wigner',
            **kwargs):
        """
        Args:
            H (int, optional): Horizon for history returned in observations. Defaults to 1.
            T (int, optional): Periodicity of the 'clock' observation. Defaults to 4.
            attn_step (int, optional): step size for hard-coded attention to
                measurement outcomes. For example, set to 4 to return a history
                of measurement outcomes separated by 4 steps -- the spacing at
                which the same stabilizer is measured in the square code. In the
                hexagonal code this can be 2. Defaults to 1.
            episode_length (int, optional): Number of iterations in training episode. Defaults to 20.
            batch_size (int, optional): Vectorized minibatch size. Defaults to 50.
            init (str, optional): Initial quantum state of system. Defaults to "vac".
            reward_kwargs (dict, optional): optional dictionary of parameters
                for the reward function of the RL agent.
            encoding (str, optional): Type of GKP lattice. Defaults to "square".
            phase_space_rep (str, optional): phase space representation to use
                for rendering ('wigner' or 'CF'). Defaults to 'wigner'.
        """
        # Default simulation parameters
        self.H = H
        self.T = T
        self.attn_step = attn_step
        self.episode_length = episode_length
        self.batch_size = batch_size
        self.init = init
        self.phase_space_rep = phase_space_rep

        self.setup_reward(reward_kwargs)
        self.define_stabilizer_code(encoding)
        self._epoch = 0

        # Define action and observation specs
        self.control_circuit = self._control_circuit
        action_spec = self._control_circuit_spec

        observation_spec = {
            'msmt': specs.TensorSpec(shape=[self.H], dtype=tf.float32),
            'clock': specs.TensorSpec(shape=[self.T], dtype=tf.float32),
            'const': specs.TensorSpec(shape=[1], dtype=tf.float32)
        }
        time_step_spec = ts.time_step_spec(observation_spec)

        super().__init__(time_step_spec, action_spec, self.batch_size)
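For reference, `ts.time_step_spec` (conventionally `from tf_agents.trajectories import time_step as ts`) wraps an observation spec into a full `TimeStep` spec with `step_type`, `reward`, `discount`, and `observation` fields. A minimal sketch with a plain observation spec:

import tensorflow as tf
from tf_agents.trajectories import time_step as ts

observation_spec = tf.TensorSpec(shape=[3], dtype=tf.float32, name='obs')
time_step_spec = ts.time_step_spec(observation_spec)

# The result is a TimeStep namedtuple of specs; the observation field is the spec we passed in.
print(time_step_spec.step_type)    # scalar int32 spec named 'step_type'
print(time_step_spec.observation)  # TensorSpec(shape=(3,), dtype=tf.float32, name='obs')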
Example No. 17
 def _control_circuit_spec(self):
     spec = {
         'beta': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'eps1': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'eps2': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'phi': specs.TensorSpec(shape=[1], dtype=tf.float32),
         'theta': specs.TensorSpec(shape=[1], dtype=tf.float32)
     }
     return spec
Example No. 18
 def _get_mock_spec(self):
     spec = [
         specs.TensorSpec([3], tf.float32, 'action'),
         [
             specs.TensorSpec([5], tf.float32, 'lidar'),
             specs.TensorSpec([3, 2], tf.float32, 'camera')
         ]
     ]
     return spec
Example No. 19
 def test_create_variables(self):
   observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
   action_spec = specs.TensorSpec([2], tf.float32, 'action')
   net = MockNetwork(observation_spec, action_spec)
   self.assertFalse(net.built)
   with self.assertRaises(ValueError):
     net.variables  # pylint: disable=pointless-statement
   net.create_variables()
   self.assertTrue(net.built)
   self.assertLen(net.variables, 2)
   self.assertLen(net.trainable_variables, 1)
Example No. 20
 def testNetworkCreate(self):
   observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
   action_spec = specs.TensorSpec([2], tf.float32, 'action')
   net = MockNetwork(observation_spec, action_spec)
   self.assertFalse(net.built)
   with self.assertRaises(ValueError):
     net.variables  # pylint: disable=pointless-statement
   output_spec = network.create_variables(net)
   # MockNetwork adds some variables to observation, which has shape [bs, 1]
   self.assertEqual(output_spec, tf.TensorSpec([1], dtype=tf.float32))
   self.assertTrue(net.built)
   self.assertLen(net.variables, 2)
   self.assertLen(net.trainable_variables, 1)
Example No. 21
    def testSaveRestore(self):
        spec = [
            specs.TensorSpec([3], tf.float32),
            specs.TensorSpec([5], tf.float32, 'lidar'),
            specs.TensorSpec([3, 2], tf.float32, 'lidar')
        ]
        replay_table = table.Table(spec, capacity=3)

        self.evaluate(tf.compat.v1.global_variables_initializer())
        directory = self.get_temp_dir()
        prefix = os.path.join(directory, 'table')
        root = tf.train.Checkpoint(table=replay_table)
        save_path = root.save(prefix)
        root.restore(save_path).assert_consumed().run_restore_ops()
Example No. 22
  def test_create_variables(self):
    observation_spec = specs.TensorSpec([1], tf.float32, 'observation')
    action_spec = specs.TensorSpec([1], tf.float32, 'action')
    input_spec = (observation_spec, action_spec)
    state_spec = tf.TensorSpec([], tf.float32, 'state')
    net = MockStateFullNetwork(input_spec, state_spec)
    self.assertFalse(net.built)
    with self.assertRaises(ValueError):
      net.variables  # pylint: disable=pointless-statement
    output_spec = net.create_variables()

    self.assertEqual(output_spec, tf.TensorSpec([1, 1], dtype=tf.float32))
    self.assertTrue(net.built)
    self.assertLen(net.variables, 2)
    self.assertLen(net.trainable_variables, 1)
Example No. 23
    def get_policy(self):
        def policy_fn(observation, dtype=tf.int32):
            if tf.rank(observation) < 1:
                observation = [observation]

            if self._latent_policy:
                embed = self._embed_state(observation)
            else:
                embed = tf.one_hot(observation, self._num_states)
            distribution = tf.matmul(
                embed, tf.nn.softmax(self._embed_policy_logits, axis=-1))

            policy_info = {'distribution': distribution}
            return (tfp.distributions.Categorical(probs=distribution,
                                                  dtype=dtype), policy_info)

        policy_info_spec = {
            'log_probability':
            specs.TensorSpec([], tf.float32),
            'distribution':
            specs.BoundedTensorSpec([self._num_actions],
                                    tf.float32,
                                    minimum=0.0,
                                    maximum=1.0)
        }
        return policy_fn, policy_info_spec
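`specs.BoundedTensorSpec`, used for the 'distribution' entry above, is a `TensorSpec` with attached minimum/maximum bounds. A minimal sketch of constructing and inspecting one, assuming the tf-agents `specs` module:

import tensorflow as tf
from tf_agents import specs

dist_spec = specs.BoundedTensorSpec([4], tf.float32, minimum=0.0, maximum=1.0)
print(dist_spec.shape, dist_spec.dtype)      # (4,) <dtype: 'float32'>
print(dist_spec.minimum, dist_spec.maximum)  # 0.0 and 1.0, stored as numpy values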
Example No. 24
    def testSampleSingleCorrectProbabilityAsDataset(self, buffer_batch_size):
        max_length = 3
        spec = specs.TensorSpec([], tf.int32, 'action')
        replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            spec, batch_size=buffer_batch_size, max_length=max_length)

        actions = tf.stack([tf.Variable(0).count_up_to(9)] * buffer_batch_size)
        add_op = replay_buffer.add_batch(actions)

        ds = replay_buffer.as_dataset()
        itr = ds.make_initializable_iterator()
        _, buffer_info = itr.get_next()
        probabilities = buffer_info.probabilities

        with self.cached_session() as sess:
            tf.global_variables_initializer().run()
            itr.initializer.run()
            num_adds = 5
            for i in range(1, num_adds):
                sess.run(add_op)
                probabilities_ = sess.run(probabilities)
                expected_probability = (
                    1. /
                    min(i * buffer_batch_size, max_length * buffer_batch_size))
                self.assertAllClose(expected_probability, probabilities_)
Example No. 25
    def testSampleSingleCorrectProbabilityAsDataset(self, buffer_batch_size):
        max_length = 3
        spec = specs.TensorSpec([], tf.int32, 'action')
        replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            spec, batch_size=buffer_batch_size, max_length=max_length)

        actions = tf.stack([tf.Variable(0).count_up_to(9)] * buffer_batch_size)

        self.evaluate(tf.compat.v1.global_variables_initializer())

        ds = replay_buffer.as_dataset()
        if tf.executing_eagerly():
            add_op = lambda: replay_buffer.add_batch(actions)
            itr = iter(ds)
            sample = lambda: next(itr)
        else:
            add_op = replay_buffer.add_batch(actions)
            itr = tf.compat.v1.data.make_initializable_iterator(ds)
            self.evaluate(itr.initializer)
            sample = itr.get_next()

        num_adds = 5
        for i in range(1, num_adds):
            self.evaluate(add_op)
            probabilities_ = self.evaluate(sample)[1].probabilities
            expected_probability = (
                1. /
                min(i * buffer_batch_size, max_length * buffer_batch_size))
            self.assertAllClose(expected_probability, probabilities_)
Example No. 26
    def testSampleSingleCorrectProbability(self, buffer_batch_size):
        max_length = 3
        spec = specs.TensorSpec([], tf.int32, 'action')
        replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            spec, batch_size=buffer_batch_size, max_length=max_length)

        actions = tf.stack([tf.Variable(0).count_up_to(9)] * buffer_batch_size)

        @common.function
        def add(actions):
            replay_buffer.add_batch(actions)

        @common.function
        def probabilities():
            _, buffer_info = replay_buffer.get_next()
            return buffer_info.probabilities

        self.evaluate(tf.compat.v1.global_variables_initializer())

        num_adds = 5
        for i in range(1, num_adds):
            self.evaluate(add(actions))
            probabilities_ = self.evaluate(probabilities())
            expected_probability = (
                1. /
                min(i * buffer_batch_size, max_length * buffer_batch_size))
            self.assertAllClose(expected_probability, probabilities_)
Example No. 27
    def testMultiStepStackedBatchedSampling(self, batch_size):
        spec = specs.TensorSpec([], tf.int64, 'action')
        replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            spec, batch_size=batch_size)

        @common.function(autograph=True)
        def add_data():
            for i in tf.range(10, dtype=tf.int64):
                replay_buffer.add_batch(
                    tf.ones((batch_size, ), dtype=tf.int64) * i)

        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.evaluate(add_data())

        if tf.executing_eagerly():
            steps = lambda: replay_buffer._get_next(
                3,  # pylint: disable=g-long-lambda
                num_steps=2,
                time_stacked=True)[0]
        else:
            steps, _ = replay_buffer._get_next(3,
                                               num_steps=2,
                                               time_stacked=True)
        self.assertEqual(self.evaluate(steps).shape, (3, 2))

        for _ in range(100):
            steps_ = self.evaluate(steps)
            self.assertAllEqual((steps_[:, 0] + 1) % 10, steps_[:, 1])
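The replay-buffer tests above exercise the same add/sample path through graph-mode, dataset, and tf.function front ends. Outside the test harness, a minimal eager-mode sketch of that flow (TF2 assumed, values purely illustrative) is:

import tensorflow as tf
from tf_agents import specs
from tf_agents.replay_buffers import tf_uniform_replay_buffer

spec = specs.TensorSpec([], tf.int64, 'action')
batch_size = 4
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
    spec, batch_size=batch_size, max_length=3)

# Each add_batch() stores one time step for all `batch_size` parallel environments.
for i in range(5):
    replay_buffer.add_batch(tf.fill([batch_size], tf.constant(i, dtype=tf.int64)))

# Uniformly sample a single stored item together with its sampling probability.
item, buffer_info = replay_buffer.get_next()
print(item.numpy(), buffer_info.probabilities.numpy())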
Example No. 28
 def __init__(self, time_step_spec):
     sim_dir = r'E:\VladGoogleDrive\Qulab\GKP\sims'
     # name = r'Benchmarking_HybridMarkovian4Rounds\supervised_lstm\lstm.hdf5'
     name = r'Benchmarking_HybridMarkovian4Rounds\supervised_dnn\dnn.hdf5'
     # name = r'Benchmarking_HybridMarkovian4Rounds\supervised_linear\shallow.hdf5'
     self.model = keras.models.load_model(os.path.join(sim_dir, name))
     action_spec = specs.TensorSpec(shape=[5], dtype=tf.float32)
     super(SupervisedNeuralNet, self).__init__(time_step_spec, action_spec)
Example No. 29
 def _control_circuit_spec(self):
     spec = {  # SBS params
         'beta': specs.TensorSpec(shape=[4, 2], dtype=tf.float32),
         'phi': specs.TensorSpec(shape=[4, 2], dtype=tf.float32),
         'flip': specs.TensorSpec(shape=[4, 2], dtype=tf.float32),
         'detune': specs.TensorSpec(shape=[4, 2], dtype=tf.float32),
         # Murch params
         'Murch_phi': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'Murch_amp': specs.TensorSpec(shape=[2], dtype=tf.float32),
         'Murch_detune_MHz': specs.TensorSpec(shape=[2], dtype=tf.float32),
         # misc
         'cavity_phase': specs.TensorSpec(shape=[1], dtype=tf.float32),
         'Kerr_drive_amp': specs.TensorSpec(shape=[1], dtype=tf.float32)
     }
     return spec
Example No. 30
    def testDuplicateSpecNames(self):
        spec = [
            specs.TensorSpec([3], tf.float32, 'lidar'),
            specs.TensorSpec([5], tf.float32, 'lidar'),
            specs.TensorSpec([3, 2], tf.float32, 'lidar')
        ]
        replay_table = table.Table(spec, capacity=3)

        variables = replay_table.variables()
        self.assertEqual(3, len(variables))
        self.assertAllEqual(
            ['Table/lidar:0', 'Table/lidar_1:0', 'Table/lidar_2:0'],
            [v.name for v in variables])

        expected_slots = ['lidar', 'lidar_1', 'lidar_2']
        self.assertAllEqual(replay_table.slots, expected_slots)
        tensors = replay_table.read(0, expected_slots)
        tf.nest.map_structure(lambda x, y: self.assertEqual(x.shape, y.shape),
                              spec, tensors)