Example #1
def _BuildDummyPipelineCnn(num_splits=4,
                           num_micro_batches=8,
                           micro_batch_size=None):
    """Construct a dummy layer that consist of 16 3x3 conv layers.

  In addition, each conv layer increments a count every time step.

  Args:
    num_splits: number of cells for pipeline cnn
    num_micro_batches: number of time steps.
    micro_batch_size: Size of a micro batch.

  Returns:
    A PipeliningLayer layer.
  """
    assert num_splits in [1, 2, 4, 8, 16]
    num_layers = 16
    layers = []
    for i in range(num_layers):
        layers.append(_SimpyLayer.Params().Set(name='layer_{}'.format(i)))

    if num_splits == 1:
        p = FeatureExtractionLayer.Params().Set(name='seq', sub=layers)
    else:
        cell_tpl = []
        layers_per_split = num_layers // num_splits
        num_act_outputs = 0
        num_act_inputs = 0
        act_fetch_layers = None
        for split in range(num_splits):
            sub = layers[split * layers_per_split:(split + 1) *
                         layers_per_split]
            if split == 0:
                sub.append(FetchLayer.Params().Set(name='fetch'))
                num_act_outputs = 1
                act_fetch_layers = ['fetch']
            else:
                num_act_inputs = 1
                act_fetch_layers = []
            split_layer = FeatureExtractionLayer.Params().Set(
                name='split_{}'.format(split),
                sub=sub,
                act_fetch_layers=act_fetch_layers,
                num_act_inputs=num_act_inputs,
                num_act_outputs=num_act_outputs)
            cell_tpl.append(split_layer)
        p = PipeliningLayer.Params().Set(name='pipeline',
                                         num_micro_batches=num_micro_batches,
                                         micro_batch_size=micro_batch_size,
                                         cell_tpl=cell_tpl,
                                         before_tpl=[])
    layer = p.Instantiate()
    return layer
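
A minimal sketch of how this helper might be exercised end to end. The `tensorflow.compat.v1` import and session boilerplate are assumptions here, mirroring what Example #2 below does with the lingvo test fixtures:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
with tf.Graph().as_default(), tf.Session() as sess:
    net = _BuildDummyPipelineCnn(num_splits=4, num_micro_batches=8)
    inputs = tf.random.uniform([16, 8, 8, 1], seed=12345)
    # With num_splits > 1 the FetchLayer activation is threaded through the
    # pipeline, so FProp returns (logits, aux_logits); with num_splits == 1
    # it returns a single tensor (see Example #2).
    logits, aux_logits = net.FPropDefaultTheta(inputs)
    sess.run(tf.global_variables_initializer())
    print(sess.run(logits).shape)  # (16, 8, 8, 1)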
Example #2
    def _verify_timestep_counts(self,
                                num_splits,
                                auto_partition=False,
                                micro_batch_size=None):
        num_micro_batches = 8
        batch_size = 16
        with self.session(graph=tf.Graph()) as sess:
            tf.random.set_seed(1245)
            inputs = tf.random.uniform([batch_size, 8, 8, 1], seed=12345)
            if auto_partition:
                layers = [
                    _SimpyLayer.Params().Set(name='layer_{}'.format(i))
                    for i in range(16)
                ]
                net = PipeliningLayer.Params().Set(
                    name='pipeline',
                    num_micro_batches=num_micro_batches,
                    cell_tpl=_Partition(layers, num_splits,
                                        tshape.Shape([batch_size, 8, 8,
                                                      1]))).Instantiate()
            else:
                net = _BuildDummyPipelineCnn(
                    num_splits=num_splits,
                    micro_batch_size=micro_batch_size,
                    num_micro_batches=num_micro_batches)
            endpoints = net.FPropDefaultTheta(inputs)
            if isinstance(endpoints, (list, tuple)):
                logits, aux_logits = endpoints
            else:
                logits = endpoints
                aux_logits = None
            loss = tf.reduce_mean(logits)
            grads = tf.gradients(loss, tf.trainable_variables())
            grad_norm = tf.sqrt(py_utils.SumSquared(grads))
            ts = net.GetAccumulatorValues().Flatten()

            sess.run(tf.global_variables_initializer())
            grad_norm_val, ts_vals = sess.run([grad_norm, ts])
            test_utils.CompareToGoldenSingleFloat(self, 0.268087,
                                                  grad_norm_val)
            # Accumulator values should equal the number of time steps in the pipeline.
            for ts_val in list(ts_vals):
                expected_ts = num_micro_batches if num_splits > 1 else 1
                self.assertEqual(ts_val, expected_ts)
            if aux_logits is not None:
                aux_logit_tensor = sess.run(aux_logits)
                self.assertEqual(aux_logit_tensor.shape, (batch_size, 8, 8, 1))
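
Concrete test cases might drive the helper above like this (the method names are illustrative, not from the source):

    def testDummyPipelineCnnOneSplit(self):
        self._verify_timestep_counts(num_splits=1)

    def testDummyPipelineCnnTwoSplits(self):
        self._verify_timestep_counts(num_splits=2)

    def testDummyPipelineCnnAutoPartition(self):
        self._verify_timestep_counts(num_splits=4, auto_partition=True)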
Example #3
def testDeterministicDropoutInsideFunctionalWhile(self):
    with self.session() as sess:
        cells = FeatureExtractionLayer.Params().Set(
            name='cell',
            sub=[
                DeterministicDropoutLayer.Params().Set(
                    name='dropout', keep_prob=0.7)
            ])
        p = PipeliningLayer.Params().Set(name='pipe', cell_tpl=[cells])
        x = tf.ones([2, 3], dtype=tf.float32)
        model = p.cls(p)
        y = model.FPropDefaultTheta(x)
        py_utils.GetOrCreateGlobalStep()
        tf.global_variables_initializer().run()
        y_val = sess.run(y)
        self.assertAllClose([
            [1.0 / 0.7, 1.0 / 0.7, 1.0 / 0.7],
            [0.0, 0.0, 1.0 / 0.7],
        ], y_val)
        self.assertAllClose(5.7142859, np.sum(y_val))
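
The golden values follow directly from inverted dropout: each surviving unit is scaled by 1/keep_prob, and four of the six units survive here, so the output sums to 4/0.7 ≈ 5.7142859. A standalone NumPy check of that arithmetic:

import numpy as np

keep_prob = 0.7
expected = np.array([
    [1.0 / keep_prob, 1.0 / keep_prob, 1.0 / keep_prob],
    [0.0, 0.0, 1.0 / keep_prob],
])
assert np.isclose(expected.sum(), 5.7142859)  # == 4 / keep_prob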
Example #4
def testDummyPipelineCnnNestedMapInput(self):
    batch_size = 16
    num_layers = 4
    cells = []
    with self.session(graph=tf.Graph()) as sess:
        for i in range(num_layers):
            cells.append(_SimpyLayerWithNestedMapInput.Params().Set(
                name='layer_{}'.format(i)))
        p = PipeliningLayer.Params().Set(
            name='pipeline',
            num_micro_batches=8,
            micro_batch_size=2,
            nested_map_fprop=True,
            cell_tpl=cells,
            before_tpl=[])
        layer = p.Instantiate()
        tf.random.set_seed(1245)
        inputs = tf.random.uniform([batch_size, 8, 8, 1], seed=12345)
        outputs = layer.FPropDefaultTheta(
            py_utils.NestedMap(vec=inputs, paddings=None))
        sess.run(tf.global_variables_initializer())
        sess.run(outputs.vec)
        self.assertEqual(outputs.vec.shape, (batch_size, 8, 8, 1))
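
Setting `nested_map_fprop=True` lets the pipeline carry a structure of tensors instead of a flat tuple. `py_utils.NestedMap` is a dict subclass with attribute access, so the input above could equally be assembled field by field (a sketch reusing `layer` and `inputs` from the test):

inp = py_utils.NestedMap()
inp.vec = inputs
inp.paddings = None
# Equivalent to py_utils.NestedMap(vec=inputs, paddings=None); fields are
# addressable both as inp.vec and inp['vec'].
outputs = layer.FPropDefaultTheta(inp)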
Example #5
def testDropoutInRecurrent(self, splits=1, num_micro_batches=1):
  assert splits in [1, 2, 4]
  with self.session() as sess:
    tf.set_random_seed(12345)
    num_layers = 4
    py_utils.GetOrCreateGlobalStep()
    # Build a model with 4 dropout layers.
    layers = []
    for l in range(num_layers):
      layers.append(DeterministicDropoutLayer.Params().Set(
          name='dropout_{}'.format(l), keep_prob=0.7))
    # Divide the model into `splits` partitions.
    cell_tpl = []
    layers_per_split = num_layers // splits
    for i in range(splits):
      sub = layers[i * layers_per_split:(i + 1) * layers_per_split]
      cell_tpl.append(FeatureExtractionLayer.Params().Set(
          name='cell_{}'.format(i), sub=sub))
    # Parallelize the partitions with a pipeline.
    p = PipeliningLayer.Params().Set(
        name='pipeline',
        num_micro_batches=num_micro_batches,
        cell_tpl=cell_tpl)
    # Fake input.
    x = tf.ones([2, 3])
    # Construct weights.
    w = tf.get_variable(
        'w', shape=[2, 3], initializer=tf.constant_initializer([[1] * 3] * 2))
    mdl = p.cls(p)
    y = mdl.FPropDefaultTheta(x * w)
    # Construct the loss so that the gradient equals the final activation.
    loss = tf.reduce_sum(y)
    grads = py_utils.ComputeGradients(loss, py_utils.NestedMap(w=w))
    tf.global_variables_initializer().run()
    y_val = sess.run(y)
    grads_val = sess.run(grads)['w'][1]
    self.assertAllClose(y_val, grads_val)
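
Why the assertion holds: each deterministic dropout layer is an elementwise multiply by mask / keep_prob, so y = (x * w) * m for some fixed tensor m, and d(sum(y))/dw = x * m, which equals y whenever x and w are all ones. A plain-NumPy sanity check of that identity (the mask here is illustrative, not the one the test samples):

import numpy as np

rng = np.random.default_rng(0)
keep_prob = 0.7
x = np.ones((2, 3))
w = np.ones((2, 3))
# Combined effect of four dropout layers: one fixed elementwise scale m.
m = np.prod(rng.binomial(1, keep_prob, size=(4, 2, 3)) / keep_prob, axis=0)
y = x * w * m
grad_w = x * m  # d(sum(y)) / dw, computed by hand
assert np.allclose(y, grad_w)  # holds because x == w == 1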