Example #1
  def _LayerWithActivationProcessing(self,
                                     input_tensor=None,
                                     scope='test',
                                     post_activation_bypass=False):

    batch_size, height, width, depth = 5, 128, 128, 3
    if input_tensor is None:
      input_tensor = array_ops.zeros((batch_size, height, width, depth))
    weight_init = init_ops.truncated_normal_initializer
    with ops.name_scope(scope):
      output = layers.conv2d(
          input_tensor,
          depth, [5, 5],
          padding='SAME',
          weights_initializer=weight_init(0.09),
          activation_fn=None,
          normalizer_fn=None,
          biases_initializer=None)

      output = layers.batch_norm(
          output, center=True, scale=True, decay=1.0 - 0.003, fused=True)

      output = nn_ops.relu6(output)
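      # Post-activation processing: scale the relu6 output by two constants and sum the results.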
      scaled_output1 = math_ops.mul(2.0, output)
      scaled_output2 = math_ops.mul(3.0, output)
      output = scaled_output1 + scaled_output2
    return output
Example #2
    def _LayerWithActivationProcessing(self,
                                       input_tensor=None,
                                       scope='test',
                                       post_activation_bypass=False):

        batch_size, height, width, depth = 5, 128, 128, 3
        if input_tensor is None:
            input_tensor = array_ops.zeros((batch_size, height, width, depth))
        weight_init = init_ops.truncated_normal_initializer
        with ops.name_scope(scope):
            output = layers.conv2d(input_tensor,
                                   depth, [5, 5],
                                   padding='SAME',
                                   weights_initializer=weight_init(0.09),
                                   activation_fn=None,
                                   normalizer_fn=None,
                                   biases_initializer=None)

            output = layers.batch_norm(output,
                                       center=True,
                                       scale=True,
                                       decay=1.0 - 0.003,
                                       fused=True)

            output = nn_ops.relu6(output)
            scaled_output1 = math_ops.mul(2.0, output)
            scaled_output2 = math_ops.mul(3.0, output)
            output = scaled_output1 + scaled_output2
        return output
Example #3
  def _testMultiplePartitionedVariables(self, is_training):
    # When weights are partitioned into multiple partitions, the weights variable
    # is followed by an identity -> concat -> identity to group the partitions.
    partitioner = partitioned_variables.fixed_size_partitioner(2)
    graph = ops.Graph()
    with graph.as_default():
      with variable_scope.variable_scope('part', partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
        conv = conv2d(
            input1,
            32, [5, 5],
            stride=2,
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test/test')
        node = math_ops.add(conv, input2, name='test/add')
        node = nn_ops.relu6(node, name='test/relu6')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
      # Check that the weight's quant node was added.
      op_names = [op.name for op in graph.get_operations()]
      self.assertTrue(
          'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in op_names)
Example #4
  def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Scan through all FakeQuant operations, ensuring that the activation
    # isn't in the consumers of the operation. Since activations are folded
    # into the preceding operation during inference, the FakeQuant operation
    # after the activation is all that is needed.
    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Example #5
  def _testRelu6(self, np_features, use_gpu=False):
    np_relu6 = self._npRelu6(np_features)
    with self.test_session(use_gpu=use_gpu):
      relu6 = nn_ops.relu6(np_features)
      tf_relu6 = relu6.eval()
    self.assertAllClose(np_relu6, tf_relu6)
    self.assertShapeEqual(np_relu6, relu6)
Example #6
    def _TestDeviceName(self, fn):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            inputs = array_ops.zeros((batch_size, height, width, depth))
            conv = layers.conv2d(inputs,
                                 32, [5, 5],
                                 stride=2,
                                 padding='SAME',
                                 weights_initializer=self._WeightInit(0.09),
                                 activation_fn=None,
                                 scope='test')
            _ = nn_ops.relu6(conv)

        device_name = '/job:oink/task:0/device:CPU:0'
        q_graph = fn(graph, device_name_or_function=device_name)

        orig_variable_names = set([
            v.name
            for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ])
        q_variables = q_graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        # Ensure that variables were added.
        self.assertTrue(len(orig_variable_names) < len(q_variables))
        # All added variables should have the specified device name.
        for var in q_variables:
            if var.name not in orig_variable_names:
                self.assertEqual(var.device, device_name)
Example #7
  def _TestDeviceName(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      conv = layers.conv2d(
          inputs,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test')
      _ = nn_ops.relu6(conv)

    device_name = '/job:oink/task:0/device:CPU:0'
    if is_training:
      q_graph = quantize_graph.create_training_graph(
          graph, device_name_or_function=device_name)
    else:
      q_graph = quantize_graph.create_eval_graph(
          graph, device_name_or_function=device_name)

    orig_variable_names = set(
        [v.name for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
    q_variables = q_graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    # Ensure that variables were added.
    self.assertTrue(len(orig_variable_names) < len(q_variables))
    # All added variables should have the specified device name.
    for var in q_variables:
      if var.name not in orig_variable_names:
        self.assertEqual(var.device, device_name)
Example #8
    def testInsertQuantOpFailsWhenOpsNotConnected(self):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            inputs = array_ops.zeros((batch_size, height, width, depth))
            conv = conv2d(inputs,
                          32, [5, 5],
                          stride=2,
                          padding='SAME',
                          weights_initializer=self._WeightInit(0.09),
                          activation_fn=None,
                          scope='test')
            relu = nn_ops.relu6(inputs)

        context = quantize._QuantizeContext(graph=graph,
                                            weight_bits=8,
                                            weight_narrow_range=True,
                                            activation_bits=8)
        # Inserting a quantization op between two unconnected ops should fail with
        # ValueError.
        with self.assertRaises(ValueError) as err:
            context._InsertQuantOp('test', conv.op, [relu.op],
                                   'FailingQuantOp')
        self.assertEqual(str(err.exception),
                         'Some inputs not quantized for ops: [Relu6]')
Example #9
    def _testMultiplePartitionedVariables(self, is_training):
        # When weights are partitioned into multiple partitions, the weights variable
        # is followed by an identity -> concat -> identity to group the partitions.
        partitioner = partitioned_variables.fixed_size_partitioner(2)
        graph = ops.Graph()
        with graph.as_default():
            with variable_scope.variable_scope('part',
                                               partitioner=partitioner):
                batch_size, height, width, depth = 5, 128, 128, 3
                input1 = array_ops.zeros((batch_size, height, width, depth))
                input2 = array_ops.zeros(
                    (batch_size, height / 2, width / 2, 32))
                conv = conv2d(input1,
                              32, [5, 5],
                              stride=2,
                              padding='SAME',
                              weights_initializer=self._WeightInit(0.09),
                              activation_fn=None,
                              scope='test/test')
                node = math_ops.add(conv, input2, name='test/add')
                node = nn_ops.relu6(node, name='test/relu6')

            quantize.Quantize(graph,
                              is_training,
                              weight_bits=8,
                              activation_bits=8)
            # Check that the weight's quant node was added.
            op_names = [op.name for op in graph.get_operations()]
            self.assertTrue(
                'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in
                op_names)
Example #10
  def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Scan through all FakeQuant operations, ensuring that the activation
    # isn't in the consumers of the operation. Since activations are folded
    # into the preceding operation during inference, the FakeQuant operation
    # after the activation is all that is needed.
    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Example #11
    def _testRelu6(self, np_features, use_gpu=False):
        np_relu6 = self._npRelu6(np_features)
        with self.test_session(use_gpu=use_gpu):
            relu6 = nn_ops.relu6(np_features)
            tf_relu6 = relu6.eval()
        self.assertAllClose(np_relu6, tf_relu6)
        self.assertShapeEqual(np_relu6, relu6)
Example #12
    def _TestOverlappingPostActivationBypassQuantized(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            conv_input = array_ops.zeros((batch_size, height, width, depth))
            conv1 = conv2d(conv_input,
                           32, [5, 5],
                           stride=2,
                           padding='SAME',
                           weights_initializer=self._WeightInit(0.09),
                           activation_fn=nn_ops.relu6,
                           scope='test/test1')

            # The bypass of this conv is the post activation bypass of the previous
            # conv.
            conv2 = conv2d(conv_input,
                           32, [5, 5],
                           stride=2,
                           padding='SAME',
                           weights_initializer=self._WeightInit(0.09),
                           activation_fn=None,
                           scope='test/test2')

            bypass_tensor = math_ops.add(conv1, conv2, name='test/add')
            _ = nn_ops.relu6(bypass_tensor, name='test/output')

            quantize.Quantize(graph,
                              is_training,
                              weight_bits=8,
                              activation_bits=8)

            # Ensure that the bypass node is preceded by a FakeQuantWithMinMaxVars
            # operation, and NOT followed by one.
            self.assertTrue('FakeQuantWithMinMaxVars' not in
                            [c.type for c in bypass_tensor.consumers()])
            self.assertTrue('FakeQuantWithMinMaxVars' in
                            [i.op.type for i in bypass_tensor.op.inputs])

            # Ensure that all the convs and activations are quantized.
            op_names = [op.name for op in graph.get_operations()]
            self.assertTrue(
                'test/test1/weights_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertTrue(
                'test/test2/weights_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertTrue(
                'test/test1/act_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertTrue(
                'test/act_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertEqual(
                'Relu6',
                graph.get_operation_by_name(
                    'test/test1/act_quant/FakeQuantWithMinMaxVars').inputs[0].
                op.type)
            self.assertEqual(
                'Relu6',
                graph.get_operation_by_name(
                    'test/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type
            )
Example #13
  def testOneConsumerOperation(self):
    graph = ops.Graph()
    with graph.as_default():
      input_tensor = array_ops.zeros((1, 2, 3, 4))
      output_tensor = nn_ops.relu6(input_tensor)
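      # The relu6 op is the only consumer of input_tensor.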

    input_to_ops_map = input_to_ops.InputToOps(graph)
    consumer_operations = input_to_ops_map.ConsumerOperations(input_tensor.op)

    self.assertEqual(consumer_operations, {output_tensor.op})
Example #15
  def _TestOverlappingPostActivationBypassQuantized(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      conv_input = array_ops.zeros((batch_size, height, width, depth))
      conv1 = conv2d(
          conv_input,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=nn_ops.relu6,
          scope='test/test1')

      # The bypass of this conv is the post activation bypass of the previous
      # conv.
      conv2 = conv2d(
          conv_input,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test/test2')

      bypass_tensor = math_ops.add(conv1, conv2, name='test/add')
      _ = nn_ops.relu6(bypass_tensor, name='test/output')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

      # Ensure that the bypass node is preceded by a FakeQuantWithMinMaxVars
      # operation, and NOT followed by one.
      self.assertTrue('FakeQuantWithMinMaxVars' not in
                      [c.type for c in bypass_tensor.consumers()])
      self.assertTrue('FakeQuantWithMinMaxVars' in
                      [i.op.type for i in bypass_tensor.op.inputs])

      # Ensure that all the convs and activations are quantized.
      op_names = [op.name for op in graph.get_operations()]
      self.assertTrue(
          'test/test1/weights_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue(
          'test/test2/weights_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue(
          'test/test1/act_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue('test/act_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertEqual(
          'Relu6',
          graph.get_operation_by_name(
              'test/test1/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)
      self.assertEqual(
          'Relu6',
          graph.get_operation_by_name(
              'test/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)
Example #16
  def testRelu6GradGrad(self):
    inputs = constant_op.constant([[-2, -1, 1, 3], [5, 7, 8, 9]],
                                  dtype=dtypes.float32)
    x_init_value = np.array([[-3.5, -1.5, 2, 4], [4.5, 7.5, 8.5, 11]])
    r = nn_ops.relu6(inputs)
    r_g = gradients_impl.gradients(r, inputs)[0]
    with self.test_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          r_g, r_g.get_shape().as_list(),
          x_init_value=x_init_value)
      self.assertLess(error, 1e-4)
Example #17
def with_fused_activation_function(input_tensor, fn_name):
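    """Apply the fused activation function named by `fn_name` (e.g. 'RELU6') to `input_tensor`."""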
    if fn_name is None or fn_name == "NONE":
        return input_tensor
    if fn_name == "RELU":
        return nn_ops.relu(input_tensor)
    if fn_name == "RELU6":
        return nn_ops.relu6(input_tensor)
    if fn_name == "RELU_N1_TO_1":
        return math_ops.maximum(-1, math_ops.minimum(input_tensor, 1))
    if fn_name == "TANH":
        return math_ops.tanh(input_tensor)
    raise AssertionError("Unknown fused_activation_function {}".format(fn_name))
Example #18
  def test_summarize_activation_relu6(self):
    with self.cached_session():
      var = variables.Variable(1)
      op = nn_ops.relu6(var, name='SummaryTest')
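      # For a Relu6 op, summarize_activation adds zero-fraction and six-fraction summaries plus a histogram.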
      summary_op = summaries_lib.summarize_activation(op)

      self.assertEquals(summary_op.op.type, 'HistogramSummary')
      names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
      self.assertEquals(len(names), 3)
      self.assertIn(u'SummaryTest/zeros', names)
      self.assertIn(u'SummaryTest/sixes', names)
      self.assertIn(u'SummaryTest/activation', names)
Example #19
    def _TestInsertQuantOpInSeparableConv2d(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros((batch_size, height, width, depth))
            input2 = array_ops.zeros(
                (batch_size, height / 2, width / 2, depth))
            conv = separable_conv2d(input1,
                                    3, [5, 5],
                                    stride=2,
                                    depth_multiplier=1.0,
                                    padding='SAME',
                                    weights_initializer=self._WeightInit(0.09),
                                    activation_fn=None,
                                    scope='test/test')
            node = math_ops.add(conv, input2, name='test/add')
            node = nn_ops.relu6(node, name='test/relu6')
            update_barrier = control_flow_ops.no_op(name='update_barrier')
            with ops.control_dependencies([update_barrier]):
                array_ops.identity(node, name='control_dependency')

        quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
        # Check if output of bias add is quantized
        quantization_node_name = 'FakeQuantWithMinMaxVars'
        conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                                 quantization_node_name)
        self.assertEqual(conv_quant.type, quantization_node_name)

        # Check if weights for both convs inside separable conv are quantized
        pointwise_weight_quant = graph.get_operation_by_name(
            'test/test/weights_quant/' + quantization_node_name)
        self.assertEqual(pointwise_weight_quant.type, quantization_node_name)
        depthwise_weight_quant = graph.get_operation_by_name(
            'test/test/separable_conv2d/weights_quant/' +
            quantization_node_name)
        self.assertEqual(depthwise_weight_quant.type, quantization_node_name)

        # Check if activations after first depthwise conv are quantized.
        depthwise_act_quant = graph.get_operation_by_name(
            'test/test/separable_conv2d/act_quant/' + quantization_node_name)
        self.assertEqual(depthwise_act_quant.type, quantization_node_name)

        for op in graph.get_operations():
            if op.type == quantization_node_name:
                quant_op = graph.get_operation_by_name(op.name)
                # Scan through all FakeQuant operations, ensuring that the activation
                # identity op isn't in the consumers of the operation.
                consumers = []
                for output in quant_op.outputs:
                    consumers.extend(output.consumers())

                self.assertNotIn('test/relu6', [c.name for c in consumers])
Example #20
    def _ConvLayer(self):
        """Add a basic convolution layer to the default graph."""
        batch_size, height, width, depth = 5, 128, 128, 3
        inputs = array_ops.zeros((batch_size, height, width, depth))
        weight_init = init_ops.truncated_normal_initializer
        conv = layers.conv2d(inputs,
                             32, [5, 5],
                             stride=2,
                             padding='SAME',
                             weights_initializer=weight_init(0.09),
                             activation_fn=None,
                             scope='test')
        _ = nn_ops.relu6(conv)
Example #21
  def _TestInsertQuantOpInSeparableConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, depth))
      conv = separable_conv2d(
          input1,
          3, [5, 5],
          stride=2,
          depth_multiplier=1.0,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
    # Check if output of bias add is quantized
    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Check if weights for both convs inside separable conv are quantized
    pointwise_weight_quant = graph.get_operation_by_name(
        'test/test/weights_quant/' + quantization_node_name)
    self.assertEqual(pointwise_weight_quant.type, quantization_node_name)
    depthwise_weight_quant = graph.get_operation_by_name(
        'test/test/separable_conv2d/weights_quant/' + quantization_node_name)
    self.assertEqual(depthwise_weight_quant.type, quantization_node_name)

    # Check if activations after first depthwise conv are quantized.
    depthwise_act_quant = graph.get_operation_by_name(
        'test/test/separable_conv2d/act_quant/' + quantization_node_name)
    self.assertEqual(depthwise_act_quant.type, quantization_node_name)

    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        # Scan through all FakeQuant operations, ensuring that the activation
        # identity op isn't in the consumers of the operation.
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Example #22
    def conv(self, input_tensor):
        # Random filters and bias, then conv2d -> bias_add -> relu6 in NHWC layout.
        filters = np.random.uniform(low=-10,
                                    high=10,
                                    size=(2, 3, 3, 2)).astype('f4')
        bias = np.random.uniform(low=0, high=10, size=(2)).astype('f4')
        out = nn_ops.conv2d(input_tensor,
                            filters,
                            strides=[1, 1, 2, 1],
                            dilations=[1, 1, 1, 1],
                            padding='SAME',
                            data_format='NHWC')
        out = nn_ops.bias_add(out, bias, data_format='NHWC')
        out = nn_ops.relu6(out)
        return {'output': out}
Example #23
  def testSeveralConsumerOperations(self):
    graph = ops.Graph()
    with graph.as_default():
      input_tensor = array_ops.zeros((1, 2, 3, 4))
      output_tensor_1 = nn_ops.relu6(input_tensor)
      output_tensor_2 = input_tensor + output_tensor_1
      output_tensor_3 = input_tensor * output_tensor_2
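      # input_tensor is consumed directly by the relu6, add, and mul ops.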

    input_to_ops_map = input_to_ops.InputToOps(graph)
    consumer_operations = input_to_ops_map.ConsumerOperations(input_tensor.op)

    self.assertEqual(consumer_operations,
                     {output_tensor_1.op, output_tensor_2.op,
                      output_tensor_3.op})
Example #25
  def _ConvLayer(self):
    """Add a basic convolution layer to the default graph."""
    batch_size, height, width, depth = 5, 128, 128, 3
    inputs = array_ops.zeros((batch_size, height, width, depth))
    weight_init = init_ops.truncated_normal_initializer
    conv = layers.conv2d(
        inputs,
        32, [5, 5],
        stride=2,
        padding='SAME',
        weights_initializer=weight_init(0.09),
        activation_fn=None,
        scope='test')
    _ = nn_ops.relu6(conv)
Example #26
  def testRelu6GradGrad(self):
    inputs = constant_op.constant(
        [[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32)
    x_init_value = np.array([[-3.5, -1.5, 2, 4], [4.5, 7.5, 8.5, 11]])
    r = nn_ops.relu6(inputs)
    r_g = gradients_impl.gradients(r, inputs)[0]
    with self.test_session():
      error = gradient_checker.compute_gradient_error(
          inputs,
          inputs.get_shape().as_list(),
          r_g,
          r_g.get_shape().as_list(),
          x_init_value=x_init_value)
      self.assertLess(error, 1e-4)
Example #27
  def testGradientFloat32(self):
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.relu6(x, name="relu6")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    print("relu6 (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)
Example #28
  def testGradientFloat32(self):
    with self.cached_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.relu6(x, name="relu6")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    print("relu6 (float32) gradient err = ", err)
    self.assertLess(err, 1e-4)
Example #29
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with vs.variable_scope(scope
                               or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            c, h = array_ops.split(1, 2, state)
            concat = linear([inputs, h], 4 * self._num_units, True)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(1, 4, concat)

            new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
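            # Uses relu6 in place of the usual tanh when producing the new hidden state.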
            new_h = relu6(new_c) * sigmoid(o)

            return new_h, array_ops.concat(1, [new_c, new_h])
Example #30
  def testInsertQuantOpFailsWhenOpsNotConnected(self):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      conv = conv2d(inputs, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test')
      relu = nn_ops.relu6(inputs)

    # Inserting a quantization op between two unconnected ops should fail with
    # ValueError.
    with self.assertRaises(ValueError) as err:
      quantize._InsertQuantOp('test', conv.op, [relu.op], 'FailingQuantOp')
    self.assertEqual(
        str(err.exception), 'Some inputs not quantized for ops: [Relu6]')
Example #31
  def _TestActivationRewriteWithScope(self, rewrite_fn):
    graph = ops.Graph()
    with graph.as_default():
      output = self._LayerWithIdentity(scope='scope1')
      with ops.name_scope('scope2'):
        output = nn_ops.relu6(output)
        scaled_output1 = math_ops.mul(2.0, output)
        scaled_output2 = math_ops.mul(3.0, output)
        output = scaled_output1 + scaled_output2
      rewrite_fn(graph)

      op_names = [op.name for op in graph.get_operations()]
      # The weights and activation of scope1 are quantized, but not those of scope2.
      self.assertTrue(any('scope1/Conv/act_quant' in name for name in op_names))
      self.assertTrue(
          any('scope1/Conv/weights_quant' in name for name in op_names))

      for op_name in op_names:
        if op_name.startswith('scope2'):
          self.assertTrue('FakeQuant' not in op_name)
Example #33
  def _ConvLayer(
      self, input_tensor=None, scope='test', pre_activation_bypass=False,
      post_activation_bypass=False):
    """Add a basic convolution layer to the default graph."""
    batch_size, height, width, depth = 5, 128, 128, 3
    if input_tensor is None:
      input_tensor = array_ops.zeros((batch_size, height, width, depth))
    weight_init = init_ops.truncated_normal_initializer
    with ops.name_scope(scope):
      output = layers.conv2d(
          input_tensor,
          depth, [5, 5],
          padding='SAME',
          weights_initializer=weight_init(0.09),
          activation_fn=None)
      if pre_activation_bypass:
        output += input_tensor
      output = nn_ops.relu6(output)
      if post_activation_bypass:
        output += input_tensor
    return output
Example #35
  def _TestDefaultGraph(self, fn):
    with ops.Graph().as_default() as g:
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      conv = layers.conv2d(
          inputs,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test')
      _ = nn_ops.relu6(conv)

      orig_variable_names = set(
          [v.name for v in g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

      fn()

      q_variables = g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      # Ensure that variables were added.
      self.assertTrue(len(orig_variable_names) < len(q_variables))
Example #36
    def _testRelu6(self, np_features):
        np_relu6 = self._npRelu6(np_features)
        tf_relu6 = nn_ops.relu6(np_features)
        self.assertAllClose(np_relu6, tf_relu6)
        self.assertShapeEqual(np_relu6, tf_relu6)
Example #37
    def __call__(self, inputs, state, scope=None):
        """Basic RNN with ReLU6: output = new_state = relu6(W * input + U * state + B)."""
        with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
            # output = tanh(linear([inputs, state], self._num_units, False))
            output = relu6(linear([inputs, state], self._num_units, True))
        return output, output
Example #38
  def _testRelu6(self, np_features):
    np_relu6 = self._npRelu6(np_features)
    tf_relu6 = nn_ops.relu6(np_features)
    self.assertAllClose(np_relu6, tf_relu6)
    self.assertShapeEqual(np_relu6, tf_relu6)