Example 1
    def _TestSkipReshapeQuantization(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros((batch_size, height, width, depth))
            conv = conv2d(input1,
                          32, [5, 5],
                          stride=2,
                          padding='SAME',
                          weights_initializer=self._WeightInit(0.09),
                          activation_fn=nn_ops.relu6,
                          scope='test/test')

            reshape = array_ops.reshape(
                conv, (int(10), int(height / 2), int(width / 2), int(16)))

            # Insert a fake quant node after the reshape. We will check that one
            # isn't inserted before it.
            array_ops.fake_quant_with_min_max_vars(reshape, -1, 1)

            quantize.Quantize(graph,
                              is_training,
                              weight_bits=8,
                              activation_bits=8)

            # Ensure that there isn't a FakeQuant added before the reshape.
            self.assertFalse('FakeQuantWithMinMaxVars' in
                             [i.op.type for i in reshape.op.inputs])

        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros((batch_size, height, width, depth))
            conv = conv2d(input1,
                          32, [5, 5],
                          stride=2,
                          padding='SAME',
                          weights_initializer=self._WeightInit(0.09),
                          activation_fn=nn_ops.relu6,
                          scope='test/test')

            reshape = array_ops.reshape(
                conv, (int(10), int(height / 2), int(width / 2), int(16)))

            # If no fake quant is added after the reshape, a FakeQuant should be added
            # before the reshape.
            quantize.Quantize(graph,
                              is_training,
                              weight_bits=8,
                              activation_bits=8)

            # Ensure that a FakeQuant is added before the reshape.
            self.assertTrue('FakeQuantWithMinMaxVars' in
                            [i.op.type for i in reshape.op.inputs])
Example 2
    def _TestInsertQuantOpForAddAfterSeparableConv2d(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros((batch_size, height, width, depth))
            input2 = array_ops.zeros(
                (batch_size, height / 2, width / 2, depth))
            conv = separable_conv2d(input1,
                                    None, [5, 5],
                                    stride=2,
                                    depth_multiplier=1.0,
                                    padding='SAME',
                                    weights_initializer=self._WeightInit(0.09),
                                    activation_fn=None,
                                    scope='test/test')
            node = math_ops.add(conv, input2, name='test/add')
            node = array_ops.identity(node, name='test/identity')
            update_barrier = control_flow_ops.no_op(name='update_barrier')
            with ops.control_dependencies([update_barrier]):
                array_ops.identity(node, name='control_dependency')

        quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

        quantization_node_name = 'FakeQuantWithMinMaxVars'
        add_quant = graph.get_operation_by_name('test/add_quant/' +
                                                quantization_node_name)
        self.assertEqual(add_quant.type, quantization_node_name)
Example 3
 def _TestLayerActivationQuantized(self, is_training):
     graph = ops.Graph()
     with graph.as_default():
         batch_size, height, width, depth = 5, 128, 128, 3
         input1 = array_ops.zeros((batch_size, height, width, depth))
         _ = conv2d(input1,
                    32, [5, 5],
                    stride=2,
                    padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=nn_ops.relu6,
                    biases_initializer=None,
                    scope='test')
         # Ensure that both weights and output of activations are quantized
         # when we have a conv->relu6 with no bias add
         quantize.Quantize(graph,
                           is_training,
                           weight_bits=8,
                           activation_bits=8)
         activation_op = graph.get_operation_by_name('test/Relu6')
         conv_op = graph.get_operation_by_name('test/Conv2D')
         self.assertTrue('test/weights_quant/FakeQuantWithMinMaxVars:0' in
                         [tensor_in.name for tensor_in in conv_op.inputs])
         self.assertTrue(
             'FakeQuantWithMinMaxVars' in
             [op.type for op in activation_op.outputs[0].consumers()])
Example 4
    def _testMultiplePartitionedVariables(self, is_training):
        # When weights are partitioned into multiple partitions, the weights
        # variable is followed by an identity -> concat -> identity to group the
        # partitions.
        partitioner = partitioned_variables.fixed_size_partitioner(2)
        graph = ops.Graph()
        with graph.as_default():
            with variable_scope.variable_scope('part',
                                               partitioner=partitioner):
                batch_size, height, width, depth = 5, 128, 128, 3
                input1 = array_ops.zeros((batch_size, height, width, depth))
                input2 = array_ops.zeros(
                    (batch_size, height / 2, width / 2, 32))
                conv = conv2d(input1,
                              32, [5, 5],
                              stride=2,
                              padding='SAME',
                              weights_initializer=self._WeightInit(0.09),
                              activation_fn=None,
                              scope='test/test')
                node = math_ops.add(conv, input2, name='test/add')
                node = nn_ops.relu6(node, name='test/relu6')

            quantize.Quantize(graph,
                              is_training,
                              weight_bits=8,
                              activation_bits=8)
            # Check that the weight's quant node was added.
            op_names = [op.name for op in graph.get_operations()]
            self.assertTrue(
                'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in
                op_names)
Example 5
    def _TestPostActivationBypassQuantized(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros((batch_size, height, width, depth))
            input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
            conv = conv2d(input1,
                          32, [5, 5],
                          stride=2,
                          padding='SAME',
                          weights_initializer=self._WeightInit(0.09),
                          activation_fn=array_ops.identity,
                          scope='test/test')
            bypass_tensor = math_ops.add(conv, input2, name='test/add')
            # The output of the post_activation bypass will be another layer.
            _ = conv2d(bypass_tensor,
                       32, [5, 5],
                       stride=2,
                       padding='SAME',
                       weights_initializer=self._WeightInit(0.09),
                       activation_fn=array_ops.identity,
                       scope='test/unused')

            quantize.Quantize(graph,
                              is_training,
                              weight_bits=8,
                              activation_bits=8)

            # Ensure that the bypass node is preceded by and followed by a
            # FakeQuantWithMinMaxVar operation, since the output of the Add isn't an
            # activation.
            self.assertTrue('FakeQuantWithMinMaxVars' in
                            [c.type for c in bypass_tensor.consumers()])
            self.assertTrue('FakeQuantWithMinMaxVars' in
                            [i.op.type for i in bypass_tensor.op.inputs])
Example 6
  def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Scan through all FakeQuant operations, ensuring that the activation
    # isn't in the consumers of the operation. Since activations are folded
    # into the preceding operation during inference, the FakeQuant operation
    # after the activation is all that is needed.
    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Example 7
    def _testWithNonMatchingNameScope(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            with graph.name_scope('name_scope'):
                batch_size, height, width, depth = 5, 128, 128, 3
                input1 = array_ops.zeros((batch_size, height, width, depth))
                _ = conv2d(input1,
                           32, [5, 5],
                           stride=2,
                           padding='SAME',
                           weights_initializer=self._WeightInit(0.09),
                           activation_fn=None,
                           scope='test')

        op_names_before_quantize = set(
            [op.name for op in graph.get_operations()])
        quantize.Quantize(graph,
                          is_training,
                          weight_bits=8,
                          activation_bits=8,
                          scope='NonExisting/')
        op_names_after_quantize = set(
            [op.name for op in graph.get_operations()])

        # No ops should be inserted or removed.
        self.assertEqual(op_names_before_quantize, op_names_after_quantize)
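
A companion sketch, not part of the original test file: assuming the same test helpers as above, a scope argument that does match the graph's name scope should cause quantization ops to be inserted, which can be checked by comparing the op sets before and after the rewrite.

    def _testWithMatchingNameScope_sketch(self, is_training):
        # Hypothetical companion to the test above (assumption): with a scope
        # that matches the conv's name scope, quantize.Quantize should insert
        # new FakeQuant-related ops into the graph.
        graph = ops.Graph()
        with graph.as_default():
            with graph.name_scope('name_scope'):
                batch_size, height, width, depth = 5, 128, 128, 3
                input1 = array_ops.zeros((batch_size, height, width, depth))
                _ = conv2d(input1,
                           32, [5, 5],
                           stride=2,
                           padding='SAME',
                           weights_initializer=self._WeightInit(0.09),
                           activation_fn=None,
                           scope='test')

        op_names_before_quantize = set(
            [op.name for op in graph.get_operations()])
        quantize.Quantize(graph,
                          is_training,
                          weight_bits=8,
                          activation_bits=8,
                          scope='name_scope/')
        op_names_after_quantize = set(
            [op.name for op in graph.get_operations()])

        # With a matching scope, new ops should have been inserted.
        self.assertGreater(len(op_names_after_quantize),
                           len(op_names_before_quantize))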
Example 8
def _create_graph(input_graph=None,
                  is_training=True,
                  weight_bits=8,
                  activation_bits=8,
                  quant_delay=None,
                  freeze_bn_delay=None,
                  scope=None,
                  quant_type="affine"):
  """Rewrites an input_graph in place for simulated quantization.

  The graph has fake quantization ops inserted to simulate the error
  introduced by quantization. Since the graph is transformed in place,
  the expected behavior of previously held references to nodes and tensors may
  change.

  Args:
    input_graph: The tf.Graph to be transformed, if None then defaults to the
      default graph.
    is_training: Whether quantizing training or eval graph.
    weight_bits: Number of bits to use for quantizing weights.
    activation_bits: Number of bits to use for quantizing activations.
    quant_delay: Number of steps after which weights and activations are
      quantized during training.
    freeze_bn_delay: Number of steps after which moving mean and variance are
      frozen and used instead of batch statistics during training.
      freeze_bn_delay should be greater than quant_delay and should correspond
      to the number of steps when training has almost converged.
    scope: The scope to be transformed. If it's not None, only the ops which
      are in this scope will be transformed.
    quant_type: String, either "affine" or "symmetric". When set to
      "symmetric", symmetric quantization limits are used instead of training
      the minimum and maximum of each quantization range separately.

  Raises:
    ValueError: If the graph contains training ops (detected by
      _check_for_training_ops).
  """
  print("Success: using a patched version of tf.contrib.quantize.")

  if input_graph is None:
    input_graph = ops.get_default_graph()

  symmetric = (quant_type == "symmetric")

  # Add check to see if graph has training ops, if so provide error message and
  # exit
  _check_for_training_ops(input_graph)
  with input_graph.as_default():
    fold_batch_norms.FoldBatchNorms(
        input_graph,
        freeze_batch_norm_delay=freeze_bn_delay,
        is_training=is_training)
    quantize.Quantize(
        input_graph,
        is_training,
        quant_delay=quant_delay,
        weight_bits=weight_bits,
        activation_bits=activation_bits,
        symmetric=symmetric,
        scope=scope)
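
A minimal usage sketch for the patched _create_graph above, not part of the original snippet; build_model() is a hypothetical helper standing in for whatever constructs the forward pass.

g = ops.Graph()
with g.as_default():
    build_model()  # hypothetical helper adding the conv/batch-norm/activation ops
# Rewrite g in place with symmetric fake quantization, delaying quantization
# for the first 2000 steps and freezing batch-norm statistics much later.
_create_graph(input_graph=g,
              is_training=True,
              quant_delay=2000,
              freeze_bn_delay=2000000,
              quant_type="symmetric")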
Example 9
    def _TestOverlappingPostActivationBypassQuantized(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            conv_input = array_ops.zeros((batch_size, height, width, depth))
            conv1 = conv2d(conv_input,
                           32, [5, 5],
                           stride=2,
                           padding='SAME',
                           weights_initializer=self._WeightInit(0.09),
                           activation_fn=array_ops.identity,
                           scope='test/test1')

            # The bypass of this conv is the post activation bypass of the previous
            # conv.
            conv2 = conv2d(conv_input,
                           32, [5, 5],
                           stride=2,
                           padding='SAME',
                           weights_initializer=self._WeightInit(0.09),
                           activation_fn=None,
                           scope='test/test2')

            bypass_tensor = math_ops.add(conv1, conv2, name='test/add')
            _ = array_ops.identity(bypass_tensor, name='test/output')

            quantize.Quantize(graph,
                              is_training,
                              weight_bits=8,
                              activation_bits=8)

            # Ensure that the bypass node is preceded by a FakeQuantWithMinMaxVar
            # operation, and NOT followed by one.
            self.assertTrue('FakeQuantWithMinMaxVars' not in
                            [c.type for c in bypass_tensor.consumers()])
            self.assertTrue('FakeQuantWithMinMaxVars' in
                            [i.op.type for i in bypass_tensor.op.inputs])

            # Ensure that all the convs and activations are quantized.
            op_names = [op.name for op in graph.get_operations()]
            self.assertTrue(
                'test/test1/weights_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertTrue(
                'test/test2/weights_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertTrue(
                'test/test1/act_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertTrue(
                'test/act_quant/FakeQuantWithMinMaxVars' in op_names)
            self.assertEqual(
                'Identity',
                graph.get_operation_by_name(
                    'test/test1/act_quant/FakeQuantWithMinMaxVars').inputs[0].
                op.type)
            self.assertEqual(
                'Identity',
                graph.get_operation_by_name(
                    'test/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type
            )
 def _AssertIdempotent(self, graph):
     # Ensure that calling the rewrite again doesn't change the graph.
     graph_def_before = str(graph.as_graph_def())
     with graph.as_default():
         # Ensuring that calling the rewrite again doesn't add more nodes.
         fold_batch_norms.FoldBatchNorms(graph, is_training=True)
         quantize.Quantize(graph, True)
     graph_def_after = str(graph.as_graph_def())
     self.assertEqual(graph_def_before, graph_def_after)
Example 11
def _create_graph(input_graph,
                  is_training,
                  elements=None,
                  device_name_or_function=None):
    """Returns a transformed training input_graph for simulated quantization.

  The forward pass has fake quantization ops inserted to simulate the error
  introduced by quantization.

  Args:
    input_graph: The tf.Graph to be transformed.
    is_training: Whether quantizing training or eval graph.
    elements: (Optional) List of Tensors and Operations in input_graph whose
        corresponding elements in the new graph will be returned.
    device_name_or_function: (Optional) The device name or function to use.

  Returns:
    g, a new tf.Graph rewritten for simulated quantization, when elements is
        None. Otherwise a tuple (g, l), where l is a list of
        Tensors/Operations in g corresponding to the provided input elements.

  Raises:
    ValueError: If elements contains an element that isn't a tf.Tensor or
        tf.Operation.
  """
    # TODO(suharshs): Describe the process in more detail in the doc string.
    g = copy_graph.CopyGraph(input_graph)
    if is_training:
        # TODO(raghuramank): Need to make freeze_batch_norm_delay
        # a function of the batch size. For now setting this to 250 epochs
        # This corresponds to 5 million steps at a batch size of 64.
        freeze_batch_norm_delay = 5000000
    else:
        freeze_batch_norm_delay = None
    with g.as_default():
        with ops.device(device_name_or_function):
            fold_batch_norms.FoldBatchNorms(
                g,
                freeze_batch_norm_delay=freeze_batch_norm_delay,
                is_training=is_training)
            quantize.Quantize(g, is_training=is_training)
    if elements is None:
        return g

    return_elements = []
    for element in elements:
        if isinstance(element, (ops.Tensor, variables.Variable)):
            return_elements.append(g.get_tensor_by_name(element.name))
        elif isinstance(element, ops.Operation):
            return_elements.append(g.get_operation_by_name(element.name))
        else:
            raise ValueError(
                'elements must consist of Tensor or Operation objects, got: ',
                str(element))
    return g, return_elements
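
A usage sketch for the copy-based _create_graph above, not part of the original snippet; train_graph, logits and train_op are hypothetical placeholders for a previously built graph and the elements whose counterparts are wanted in the rewritten copy.

# Copy-and-rewrite for evaluation: only the new graph is returned.
eval_graph = _create_graph(train_graph, is_training=False)

# Copy-and-rewrite for training, pinning the rewrite to the CPU and asking for
# the remapped counterparts of two elements from the original graph.
quant_graph, (quant_logits, quant_train_op) = _create_graph(
    train_graph,
    is_training=True,
    elements=[logits, train_op],
    device_name_or_function='/cpu:0')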
    def _TestQuantize_AtrousConvWithBatchNorm(self, activation,
                                              activation_op_name, with_bypass,
                                              delay, fused_batch_norm,
                                              use_resource, scope):
        """Tests quantization: inputs -> atrous conv with batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
      fused_batch_norm: Bool, when true use FusedBatchNorm.
      use_resource: Bool, when true uses resource variables.
      scope: String, specifies top level scope for the graph
    """
        graph = ops.Graph()
        with graph.as_default():
            variable_scope.get_variable_scope().set_use_resource(use_resource)
            batch_size, height, width, depth = 5, 128, 128, 3
            inputs = array_ops.zeros((batch_size, height, width, depth))
            dilation_rate = 2
            conv_scope = self._GetConvScope(scope, with_bypass)
            scope = '' if scope is None else scope
            delim = '/' if scope else ''

            node = separable_conv2d(
                inputs,
                None, [3, 3],
                rate=dilation_rate,
                depth_multiplier=1.0,
                padding='SAME',
                weights_initializer=self._WeightInit(0.09),
                activation_fn=None,
                normalizer_fn=batch_norm,
                normalizer_params=self._BatchNormParams(fused_batch_norm),
                scope=conv_scope)

            # Manually add a bypass (optional) and an activation.
            if with_bypass:
                node = math_ops.add(inputs, node, name=scope + delim + 'Add')

            node = activation(node, name=scope + delim + activation_op_name)

            update_barrier = control_flow_ops.no_op(name='update_barrier')
            with ops.control_dependencies([update_barrier]):
                array_ops.identity(node, name='control_dependency')

            fold_batch_norms.FoldBatchNorms(graph, is_training=True)
            quantize.Quantize(graph, True, quant_delay=delay)

            self._AssertCorrectQuantizedGraphWithBatchNorm(
                graph, scope, 'DepthwiseConv2dNative', activation_op_name,
                with_bypass, delay, use_resource)
Example 13
    def _TestInsertQuantOpInSeparableConv2d(self, is_training):
        graph = ops.Graph()
        with graph.as_default():
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros((batch_size, height, width, depth))
            input2 = array_ops.zeros(
                (batch_size, height / 2, width / 2, depth))
            conv = separable_conv2d(input1,
                                    3, [5, 5],
                                    stride=2,
                                    depth_multiplier=1.0,
                                    padding='SAME',
                                    weights_initializer=self._WeightInit(0.09),
                                    activation_fn=None,
                                    scope='test/test')
            node = math_ops.add(conv, input2, name='test/add')
            node = nn_ops.relu6(node, name='test/relu6')
            update_barrier = control_flow_ops.no_op(name='update_barrier')
            with ops.control_dependencies([update_barrier]):
                array_ops.identity(node, name='control_dependency')

        quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
        # Check if output of bias add is quantized
        quantization_node_name = 'FakeQuantWithMinMaxVars'
        conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                                 quantization_node_name)
        self.assertEqual(conv_quant.type, quantization_node_name)

        # Check if weights for both convs inside the separable conv are quantized
        pointwise_weight_quant = graph.get_operation_by_name(
            'test/test/weights_quant/' + quantization_node_name)
        self.assertEqual(pointwise_weight_quant.type, quantization_node_name)
        depthwise_weight_quant = graph.get_operation_by_name(
            'test/test/separable_conv2d/weights_quant/' +
            quantization_node_name)
        self.assertEqual(depthwise_weight_quant.type, quantization_node_name)

        # Check if activations after first depthwise conv are quantized.
        depthwise_act_quant = graph.get_operation_by_name(
            'test/test/separable_conv2d/act_quant/' + quantization_node_name)
        self.assertEqual(depthwise_act_quant.type, quantization_node_name)

        for op in graph.get_operations():
            if op.type == quantization_node_name:
                quant_op = graph.get_operation_by_name(op.name)
                # Scan through all FakeQuant operations, ensuring that the activation
                # identity op isn't in the consumers of the operation.
                consumers = []
                for output in quant_op.outputs:
                    consumers.extend(output.consumers())

                self.assertNotIn('test/relu6', [c.name for c in consumers])
Example 14
    def _TestQuantize_Conv2dWithBatchNorm(self, activation, activation_op_name,
                                          with_bypass, delay, fused_batch_norm,
                                          use_resource):
        """Tests quantization: inputs -> Conv2d with batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
      fused_batch_norm: Bool, when true use FusedBatchNorm.
      use_resource: Bool, when true uses resource variables.
    """
        graph = ops.Graph()
        with graph.as_default():
            variable_scope.get_variable_scope().set_use_resource(use_resource)
            batch_size, height, width, depth = 5, 128, 128, 3
            inputs = array_ops.zeros((batch_size, height, width, depth))
            stride = 1 if with_bypass else 2
            out_depth = 3 if with_bypass else 32
            scope = 'test/test2' if with_bypass else 'test'
            node = conv2d(
                inputs,
                out_depth, [5, 5],
                stride=stride,
                padding='SAME',
                weights_initializer=self._WeightInit(0.09),
                activation_fn=None,
                normalizer_fn=batch_norm,
                normalizer_params=self._BatchNormParams(fused_batch_norm),
                scope=scope)

            # Manually add a bypass (optionally) and an activation.
            if with_bypass:
                node = math_ops.add(inputs, node, name='test/Add')

            node = activation(node, name='test/' + activation_op_name)

            update_barrier = control_flow_ops.no_op(name='update_barrier')
            with ops.control_dependencies([update_barrier]):
                array_ops.identity(node, name='control_dependency')

            fold_batch_norms.FoldBatchNorms(graph, is_training=True)
            quantize.Quantize(graph, True, quant_delay=delay)

            self._AssertCorrectQuantizedGraphWithBatchNorm(
                graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
                use_resource)
    def _TestQuantize_Conv2dWithoutBatchNorm(self, activation,
                                             activation_op_name, with_bypass,
                                             delay, use_resource, scope):
        """Tests quantization: inputs -> Conv2d no batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
      use_resource: Bool, when true uses resource variables.
      scope: String, specifies top level scope for the graph
    """
        graph = ops.Graph()
        with graph.as_default():
            variable_scope.get_variable_scope().set_use_resource(use_resource)
            batch_size, height, width, depth = 5, 128, 128, 3
            inputs = array_ops.zeros((batch_size, height, width, depth))
            stride = 1 if with_bypass else 2
            out_depth = 3 if with_bypass else 32
            activation_fn = None if with_bypass else activation
            conv_scope = self._GetConvScope(scope, with_bypass)
            scope = '' if scope is None else scope
            delim = '/' if scope else ''
            node = conv2d(inputs,
                          out_depth, [5, 5],
                          stride=stride,
                          padding='SAME',
                          weights_initializer=self._WeightInit(0.09),
                          activation_fn=activation_fn,
                          scope=conv_scope)
            if with_bypass:
                node = math_ops.add(inputs, node, name=scope + delim + 'Add')
                node = activation(node,
                                  name=scope + delim + activation_op_name)
            update_barrier = control_flow_ops.no_op(name='update_barrier')
            with ops.control_dependencies([update_barrier]):
                array_ops.identity(node, name='control_dependency')

            quantize.Quantize(graph, True, quant_delay=delay)

        if conv_scope is None:
            conv_scope = ''

        self._AssertCorrectQuantizedGraphWithoutBatchNorm(
            graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
            use_resource)
Example 16
  def _TestWithNullNameScope(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      with graph.name_scope(None):
        batch_size, height, width, depth = 5, 128, 128, 32
        input1 = array_ops.zeros((batch_size, height, width, depth))
        _ = conv2d(
            input1,
            32, [5, 5],
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test')

        quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
Example 17
def _create_graph(input_graph=None,
                  is_training=True,
                  weight_bits=8,
                  activation_bits=8,
                  quant_delay=None,
                  freeze_bn_delay=None,
                  scope=None):
  """Rewrites an input_graph in place for simulated quantization.

  The graph has fake quantization ops inserted to simulate the error
  introduced by quantization. Since the graph is transformed in place,
  the expected behavior of previously held references to nodes and tensors may
  change.

  Args:
    input_graph: The tf.Graph to be transformed, if None then defaults to the
      default graph.
    is_training: Whether quantizing training or eval graph.
    weight_bits: Number of bits to use for quantizing weights.
    activation_bits: Number of bits to use for quantizing activations.
    quant_delay: Number of steps after which weights and activations are
      quantized during training.
    freeze_bn_delay: Number of steps after which moving mean and variance are
      frozen and used instead of batch statistics during training.
      freeze_bn_delay should be greater than quant_delay and should correspond
      to the number of steps when training has almost converged
    scope: The scope to be transformed. If it's not None, only the ops which
      are in this scope will be transformed.

  """

  if input_graph is None:
    input_graph = ops.get_default_graph()
  with input_graph.as_default():
    fold_batch_norms.FoldBatchNorms(
        input_graph,
        freeze_batch_norm_delay=freeze_bn_delay,
        is_training=is_training)
    quantize.Quantize(
        input_graph,
        is_training,
        quant_delay=quant_delay,
        weight_bits=weight_bits,
        activation_bits=activation_bits,
        scope=scope)
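
A usage sketch for the in-place _create_graph above, not part of the original snippet; build_model() is a hypothetical helper for the model construction code.

# Training rewrite of the default graph: quantize after 2000 steps and freeze
# batch-norm statistics much later, as recommended in the docstring.
with ops.Graph().as_default():
    build_model()  # hypothetical helper
    _create_graph(is_training=True, quant_delay=2000, freeze_bn_delay=2000000)

# Eval rewrite of the default graph: no delays, just insert the fake quant ops.
with ops.Graph().as_default():
    build_model()  # hypothetical helper
    _create_graph(is_training=False)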
 def build_eval_graph(self):
     g = tf.Graph()
     with g.as_default():
         sess, saver = load_graph(g, self._graph, self._checkpoint)
         if self._fold_bn:
             fold_batch_norms.FoldBatchNorms(graph=sess.graph,
                                             freeze_batch_norm_delay=None,
                                             is_training=False)
         if self._quantize:
             quantize.Quantize(graph=sess.graph,
                               is_training=False,
                               quant_delay=0,
                               weight_bits=8,
                               activation_bits=8,
                               scope=None)
     return sess, saver
    def _TestBatchNormForcedUpdates(self, activation, activation_op_name,
                                    fused_batch_norm, use_resource):
        """post_activation bypass quantization should happen with forced updates."""
        graph = ops.Graph()
        with graph.as_default():
            variable_scope.get_variable_scope().set_use_resource(use_resource)
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros((batch_size, height, width, depth))
            input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
            # Setting updates_collections to None forces updates adding an extra
            # identity operation following batch norms.
            bn_params = self._BatchNormParams(fused=fused_batch_norm,
                                              force_updates=True)
            conv = conv2d(input1,
                          32, [5, 5],
                          stride=2,
                          padding='SAME',
                          weights_initializer=self._WeightInit(0.09),
                          activation_fn=activation,
                          normalizer_fn=batch_norm,
                          normalizer_params=bn_params,
                          scope='test/test')
            bypass_tensor = math_ops.add(conv, input2, name='test/add')
            # The output of the post_activation bypass will be another layer.
            _ = conv2d(bypass_tensor,
                       32, [5, 5],
                       stride=2,
                       padding='SAME',
                       weights_initializer=self._WeightInit(0.09),
                       normalizer_fn=batch_norm,
                       normalizer_params=bn_params,
                       activation_fn=activation,
                       scope='test/unused')

            fold_batch_norms.FoldBatchNorms(graph, is_training=True)
            quantize.Quantize(graph, is_training=True)

            # Ensure that the bypass node is preceded by and followed by a
            # FakeQuantWithMinMaxVar operation, since the output of the Add isn't an
            # activation.
            self.assertTrue('FakeQuantWithMinMaxVars' in
                            [c.type for c in bypass_tensor.consumers()])
            self.assertTrue('FakeQuantWithMinMaxVars' in
                            [i.op.type for i in bypass_tensor.op.inputs])

        with open('/tmp/bn_quant_test.pbtxt', 'w') as f:
            f.write(str(graph.as_graph_def()))
Example 20
 def _TestFinalLayerQuantized(self, is_training):
   graph = ops.Graph()
   with graph.as_default():
     batch_size, height, width, depth = 5, 128, 128, 3
     input1 = array_ops.zeros((batch_size, height, width, depth))
     _ = conv2d(
         input1,
         32, [5, 5],
         stride=2,
         padding='SAME',
         weights_initializer=self._WeightInit(0.09),
         activation_fn=None,
         scope='test')
     # Ensure that a FakeQuant operation is in the outputs of the BiasAdd.
     bias_add_op = graph.get_operation_by_name('test/BiasAdd')
     quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
     self.assertTrue('FakeQuantWithMinMaxVars' in
                     [op.type for op in bias_add_op.outputs[0].consumers()])
Example 21
    def _TestQuantize_DepthwiseConv2dWithoutBatchNorm(self, activation,
                                                      activation_op_name,
                                                      with_bypass, delay,
                                                      use_resource):
        """Tests quantization: inputs -> DWConv2d no batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
      use_resource: Bool, when true uses resource variables.
    """
        graph = ops.Graph()
        with graph.as_default():
            variable_scope.get_variable_scope().set_use_resource(use_resource)
            batch_size, height, width, depth = 5, 128, 128, 3
            inputs = array_ops.zeros((batch_size, height, width, depth))
            stride = 1 if with_bypass else 2
            activation_fn = None if with_bypass else activation
            scope = 'test/test2' if with_bypass else 'test'
            node = separable_conv2d(inputs,
                                    None, [5, 5],
                                    stride=stride,
                                    depth_multiplier=1.0,
                                    padding='SAME',
                                    weights_initializer=self._WeightInit(0.09),
                                    activation_fn=activation_fn,
                                    scope=scope)
            if with_bypass:
                node = math_ops.add(inputs, node, name='test/Add')
                node = activation(node, name='test/' + activation_op_name)
            update_barrier = control_flow_ops.no_op(name='update_barrier')
            with ops.control_dependencies([update_barrier]):
                array_ops.identity(node, name='control_dependency')
            quantize.Quantize(graph, True, quant_delay=delay)

        self._AssertCorrectQuantizedGraphWithoutBatchNorm(
            graph, scope, 'DepthwiseConv2dNative', activation_op_name,
            with_bypass, delay, use_resource)
Example 22
  def _TestWithNameScope(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      with graph.name_scope('name_scope'):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        _ = conv2d(
            input1,
            32, [5, 5],
            stride=2,
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test')

        quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    for op in graph.get_operations():
      self.assertTrue(not op.name.startswith('name_scope/name_scope/'),
                      'Broken op: %s' % op.name)
Example 23
def _create_graph(input_graph, is_training, elements=None):
    """Returns a transformed training input_graph for simulated quantization.

  The forward pass has fake quantization ops inserted to simulate the error
  introduced by quantization.

  Args:
    input_graph: The tf.Graph to be transformed.
    is_training: Whether quantizing training or eval graph.
    elements: (Optional) List of Tensors and Operations in input_graph whose
        corresponding elements in the new graph will be returned.

  Returns:
    Returns a tuple(g, l) where:
    g is new tf.Graph that is rewritten for simulated quantization.
    l is a list of Tensors/Operations in g corresponding to the provided input
        elements.

  Raises:
    ValueError: If elements contains an element that isn't a tf.Tensor or
        tf.Operation.
  """
    # TODO(suharshs): Describe the process in more detail in the doc string.
    g = copy_graph.CopyGraph(input_graph)
    fold_batch_norms.FoldBatchNorms(g)
    quantize.Quantize(g, is_training=is_training)
    return_elements = []
    if elements is None:
        elements = []
    for element in elements:
        if isinstance(element, (ops.Tensor, variables.Variable)):
            return_elements.append(g.get_tensor_by_name(element.name))
        elif isinstance(element, ops.Operation):
            return_elements.append(g.get_operation_by_name(element.name))
        else:
            raise ValueError(
                'elements must consist of Tensor or Operation objects, got: ',
                str(element))
    return g, return_elements
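
A usage sketch for the older copy-based _create_graph above, not part of the original snippet; base_graph and predictions are hypothetical placeholders. Unlike the variant that takes device_name_or_function, this version always returns a (graph, elements) tuple, with an empty list when elements is None.

quant_graph, remapped = _create_graph(base_graph,
                                      is_training=True,
                                      elements=[predictions])
quant_predictions = remapped[0]  # counterpart of `predictions` in quant_graph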
Example 24
  def testSeparableConvWithResourceVar(self):
    graph = ops.Graph()
    with graph.as_default():
      with variable_scope.variable_scope('', use_resource=True):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        kernel_size, depth_multiplier = 3, 1
        depthwise_shape = [kernel_size, kernel_size, depth, depth_multiplier]
        depthwise_weights = variables.model_variable(
            'depthwise_weights', shape=depthwise_shape)
        strides = [1, 1, 1, 1]
        with variable_scope.variable_scope('depthwise_conv_1'):
          conv1 = nn.depthwise_conv2d(
              input1, depthwise_weights, strides, padding='SAME')
        with variable_scope.variable_scope('depthwise_conv_2'):
          conv2 = nn.depthwise_conv2d(
              conv1, depthwise_weights, strides, padding='SAME')
          math_ops.add(conv2, input1, name='add')

    quantize.Quantize(graph, True)

    # Test that the weights and activations of all convs have been quantized.
    quant_node_name = 'FakeQuantWithMinMaxVars'
    weights_quant = graph.get_operation_by_name(
        'depthwise_conv_1/weights_quant/' + quant_node_name)
    self.assertEqual(weights_quant.type, quant_node_name)
    act_quant = graph.get_operation_by_name('depthwise_conv_1/act_quant/' +
                                            quant_node_name)
    self.assertEqual(act_quant.type, quant_node_name)

    weights_quant = graph.get_operation_by_name(
        'depthwise_conv_2/weights_quant/' + quant_node_name)
    self.assertEqual(weights_quant.type, quant_node_name)
    act_quant = graph.get_operation_by_name('depthwise_conv_2/act_quant/' +
                                            quant_node_name)
    self.assertEqual(act_quant.type, quant_node_name)
  def _TestQuantize_DepthwiseConv2dWithBatchNorm(
      self, activation, activation_op_name, with_bypass, delay,
      fused_batch_norm):
    """Tests quantization: inputs -> DWConv2d with batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
      fused_batch_norm: Bool, when true use FusedBatchNorm.
    """
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      stride = 1 if with_bypass else 2
      scope = 'test/test2' if with_bypass else 'test'
      node = separable_conv2d(
          inputs,
          None, [5, 5],
          stride=stride,
          depth_multiplier=1.0,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(fused_batch_norm),
          scope=scope)

      # Manually add a bypass (optionally) and an activation.
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')

      node = activation(node, name='test/' + activation_op_name)

      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

      fold_batch_norms.FoldBatchNorms(graph)

      quantize.Quantize(graph, quant_delay=delay)
    quantization_node_name = 'FakeQuantWithMinMaxVars'
    weights_quant = graph.get_operation_by_name(scope + '/weights_quant/' +
                                                quantization_node_name)
    self.assertEqual(weights_quant.type, quantization_node_name)
    expected_inputs = [
        scope + '/weights_quant/' + 'AssignMinLast',
        scope + '/weights_quant/' + 'AssignMaxLast', scope + '/mul_fold'
    ]
    self._AssertInputOpsAre(weights_quant, expected_inputs)
    output_op_name = scope + ('/weights_quant/delayed_quant/Switch_1'
                              if delay else '/depthwise_Fold')
    self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])

    if with_bypass:
      conv_quant = graph.get_operation_by_name(scope + '/conv_quant/' +
                                               quantization_node_name)
      self.assertEqual(conv_quant.type, quantization_node_name)
      expected_inputs = [
          scope + '/conv_quant/AssignMinEma',
          scope + '/conv_quant/AssignMaxEma', scope + '/add_fold'
      ]
      self._AssertInputOpsAre(conv_quant, expected_inputs)
      output_op_name = (scope + '/conv_quant/delayed_quant/Switch_1'
                        if delay else 'test/Add')
      self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])

    act_quant = graph.get_operation_by_name('test/act_quant/' +
                                            quantization_node_name)
    self.assertEqual(act_quant.type, quantization_node_name)
    expected_inputs = [
        'test/act_quant/AssignMinEma', 'test/act_quant/AssignMaxEma',
        'test/' + activation_op_name
    ]
    self._AssertInputOpsAre(act_quant, expected_inputs)
    output_op_name = ('test/act_quant/delayed_quant/Switch_1'
                      if delay else 'control_dependency')
    self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
  def _testQuantize_DepthwiseConv2dWithBatchNorm(
      self, activation, activation_op_name, with_bypass, delay, use_ema):
    """Tests quantization: inputs -> DWConv2d with batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
      use_ema: Bool, when true uses EMA quantization for BN folded weights.
    """
    graph = ops.Graph()
    with graph.as_default():
      training.create_global_step(graph)

      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      stride = 1 if with_bypass else 2
      scope = 'test/test2' if with_bypass else 'test'
      node = separable_conv2d(inputs, None, [5, 5], stride=stride,
                              depth_multiplier=1.0, padding='SAME',
                              weights_initializer=self._WeightInit(0.09),
                              activation_fn=None,
                              normalizer_fn=batch_norm,
                              normalizer_params=_DEFAULT_BATCH_NORM_PARAMS,
                              scope=scope)
      # Manually fold the batch norm.
      weights = (graph.get_operation_by_name(scope + '/depthwise_weights/read')
                 .outputs[0])
      bn_mult = (graph.get_operation_by_name(scope + '/BatchNorm/batchnorm/mul')
                 .outputs[0])
      new_shape = [
          weights.get_shape().as_list()[2], weights.get_shape().as_list()[3]
      ]
      bn_mult_reshaped = array_ops.reshape(
          bn_mult, new_shape, name=scope + '/gamma_reshape')
      mul_fold = math_ops.multiply(
          weights, bn_mult_reshaped, name=scope + '/mul_fold')
      stride = [1, stride, stride, 1]
      conv_fold = nn_ops.depthwise_conv2d(
          input=inputs,
          filter=mul_fold,
          padding='SAME',
          strides=stride,
          name=scope + '/depthwise_Fold')
      bn_bias = (graph.get_operation_by_name(scope + '/BatchNorm/batchnorm/sub')
                 .outputs[0])
      add_fold = math_ops.add(conv_fold, bn_bias, name=scope + '/add_fold')
      # Manually add a bypass (optionally) and an activation.
      if with_bypass:
        node = math_ops.add(inputs, add_fold, name='test/Add')
      else:
        node = add_fold
      node = activation(node, name='test/' + activation_op_name)

      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

      quantize.Quantize(
          graph, quant_delay=delay, quantize_folded_weights_use_ema=use_ema)
    quantization_node_name = 'FakeQuantWithMinMaxVars'
    weights_quant = graph.get_operation_by_name(scope + '/weights_quant/' +
                                                quantization_node_name)
    self.assertEqual(weights_quant.type, quantization_node_name)
    expected_inputs = [
        scope + '/weights_quant/' + ('min/read' if use_ema else 'Minimum'),
        scope + '/weights_quant/' + ('max/read' if use_ema else 'Maximum'),
        scope + '/mul_fold'
    ]
    self._AssertInputOpsAre(weights_quant, expected_inputs)
    output_op_name = scope + ('/weights_quant/delayed_quant/Switch_1'
                              if delay and use_ema else '/depthwise_Fold')
    self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])

    if with_bypass:
      conv_quant = graph.get_operation_by_name(scope + '/conv_quant/' +
                                               quantization_node_name)
      self.assertEqual(conv_quant.type, quantization_node_name)
      expected_inputs = [
          scope + '/conv_quant/min/read', scope + '/conv_quant/max/read',
          scope + '/add_fold'
      ]
      self._AssertInputOpsAre(conv_quant, expected_inputs)
      output_op_name = (scope + '/conv_quant/delayed_quant/Switch_1'
                        if delay else 'test/Add')
      self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])

    act_quant = graph.get_operation_by_name('test/act_quant/' +
                                            quantization_node_name)
    self.assertEqual(act_quant.type, quantization_node_name)
    expected_inputs = [
        'test/act_quant/min/read', 'test/act_quant/max/read',
        'test/' + activation_op_name
    ]
    self._AssertInputOpsAre(act_quant, expected_inputs)
    output_op_name = ('test/act_quant/delayed_quant/Switch_1'
                      if delay else 'control_dependency')
    self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
  def _testQuantize_FCWithBatchNorm(self, activation, activation_op_name,
                                    with_bypass, delay, fused_batch_norm,
                                    use_ema):
    """Tests quantization: inputs -> FC with batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
      fused_batch_norm: Bool, when true use FusedBatchNorm.
      use_ema: Bool, when true uses EMA quantization for BN folded weights.
    """
    graph = ops.Graph()
    with graph.as_default():
      training.create_global_step(graph)

      batch_size, depth = 5, 256
      inputs = array_ops.zeros((batch_size, depth))
      out_depth = 256 if with_bypass else 128
      scope = 'test/test2' if with_bypass else 'test'
      node = fully_connected(
          inputs,
          out_depth,
          weights_initializer=self._WeightInit(0.03),
          activation_fn=None,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(fused_batch_norm),
          scope=scope)

      # Manually add a bypass (optionally) and an activation.
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')

      node = activation(node, name='test/' + activation_op_name)

      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

      fold_batch_norms.FoldBatchNorms(graph)

      quantize.Quantize(
          graph, quant_delay=delay, quantize_folded_weights_use_ema=use_ema)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    weights_quant = graph.get_operation_by_name(scope + '/weights_quant/' +
                                                quantization_node_name)
    self.assertEqual(weights_quant.type, quantization_node_name)
    expected_inputs = [
        scope + '/weights_quant/' + ('AssignMinEma'
                                     if use_ema else 'AssignMinLast'),
        scope + '/weights_quant/' + ('AssignMaxEma'
                                     if use_ema else 'AssignMaxLast'),
        scope + '/mul_fold'
    ]
    self._AssertInputOpsAre(weights_quant, expected_inputs)
    output_op_name = scope + ('/weights_quant/delayed_quant/Switch_1'
                              if delay and use_ema else '/MatMul_Fold')
    self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])

    if with_bypass:
      conv_quant = graph.get_operation_by_name(scope + '/conv_quant/' +
                                               quantization_node_name)
      self.assertEqual(conv_quant.type, quantization_node_name)
      expected_inputs = [
          scope + '/conv_quant/AssignMinEma',
          scope + '/conv_quant/AssignMaxEma', scope + '/add_fold'
      ]
      self._AssertInputOpsAre(conv_quant, expected_inputs)
      output_op_name = (scope + '/conv_quant/delayed_quant/Switch_1'
                        if delay else 'test/Add')
      self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])

    act_quant = graph.get_operation_by_name('test/act_quant/' +
                                            quantization_node_name)
    self.assertEqual(act_quant.type, quantization_node_name)
    expected_inputs = [
        'test/act_quant/AssignMinEma', 'test/act_quant/AssignMaxEma',
        'test/' + activation_op_name
    ]
    self._AssertInputOpsAre(act_quant, expected_inputs)
    output_op_name = ('test/act_quant/delayed_quant/Switch_1'
                      if delay else 'control_dependency')
    self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
  def _TestQuantize_Conv2dWithoutBatchNorm(self, activation, activation_op_name,
                                           with_bypass, delay):
    """Tests quantization: inputs -> Conv2d no batch norm -> Activation.

    Args:
      activation: Callable that returns an Operation, a factory method for the
        Activation.
      activation_op_name: String, name of the Activation operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Activation.
      delay: Int (optional), delay in number of steps until quantization starts.
    """
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      stride = 1 if with_bypass else 2
      out_depth = 3 if with_bypass else 32
      activation_fn = None if with_bypass else activation
      scope = 'test/test2' if with_bypass else 'test'
      node = conv2d(inputs, out_depth, [5, 5], stride=stride, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=activation_fn, scope=scope)
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')
        node = activation(node, name='test/' + activation_op_name)
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

      quantize.Quantize(graph, quant_delay=delay)
    quantization_node_name = 'FakeQuantWithMinMaxVars'
    weights_quant = graph.get_operation_by_name(scope + '/weights_quant/' +
                                                quantization_node_name)
    self.assertEqual(weights_quant.type, quantization_node_name)
    expected_inputs = [
        scope + '/weights_quant/AssignMinLast',
        scope + '/weights_quant/AssignMaxLast', scope + '/weights/read'
    ]
    self._AssertInputOpsAre(weights_quant, expected_inputs)
    if delay and delay > 0:
      output_op_name = scope + '/weights_quant/delayed_quant/Switch_1'
    else:
      output_op_name = scope + '/Conv2D'

    self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])

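    # With a bypass connection the conv output feeding test/Add is quantized
    # separately, using moving-average (EMA) min/max updates.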
    if with_bypass:
      conv_quant = graph.get_operation_by_name(scope + '/conv_quant/' +
                                               quantization_node_name)
      self.assertEqual(conv_quant.type, quantization_node_name)
      expected_inputs = [
          scope + '/conv_quant/AssignMinEma',
          scope + '/conv_quant/AssignMaxEma', scope + '/BiasAdd'
      ]
      self._AssertInputOpsAre(conv_quant, expected_inputs)
      output_op_name = (scope + '/conv_quant/delayed_quant/Switch_1'
                        if delay else 'test/Add')
      self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])

    act_quant = graph.get_operation_by_name('test/act_quant/' +
                                            quantization_node_name)
    self.assertEqual(act_quant.type, quantization_node_name)

    expected_inputs = [
        'test/act_quant/AssignMinEma', 'test/act_quant/AssignMaxEma',
        'test/' + activation_op_name
    ]
    self._AssertInputOpsAre(act_quant, expected_inputs)
    output_op_name = ('test/act_quant/delayed_quant/Switch_1'
                      if delay else 'control_dependency')
    self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
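
  # Hypothetical driver (not part of the original example): exercises the helper
  # above over a small, assumed parameter grid; each call builds a fresh Graph.
  def testQuantize_Conv2dWithoutBatchNorm(self):
    for delay in [None, 5000]:
      for with_bypass in [False, True]:
        self._TestQuantize_Conv2dWithoutBatchNorm(nn_ops.relu6, 'Relu6',
                                                  with_bypass, delay)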
Esempio n. 29
0
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        #######################
        # Config model_deploy #
        #######################
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=FLAGS.num_clones,
            clone_on_cpu=FLAGS.clone_on_cpu,
            replica_id=FLAGS.task,
            num_replicas=FLAGS.worker_replicas,
            num_ps_tasks=FLAGS.num_ps_tasks)

        # Create global_step
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()

        ######################
        # Select the dataset #
        ######################
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                              FLAGS.dataset_split_name,
                                              FLAGS.dataset_dir)

        ######################
        # Select the network #
        ######################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            weight_decay=FLAGS.weight_decay,
            is_training=True)

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=True)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        with tf.device(deploy_config.inputs_device()):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * FLAGS.batch_size,
                common_queue_min=10 * FLAGS.batch_size)
            [image, label] = provider.get(['image', 'label'])
            label -= FLAGS.labels_offset

            train_image_size = FLAGS.train_image_size or network_fn.default_image_size

            image = image_preprocessing_fn(image, train_image_size,
                                           train_image_size)

            images, labels = tf.train.batch(
                [image, label],
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size)
            labels = slim.one_hot_encoding(
                labels, dataset.num_classes - FLAGS.labels_offset)
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [images, labels], capacity=2 * deploy_config.num_clones)

        ####################
        # Define the model #
        ####################
        def clone_fn(batch_queue):
            """Allows data parallelism by creating multiple clones of network_fn."""
            images, labels = batch_queue.dequeue()
            logits, end_points = network_fn(images)

            #############################
            # Specify the loss function #
            #############################
            if 'AuxLogits' in end_points:
                slim.losses.softmax_cross_entropy(
                    end_points['AuxLogits'],
                    labels,
                    label_smoothing=FLAGS.label_smoothing,
                    weights=0.4,
                    scope='aux_loss')
            slim.losses.softmax_cross_entropy(
                logits,
                labels,
                label_smoothing=FLAGS.label_smoothing,
                weights=1.0)
            return end_points

        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        clones = model_deploy.create_clones(deploy_config, clone_fn,
                                            [batch_queue])
        first_clone_scope = deploy_config.clone_scope(0)
        # Gather update_ops from the first clone. These contain, for example,
        # the updates for the batch_norm variables created by network_fn.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       first_clone_scope)

        # Add summaries for end_points.
        end_points = clones[0].outputs
        for end_point in end_points:
            x = end_points[end_point]
            summaries.add(tf.summary.histogram('activations/' + end_point, x))
            summaries.add(
                tf.summary.scalar('sparsity/' + end_point,
                                  tf.nn.zero_fraction(x)))

        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))

        #################################
        # Configure the moving averages #
        #################################
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
        else:
            moving_average_variables, variable_averages = None, None

        #########################################
        # Configure the optimization procedure. #
        #########################################
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = _configure_learning_rate(dataset.num_samples,
                                                     global_step)
            optimizer = _configure_optimizer(learning_rate)
            summaries.add(tf.summary.scalar('learning_rate', learning_rate))

        if FLAGS.sync_replicas:
            # If sync_replicas is enabled, the averaging will be done in the chief
            # queue runner.
            optimizer = tf.train.SyncReplicasOptimizer(
                opt=optimizer,
                replicas_to_aggregate=FLAGS.replicas_to_aggregate,
                total_num_replicas=FLAGS.worker_replicas,
                variable_averages=variable_averages,
                variables_to_average=moving_average_variables)
        elif FLAGS.moving_average_decay:
            # Update ops executed locally by trainer.
            update_ops.append(
                variable_averages.apply(moving_average_variables))

        # Variables to train.
        variables_to_train = _get_variables_to_train()

        # Compute the total loss and the gradients across all clones.
        total_loss, clones_gradients = model_deploy.optimize_clones(
            clones, optimizer, var_list=variables_to_train)
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar('total_loss', total_loss))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)

        update_op = tf.group(*update_ops)
        with tf.control_dependencies([update_op]):
            train_tensor = tf.identity(total_loss, name='train_op')

        # Quantize training graph
        g = tf.get_default_graph()
        fold_batch_norms.FoldBatchNorms(g)
        quantize.Quantize(g, is_training=True)
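        # Export the learned per-layer quantization ranges (the min/max
        # variables) as scalar summaries so they can be monitored in TensorBoard.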
        for var in g.get_collection('variables'):
            if var.name.endswith('min:0') or var.name.endswith('max:0'):
                summaries.add(tf.summary.scalar(var.name, var))

        # Add the summaries from the first clone. These contain the summaries
        # created by model_fn and either optimize_clones() or _gather_clone_loss().
        summaries |= set(
            tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        ###########################
        # Kicks off the training. #
        ###########################
        slim.learning.train(
            train_tensor,
            logdir=FLAGS.train_dir,
            master=FLAGS.master,
            is_chief=(FLAGS.task == 0),
            init_fn=_get_init_fn(),
            summary_op=summary_op,
            number_of_steps=FLAGS.max_number_of_steps,
            log_every_n_steps=FLAGS.log_every_n_steps,
            save_summaries_secs=FLAGS.save_summaries_secs,
            save_interval_secs=FLAGS.save_interval_secs,
            sync_optimizer=optimizer if FLAGS.sync_replicas else None)
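
# A minimal, hypothetical sketch (not part of the example above): in TF 1.x the
# manual fold_batch_norms.FoldBatchNorms + quantize.Quantize pair used in main()
# is also exposed through the tf.contrib.quantize wrappers. Layer shapes, names
# and the quant_delay value below are illustrative assumptions.
def _quantized_training_graph_sketch():
    import tensorflow as tf
    g = tf.Graph()
    with g.as_default():
        images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
        # Conv layer with a foldable batch norm attached via normalizer_fn.
        net = tf.contrib.slim.conv2d(images, 32, [3, 3],
                                     normalizer_fn=tf.contrib.slim.batch_norm,
                                     scope='conv1')
        tf.identity(net, name='output')
        # Folds batch norms and inserts FakeQuantWithMinMaxVars ops in place;
        # quant_delay postpones actual quantization for the first steps.
        tf.contrib.quantize.create_training_graph(input_graph=g,
                                                  quant_delay=2000)
        # For the inference graph one would instead call
        # tf.contrib.quantize.create_eval_graph(input_graph=g).
    return g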
Esempio n. 30
0
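# NOTE: the setup below is assumed for illustration only -- the snippet relies
# on `time`, `tf`, `quantize`, `inputs`, `dummy_data`, `oc`, `kernel_`,
# `stride_` and `pad_` being defined; shapes and channel counts are
# hypothetical values, not taken from the original example.
import time

import numpy as np
import tensorflow as tf

from tensorflow.contrib import slim
from tensorflow.contrib.quantize.python import quantize

oc = 32                                     # output channels (assumed)
kernel_, stride_, pad_ = [3, 3], 1, 'SAME'  # conv hyper-parameters (assumed)
inputs = tf.placeholder(tf.float32, [1, 224, 224, 3], name='input')
dummy_data = np.random.rand(1, 224, 224, 3).astype(np.float32)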
conv = slim.conv2d(inputs,
                   num_outputs=oc,
                   kernel_size=kernel_,
                   stride=stride_,
                   padding=pad_)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    #fp32_data = sess.run(inputs, feed_dict = {inputs: dummy_data})
    start = time.time()
    fp32_conv = sess.run(conv, feed_dict={inputs: dummy_data})
    end = time.time()
    #saver.save(sess, "checkpoint/conv")

    print("elapsed time: %.8f sec" % (end - start))

#----------------------------- Int8 conv forward---------------------
graph = tf.get_default_graph()
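# With is_training=False the rewrite inserts inference-style fake-quant ops that
# read the stored min/max variables instead of updating them.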
quantize.Quantize(graph, is_training=False, weight_bits=8, activation_bits=8)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    convq = graph.get_tensor_by_name(
        "Conv/act_quant/FakeQuantWithMinMaxVars:0")

    start_int8 = time.time()
    int8_conv = sess.run(convq, feed_dict={inputs: dummy_data})
    end_int8 = time.time()

    print("elapsed time: %.8f sec" % (end - start))