Code example #1
  def _BuildGraph(self, x):

    def _Quantize(x, r):
      x = gen_array_ops.quantize_and_dequantize_v2(x, -r, r)
      return x

    def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
      """Dense layer with quantized outputs.

      Args:
        x: input to the dense layer
        num_inputs: number of input columns of x
        num_outputs: number of output columns
        quantization_range: the min/max range for quantization
        name: name of the variable scope

      Returns:
        The output of the layer.
      """
      with variable_scope.variable_scope(name):
        kernel = variable_scope.get_variable(
            'kernel',
            shape=[num_inputs, num_outputs],
            dtype=dtypes.float32,
            initializer=init_ops.GlorotUniform())
        bias = variable_scope.get_variable(
            'bias',
            shape=[num_outputs],
            dtype=dtypes.float32,
            initializer=init_ops.Zeros())
        x = math_ops.matmul(x, kernel)
        x = _Quantize(x, quantization_range)
        x = nn.bias_add(x, bias)
        x = _Quantize(x, quantization_range)
      return x

    x = _Quantize(x, 1)
    # Conv + Bias + Relu6
    x = layers.conv2d(x, filters=32, kernel_size=3, use_bias=True)
    x = nn.relu6(x)
    # Conv + Bias + Relu6
    x = layers.conv2d(x, filters=64, kernel_size=3, use_bias=True)
    x = nn.relu6(x)
    # Reduce
    x = math_ops.reduce_mean(x, [1, 2])
    x = _Quantize(x, 6)
    # FC1
    x = _DenseLayer(x, 64, 512, 6, name='dense')
    x = nn.relu6(x)
    # FC2
    x = _DenseLayer(x, 512, 10, 25, name='dense_1')
    x = array_ops.identity(x, name=OUTPUT_NODE_NAME)
    return x
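
In this example, `_Quantize` inserts fake-quantization nodes via `gen_array_ops.quantize_and_dequantize_v2`, which clips values to [-r, r] and rounds them to a fixed number of levels while keeping the tensor in float32. A minimal sketch of the same effect through the public wrapper `tf.quantization.quantize_and_dequantize` (the input values below are illustrative):

import tensorflow as tf

# Simulated 8-bit quantization over the fixed range [-1, 1]: values are
# clipped to the range and rounded to the nearest representable level,
# but the output dtype stays float32.
x = tf.constant([-1.5, -0.4, 0.0, 0.7, 2.0])
y = tf.quantization.quantize_and_dequantize(
    x, input_min=-1.0, input_max=1.0, range_given=True)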
Code example #2
  def _BuildGraph(self, x):

    def _Quantize(x, r):
      x = gen_array_ops.quantize_and_dequantize_v2(x, -r, r)
      return x

    def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
      """Dense layer with quantized outputs.

      Args:
        x: input to the dense layer
        num_inputs: number of input columns of x
        num_outputs: number of output columns
        quantization_range: the min/max range for quantization
        name: name of the variable scope

      Returns:
        The output of the layer.
      """
      with variable_scope.variable_scope(name):
        kernel = variable_scope.get_variable(
            'kernel',
            shape=[num_inputs, num_outputs],
            dtype=dtypes.float32,
            initializer=keras.initializers.glorot_uniform())
        bias = variable_scope.get_variable(
            'bias',
            shape=[num_outputs],
            dtype=dtypes.float32,
            initializer=keras.initializers.zeros())
        x = math_ops.matmul(x, kernel)
        x = _Quantize(x, quantization_range)
        x = nn.bias_add(x, bias)
        x = _Quantize(x, quantization_range)
      return x

    x = _Quantize(x, 1)
    # Conv + Bias + Relu6
    x = layers.conv2d(x, filters=32, kernel_size=3, use_bias=True)
    x = nn.relu6(x)
    # Conv + Bias + Relu6
    x = layers.conv2d(x, filters=64, kernel_size=3, use_bias=True)
    x = nn.relu6(x)
    # Reduce
    x = math_ops.reduce_mean(x, [1, 2])
    x = _Quantize(x, 6)
    # FC1
    x = _DenseLayer(x, 64, 512, 6, name='dense')
    x = nn.relu6(x)
    # FC2
    x = _DenseLayer(x, 512, 10, 25, name='dense_1')
    x = array_ops.identity(x, name=OUTPUT_NODE_NAME)
    return x
Code example #3
    def call(self, inputs):
        # The Keras backend calls the leaky-relu slope `alpha`; this layer
        # stores it as `negative_slope`.
        x = inputs
        if self.negative_slope != 0.0:
            if self.max_value is None and self.threshold == 0:
                return K.relu(x, alpha=self.negative_slope)
            # Remember the (shifted) negative part so the leaky slope can be
            # applied after the positive part has been computed.
            if self.threshold != 0:
                negative_part = K.relu(-x + self.threshold)
            else:
                negative_part = K.relu(-x)

        # threshold and max_value are plain Python attributes, not tensors,
        # so these comparisons are safe in graph mode.
        clip_max = self.max_value is not None

        if self.threshold != 0:
            # Keep x where x > threshold; clamp everything else to threshold.
            x = x * tf.cast(tf.greater(x, self.threshold),
                            x.dtype.base_dtype) + self.threshold * tf.cast(
                                tf.greater_equal(self.threshold, x),
                                x.dtype.base_dtype)
        elif self.max_value == 6:
            # With no threshold, the native nn.relu6 op is faster than a
            # relu followed by clip_by_value.
            x = nn.relu6(x)
            clip_max = False
        else:
            x = nn.relu(x)

        if clip_max:
            # Use a local tensor rather than mutating self.max_value, so
            # repeated calls keep working.
            max_value = tf.cast(self.max_value, x.dtype.base_dtype)
            x = clip_ops.clip_by_value(x, self.threshold, max_value)

        if self.negative_slope != 0.0:
            x -= self.negative_slope * negative_part

        return x
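
For comparison, the built-in `tf.keras.layers.ReLU` layer exposes the same three knobs (`max_value`, `negative_slope`, `threshold`); a quick sanity check, assuming TF 2.x eager execution:

import numpy as np
import tensorflow as tf

# max_value=6.0 with the default threshold and slope reproduces relu6.
layer = tf.keras.layers.ReLU(max_value=6.0)
x = np.array([-3.0, 2.0, 8.0], dtype=np.float32)
print(layer(x).numpy())  # [0. 2. 6.]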
Code example #4
import numpy as np
import tensorflow as tf


# TensorFlowConverter and generate_kernel_test_case are provided by the
# surrounding WebDNN test harness; they are not defined here.
def template(x_shape=[2, 3, 4, 5], description: str = ""):
    from tensorflow.python.ops import nn
    x = tf.placeholder(np.float32, x_shape, "x")
    y = nn.relu6(x)

    vx = np.random.rand(*x_shape).astype(np.float32) - 0.5
    with tf.Session() as sess:
        vy, = sess.run([y], {x: vx})

        graph = TensorFlowConverter(sess, batch_size=2).convert([x], [y])

    generate_kernel_test_case(description=f"[TensorFlow] Relu6 {description}",
                              graph=graph,
                              inputs={graph.inputs[0]: vx},
                              expected={graph.outputs[0]: vy})
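
A typical invocation, assuming the test utilities above are importable. Note that the converter hardcodes batch_size=2, so the leading dimension of x_shape should stay 2:

template()  # default 4-D shape [2, 3, 4, 5]
template(x_shape=[2, 8, 8, 3], description="image-like input")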
Code example #5
  def testSinglePartitionedVariable(self):
    """Ensures partitioned variables fail cleanly with freeze graph."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # Create a graph with partitioned variables. When the weights are
    # partitioned into a single partition, the weights variable is followed
    # by an identity -> identity chain (an additional identity node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    with ops.Graph().as_default():
      with variable_scope.variable_scope("part", partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros(
            (batch_size, height, width, depth), name="input1")
        input2 = array_ops.zeros(
            (batch_size, height, width, depth), name="input2")

        num_nodes = depth
        filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
        filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
        conv = nn.conv2d(
            input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
        node = math_ops.add(conv, input2, name="test/add")
        node = nn.relu6(node, name="test/relu6")

      # Save graph and checkpoints.
      sess = session.Session()
      sess.run(variables.global_variables_initializer())

      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

      # Ensure this graph has partition variables.
      self.assertTrue([
          tensor.name.split(":")[0]
          for op in sess.graph.get_operations()
          for tensor in op.values()
          if re.search(r"/part_\d+/", tensor.name)
      ])

    # Test that freezing the graph doesn't crash.
    output_node_names = "save/restore_all"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    return_value = freeze_graph.freeze_graph_with_def_protos(
        input_graph_def=sess.graph_def,
        input_saver_def=None,
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names,
        restore_op_name="save/restore_all",  # default value
        filename_tensor_name="save/Const:0",  # default value
        output_graph=output_graph_path,
        clear_devices=False,
        initializer_nodes="")
    # freeze_graph_with_def_protos returns -1 when it bails out cleanly on
    # partitioned variables, which is the behavior under test.
    self.assertEqual(return_value, -1)
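
For context, `fixed_size_partitioner(1)` still routes the variable through the partitioned-variable machinery, which is what this test relies on. A minimal sketch using the TF1 compat API (iteration over the returned PartitionedVariable is an assumption of this sketch):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Even a single-partition partitioner creates variables named .../part_0,
# which is what the test above searches for.
with tf.variable_scope("demo", partitioner=tf.fixed_size_partitioner(1)):
    v = tf.get_variable("w", shape=[4, 3])
print([p.name for p in v])  # e.g. ['demo/w/part_0:0']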
Code example #6
    def testSinglePartitionedVariable(self):
        """Ensures partitioned variables fail cleanly with freeze graph."""
        checkpoint_prefix = os.path.join(self.get_temp_dir(),
                                         "saved_checkpoint")
        checkpoint_state_name = "checkpoint_state"
        input_graph_name = "input_graph.pb"
        output_graph_name = "output_graph.pb"

        # Create a graph with partitioned variables. When the weights are
        # partitioned into a single partition, the weights variable is
        # followed by an identity -> identity chain (an additional identity
        # node).
        partitioner = partitioned_variables.fixed_size_partitioner(1)
        with ops.Graph().as_default():
            with variable_scope.variable_scope("part",
                                               partitioner=partitioner):
                batch_size, height, width, depth = 5, 128, 128, 3
                input1 = array_ops.zeros((batch_size, height, width, depth),
                                         name="input1")
                input2 = array_ops.zeros((batch_size, height, width, depth),
                                         name="input2")

                num_nodes = depth
                filter1 = variable_scope.get_variable("filter",
                                                      [num_nodes, num_nodes])
                filter2 = array_ops.reshape(filter1,
                                            [1, 1, num_nodes, num_nodes])
                conv = nn.conv2d(input=input1,
                                 filter=filter2,
                                 strides=[1, 1, 1, 1],
                                 padding="SAME")
                node = math_ops.add(conv, input2, name="test/add")
                node = nn.relu6(node, name="test/relu6")

            # Save graph and checkpoints.
            sess = session.Session()
            sess.run(variables.global_variables_initializer())

            saver = saver_lib.Saver()
            checkpoint_path = saver.save(sess,
                                         checkpoint_prefix,
                                         global_step=0,
                                         latest_filename=checkpoint_state_name)
            graph_io.write_graph(sess.graph, self.get_temp_dir(),
                                 input_graph_name)

            # Ensure this graph has partition variables.
            self.assertTrue([
                tensor.name.split(":")[0]
                for op in sess.graph.get_operations()
                for tensor in op.values()
                if re.search(r"/part_\d+/", tensor.name)
            ])

        # Test that freezing the graph doesn't crash.
        output_node_names = "save/restore_all"
        output_graph_path = os.path.join(self.get_temp_dir(),
                                         output_graph_name)

        return_value = freeze_graph.freeze_graph_with_def_protos(
            input_graph_def=sess.graph_def,
            input_saver_def=None,
            input_checkpoint=checkpoint_path,
            output_node_names=output_node_names,
            restore_op_name="save/restore_all",  # default value
            filename_tensor_name="save/Const:0",  # default value
            output_graph=output_graph_path,
            clear_devices=False,
            initializer_nodes="")
        # freeze_graph_with_def_protos returns -1 when it bails out cleanly
        # on partitioned variables, which is the behavior under test.
        self.assertEqual(return_value, -1)
Code example #7
def relu6(x):
    # Rectified linear 6: min(max(x, 0), 6), computed by the native TF op.
    return nn.relu6(x)
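
`nn.relu6` is equivalent to clipping a plain ReLU at 6; a quick check in TF 2.x eager mode:

import tensorflow as tf

x = tf.constant([-2.0, 3.0, 9.0])
a = tf.nn.relu6(x)
b = tf.clip_by_value(x, 0.0, 6.0)
# Both a and b evaluate to [0. 3. 6.]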