Example #1
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # k is the size of the shared inner dimension of the product; which axis of
  # the first input holds it depends on whether that input is transposed.
  if transpose_a:
    k = int(a_shape[0])
  else:
    k = int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", (k * output_count * 2))
Example #2
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
Example #3
def _calc_conv_weight_params(graph, node):
    """Calculates the on-disk size of the weights for Conv2D."""
    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
    input_shape.assert_is_fully_defined()
    filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
    filter_shape.assert_is_fully_defined()
    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
    output_shape.assert_is_fully_defined()
    filter_height = int(filter_shape[0])
    filter_width = int(filter_shape[1])
    filter_in_depth = int(filter_shape[2])
    filter_out_depth = int(filter_shape[3])
    return ops.OpStats("weight_parameters", (filter_height * filter_width * filter_in_depth * filter_out_depth))
Example #4
def _calc_depthwise_conv_flops(graph, node):
    """Calculates the compute resources needed for DepthwiseConv2dNative."""
    input_shape = graph_util.tensor_shape_from_node_def_name(
        graph, node.input[0])
    input_shape.assert_is_fully_defined()
    filter_shape = graph_util.tensor_shape_from_node_def_name(
        graph, node.input[1])
    filter_shape.assert_is_fully_defined()
    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
    output_shape.assert_is_fully_defined()
    filter_height = int(filter_shape[0])
    filter_width = int(filter_shape[1])
    output_count = np.prod(output_shape.as_list())
    return ops.OpStats("flops",
                       (output_count * filter_height * filter_width * 2))
Example #5
def _calc_conv_weight_params(graph, node):
  """Calculates the on-disk size of the weights for Conv2D."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  filter_in_depth = int(filter_shape[2])
  filter_out_depth = int(filter_shape[3])
  return ops.OpStats("weight_parameters", (filter_height * filter_width *
                                           filter_in_depth * filter_out_depth))
Example #6
def _calc_bias_add_weight_params(graph, node):
    """Calculates the on-disk weight parameters for BiasAdd."""
    bias_shape = graph_util.tensor_shape_from_node_def_name(
        graph, node.input[1])
    bias_shape.assert_is_fully_defined()
    bias_count = np.prod(bias_shape.as_list())
    return ops.OpStats("weight_parameters", bias_count)
Example #7
def _calc_bias_add_flops(graph, node):
    """Calculates the computing needed for BiasAdd."""
    input_shape = graph_util.tensor_shape_from_node_def_name(
        graph, node.input[0])
    input_shape.assert_is_fully_defined()
    input_count = np.prod(input_shape.as_list())
    return ops.OpStats("flops", input_count)
Example #8
def _calc_mat_mul_weight_parameters(graph, node):
    """Calculates the on-disk size of the weights for MatMul."""
    # We assume here that the weights are always in the second input to the op,
    # which is generally true by convention for fully-connected layers, but not
    # enforced or checked.
    weights_shape = graph_util.tensor_shape_from_node_def_name(
        graph, node.input[1])
    weights_shape.assert_is_fully_defined()
    return ops.OpStats("weight_parameters",
                       (int(weights_shape[1]) * int(weights_shape[0])))
Example #9
def _calc_mat_mul_weight_parameters(graph, node):
  """Calculates the on-disk size of the weights for MatMul."""
  # We assume here that the weights are always in the second input to the op,
  # which is generally true by convention for fully-connected layers, but not
  # enforced or checked.
  weights_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                             node.input[1])
  weights_shape.assert_is_fully_defined()
  return ops.OpStats("weight_parameters",
                     (int(weights_shape[1]) * int(weights_shape[0])))
Example #10
def _calc_dilation2d_weight_params(graph, node):
  """Calculates the on-disk size of the weights for Dilation2D."""
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  filter_depth = int(filter_shape[2])
  return ops.OpStats("weight_parameters",
                     (filter_height * filter_width * filter_depth))
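Unlike Conv2D, the Dilation2D filter is 3-D, [filter_height, filter_width, depth], because morphological dilation is applied per channel and has no separate output depth, so only three dimensions are multiplied. A worked check with illustrative numbers only:

# Hypothetical 5x5 Dilation2D filter over 16 channels.
weight_parameters = 5 * 5 * 16   # 400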
Example #11
def _calc_bias_add_weight_params(graph, node):
  """Calculates the on-disk weight parameters for BiasAdd."""
  bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
  bias_shape.assert_is_fully_defined()
  bias_count = np.prod(bias_shape.as_list())
  return ops.OpStats("weight_parameters", bias_count)
Example #12
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  input_count = np.prod(input_shape.as_list())
  return ops.OpStats("flops", input_count)