def read_model(model, input_shape, f_activation='relu'):
    """Reads the operations of a full model (PyTorch variant).

    NOTE(review): this definition is shadowed by a later `read_model`
    defined further down in this file; only the last definition is
    visible to importers. Consider renaming or removing one of them.

    Args:
        model: torch module exposing `conv1`, `layers`, `conv2` and
            `linear` attributes.
        input_shape: sequence of ints, NCHW input shape; square image
            assumed (H == W).
        f_activation: str or None, one of 'relu', 'swish', None.

    Returns:
        list of (name, operation) tuples.
    """
    # Run a forward pass so all parameters/buffers are materialized.
    _ = model(torch.ones(input_shape))
    input_size = input_shape[2]  # Assuming square input (NCHW -> H).
    ops = []

    # Stem convolution. Bias counted as True since batch-norm adds one.
    l_name = 'conv1'
    layer = getattr(model, l_name)
    layer_temp = counting.Conv2D(
        input_size, list(layer.weight.shape), layer.stride,
        layer.padding, True, f_activation)
    ops.append((l_name, layer_temp))
    # Input size might have changed.
    input_size = counting.get_conv_output_size(
        image_size=input_size, filter_size=layer_temp.kernel_shape[2],
        padding=layer_temp.padding, stride=layer_temp.strides[0])

    # Blocks. Indices 0, 1, 10 and 16 carry a shortcut branch, the rest
    # do not. TODO(review): confirm these hard-coded indices hold for
    # every model configuration this is used with.
    for idx, block in enumerate(model.layers):
        if idx in (0, 1, 10, 16):
            block_ops, input_size = read_block(
                block, input_size, f_activation=f_activation)
        else:
            block_ops, input_size = read_block_without_shortcut(
                block, input_size, f_activation=f_activation)
        ops.append(('block_%d' % idx, block_ops))

    # Final convolution before the classifier head.
    l_name = 'conv2'
    layer = getattr(model, l_name)
    layer_temp = counting.Conv2D(
        input_size, list(layer.weight.shape), layer.stride,
        layer.padding, True, f_activation)  # Bias True: batch-norm.
    ops.append((l_name, layer_temp))
    # Input size might have changed.
    input_size = counting.get_conv_output_size(
        image_size=input_size, filter_size=layer_temp.kernel_shape[2],
        padding=layer_temp.padding, stride=layer_temp.strides[0])

    # Head: fully-connected classifier.
    l_name = 'linear'
    layer = getattr(model, l_name)
    fc_shape = [layer.in_features, layer.out_features]
    ops.append(('_fc', counting.FullyConnected(list(fc_shape), True, None)))

    return ops
def read_block(block, input_size, f_activation='swish'):
    """Reads the operations of a single residual block with a shortcut.

    Args:
        block: torch module exposing `shortcut`, `conv1`, `conv2` and
            `conv3` attributes.
        input_size: int, current spatial size; square image assumed.
        f_activation: str or None, one of 'relu', 'swish', None.

    Returns:
        Tuple of (list of (name, operation) tuples, output spatial size).
    """
    ops = []

    # Shortcut convolution; it does not change the tracked input size.
    l_name = 'shortcut'
    layer = getattr(block, l_name)[0]
    ops.append((l_name, counting.Conv2D(
        input_size, list(layer.weight.shape), layer.stride,
        layer.padding, True, f_activation)))  # Bias True: batch-norm.

    # Main path: conv1 -> conv2 -> conv3, updating the spatial size
    # after each convolution (was three verbatim copies of this code).
    for l_name in ('conv1', 'conv2', 'conv3'):
        layer = getattr(block, l_name)
        layer_temp = counting.Conv2D(
            input_size, list(layer.weight.shape), layer.stride,
            layer.padding, True, f_activation)  # Bias True: batch-norm.
        ops.append((l_name, layer_temp))
        # Input size might have changed.
        input_size = counting.get_conv_output_size(
            image_size=input_size, filter_size=layer_temp.kernel_shape[2],
            padding=layer_temp.padding, stride=layer_temp.strides[0])

    return ops, input_size
def read_model(model, input_shape, f_activation='swish'):
  """Reads the operations of a full EfficientNet model (TF variant).

  NOTE(review): this redefines the earlier torch-based `read_model` in
  this file; only this definition is visible at import time.

  Args:
    model: efficientnet_model.Model.
    input_shape: sequence of ints, NHWC input shape; square image
      assumed (H == W).
    f_activation: str or None, one of 'relu', 'swish', None.

  Returns:
    list of (name, operation) tuples.
  """
  # Ensure the input runs through the model so variables are built.
  _ = model(tf.ones(input_shape))
  input_size = input_shape[1]  # Assuming square input (NHWC -> H).
  ops = []

  # Stem convolution. Bias counted as True since batch-norm adds one.
  l_name = '_conv_stem'
  layer = getattr(model, l_name)
  layer_temp = counting.Conv2D(
      input_size, layer.weights[0].shape.as_list(), layer.strides,
      layer.padding, True, f_activation)
  ops.append((l_name, layer_temp))
  # Input size might have changed.
  input_size = counting.get_conv_output_size(
      image_size=input_size, filter_size=layer_temp.kernel_shape[0],
      padding=layer_temp.padding, stride=layer_temp.strides[0])

  # MBConv blocks.
  for idx, block in enumerate(model._blocks):
    block_ops, input_size = read_block(block, input_size,
                                       f_activation=f_activation)
    ops.append(('block_%d' % idx, block_ops))

  # Head convolution followed by global average pooling.
  l_name = '_conv_head'
  layer = getattr(model, l_name)
  layer_temp = counting.Conv2D(
      input_size, layer.weights[0].shape.as_list(), layer.strides,
      layer.padding, True, f_activation)  # Bias True: batch-norm.
  n_channels_out = layer.weights[0].shape.as_list()[-1]
  ops.append((l_name, layer_temp))

  ops.append(('_avg_pooling', counting.GlobalAvg(input_size, n_channels_out)))

  return ops
def read_block(block, input_size, f_activation='swish'):
  """Reads the operations of a single EfficientNet MBConv block.

  Args:
    block: efficientnet_model.MBConvBlock or MBConvBlockBinary.
    input_size: int, current spatial size; square image assumed.
    f_activation: str or None, one of 'relu', 'swish', None.

  Returns:
    Tuple of (list of (name, operation) tuples, output spatial size).
  """
  # Binary blocks count their convolutions with the binary counter.
  conv_counter_class = {
      efficientnet_model.MBConvBlock: counting.Conv2D,
      efficientnet_model.MBConvBlockBinary: counting.Conv2DBinary,
  }[type(block)]

  ops = []
  # 1. Optional expansion convolution (absent when expand_ratio == 1).
  l_name = '_expand_conv'
  if hasattr(block, l_name):
    layer = getattr(block, l_name)
    layer_temp = conv_counter_class(
        input_size, layer.kernel.shape.as_list(), layer.strides, layer.padding,
        True, f_activation)  # Use bias true since batch_norm
    ops.append((l_name, layer_temp))
  # 2. Depthwise convolution; may change the spatial size.
  l_name = '_depthwise_conv'
  layer = getattr(block, l_name)
  layer_temp = counting.DepthWiseConv2D(
      input_size, layer.weights[0].shape.as_list(), layer.strides,
      layer.padding, True, f_activation)  # Use bias true since batch_norm
  ops.append((l_name, layer_temp))
  # Input size might have changed.
  input_size = counting.get_conv_output_size(
      image_size=input_size, filter_size=layer_temp.kernel_shape[0],
      padding=layer_temp.padding, stride=layer_temp.strides[0])
  # 3. Squeeze-and-excite module (optional).
  if block._has_se:
    se_reduce = getattr(block, '_se_reduce')
    se_expand = getattr(block, '_se_expand')
    # Kernel has the input features in its second dimension.
    n_channels = se_reduce.kernel.shape.as_list()[2]
    ops.append(('_se_reduce_mean', counting.GlobalAvg(input_size, n_channels)))
    # SE convolutions act on the pooled 1x1 map, so input size is 1.
    layer_temp = conv_counter_class(
        1, se_reduce.kernel.shape.as_list(), se_reduce.strides,
        se_reduce.padding, True, f_activation)
    ops.append(('_se_reduce', layer_temp))
    layer_temp = conv_counter_class(
        1, se_expand.kernel.shape.as_list(), se_expand.strides,
        se_expand.padding, True, 'sigmoid')
    ops.append(('_se_expand', layer_temp))
    ops.append(('_se_scale', counting.Scale(input_size, n_channels)))

  # 4. Projection convolution (no activation).
  l_name = '_project_conv'
  layer = getattr(block, l_name)
  layer_temp = conv_counter_class(
      input_size, layer.kernel.shape.as_list(), layer.strides, layer.padding,
      True, None)  # Use bias true since batch_norm, no activation
  ops.append((l_name, layer_temp))

  # Identity skip connection. BUG FIX: the original used `n_channels`,
  # which is unbound when the block has no SE module (NameError); use the
  # block's output filter count, which is the channel count actually
  # present on the skip path (== input_filters when the skip applies).
  if (block._block_args.id_skip
      and all(s == 1 for s in block._block_args.strides)
      and block._block_args.input_filters == block._block_args.output_filters):
    ops.append(('_skip_add',
                counting.Add(input_size, block._block_args.output_filters)))
  return ops, input_size