Example no. 1
  def test_naming_of_tunable(self):
    # If this test is failing, it is because the user has registered two
    # tunable blocks with names that are substrings of one another.
    blocks = blocks_builder.Blocks()
    names = []
    for k, v in blocks._block_builders.items():
      if v is not None:
        hps = v.requires_hparams()
        if hps:
          names.append(k)

    for idx, name in enumerate(names):
      for idx2, name2 in enumerate(names):
        if idx != idx2:
          self.assertNotStartsWith(name.name, name2.name)
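
The check above enforces a prefix rule between every pair of tunable block names. A minimal, self-contained illustration of that rule with plain strings (the names here are hypothetical and are not taken from the real block registry):

# Hypothetical names: "CONV" would be rejected next to "CONV_3X3",
# because one is a prefix of the other.
names = ["CONV_3X3", "DENSE_64", "CONV"]
bad_pairs = [(a, b) for a in names for b in names if a != b and a.startswith(b)]
assert bad_pairs == [("CONV_3X3", "CONV")]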
Example no. 2
def increase_structure_depth(previous_architecture, added_block, problem_type):
    """Returns new structure given the old one and the added block.

  Increases the depth of the neural network by adding `added_block`.
  For the case of CNNs, if the block is convolutional, it will be added before
  the flattening operation. Otherwise, if it is a dense block, then it will
  be added at the end.
  For the DNN and RNN cases, the added_block is always added at the end.

  Args:
    previous_architecture: the input architecture. An np.array holding
      `blocks.BlockType` (i.e., holding integers).
    added_block: a `blocks.BlockType` to add to previous_architecture.
    problem_type: a `PhoenixSpec.ProblemType` enum.

  Returns:
    np.array of `blocks.BlockType` (integers).
  """
    if added_block == blocks.BlockType.EMPTY_BLOCK:
        return previous_architecture.copy()
    output = previous_architecture.copy()

    # For DNN or RNN, the block is always appended at the end.
    if problem_type != phoenix_spec_pb2.PhoenixSpec.CNN:
        return np.append(output, added_block)

    # TODO(b/172564129): Change this class (blocks) to a singleton
    builder = blocks.Blocks()
    # CNN case - convolution before fully connected.
    if not builder[added_block].is_input_order_important:
        return np.append(output, added_block)
    # First block index in which order is not important
    index_for_new_block = next(
        index for index, block in enumerate(previous_architecture)
        if not builder[block].is_input_order_important)
    return np.insert(output, index_for_new_block, added_block)
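
A brief usage sketch for increase_structure_depth, using only identifiers that appear elsewhere in these examples (the exact set of BlockType members may differ):

import numpy as np

arch = np.array([blocks.BlockType.FIXED_CHANNEL_CONVOLUTION_16])
# Adding EMPTY_BLOCK hits the first branch above: a copy of the input
# architecture is returned unchanged.
unchanged = increase_structure_depth(arch, blocks.BlockType.EMPTY_BLOCK,
                                     phoenix_spec_pb2.PhoenixSpec.CNN)
assert np.array_equal(unchanged, arch)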
Example no. 3
  def test_all_blocks_are_there(self):
    blocks = blocks_builder.Blocks()
    for block_type in blocks_builder.BlockType:
      if block_type == blocks_builder.BlockType.EMPTY_BLOCK:
        continue
      blocks[block_type]  # pylint: disable=pointless-statement
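
The test above only asserts that every block type except the EMPTY_BLOCK sentinel can be looked up in the builder. A generic sketch of that enum-to-registry invariant, independent of the real blocks_builder API:

import enum

class Kind(enum.IntEnum):
  EMPTY = 0
  CONV = 1
  DENSE = 2

# Hypothetical registry keyed by the enum; the EMPTY sentinel is deliberately absent.
registry = {Kind.CONV: object(), Kind.DENSE: object()}
missing = [k for k in Kind if k is not Kind.EMPTY and k not in registry]
assert not missing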
Example no. 4
  def test_constructor(self):
    blocks = blocks_builder.Blocks()
    input_tensor = tf.zeros([3, 32, 32, 3])
    block_type = blocks_builder.BlockType.FIXED_CHANNEL_CONVOLUTION_16
    _ = blocks[block_type].build([input_tensor], is_training=True)
Example no. 5
def construct_tower(phoenix_spec,
                    input_tensor,
                    tower_name,
                    architecture,
                    is_training,
                    lengths,
                    logits_dimension,
                    is_frozen,
                    dropout_rate=None,
                    allow_auxiliary_head=False):
    """Creates a tower giving an architecture.

  Args:
    phoenix_spec: The trial's `phoenix_spec_pb2.PhoenixSpec` proto.
    input_tensor: An input `tf.Tensor` to build the network on top of.
    tower_name: a unique name for the tower (string).
    architecture: np.array of ints (`blocks.BlockType`) with the architecture of
      the neural network to build.
    is_training: a boolean indicating if we are in training.
    lengths: A `tf.Tensor` of shape [batch_size] holding the length of each
      sequence for sequential problems. Keep as None for non-sequential
      problems.
    logits_dimension: The last axis dimension of the logits.
    is_frozen: Whether the tower is frozen (an integer rather than a boolean).
    dropout_rate: a float indicating the rate of dropouts to apply between
      blocks. Applied only if the value is above zero.
    allow_auxiliary_head: Whether to allow importing the tower's auxiliary head,
      if the tower has one. Only applicable for CNNs.

  Returns:
    The tower spec (as produced by `create_tower_spec`) for the built network.
  """
    blocks_builders = blocks.Blocks()
    output = [input_tensor]
    block_index = 1
    str_signature = ""
    with tf.compat.v1.variable_scope("Phoenix/{}".format(tower_name)):
        for block_type in architecture:
            str_signature += str(block_type)
            # TODO(b/172564129): Should block_index also be ignored when using
            # uniform average transfer learning? How would we handle repeated
            # blocks, e.g. two FC layers stacked on top of each other?
            scope = "{0}_{1}_{2}".format(str(block_index),
                                         blocks.BlockType(block_type).name,
                                         str_signature)
            scope = strip_scope(
                scope,
                phoenix_spec.transfer_learning_spec.transfer_learning_type,
                str_signature)
            with tf.compat.v1.variable_scope(scope):
                with (arg_scope(DATA_FORMAT_OPS,
                                data_format=phoenix_spec.cnn_data_format)):
                    output = blocks_builders[block_type].build(
                        input_tensors=output,
                        is_training=is_training,
                        lengths=lengths)
                    if dropout_rate and dropout_rate > 0:
                        output[-1] = tf.compat.v1.layers.dropout(
                            output[-1],
                            rate=dropout_rate,
                            training=is_training)
                    block_index += 1

        # Create the logits.
        scope = "last_dense_{}".format(str_signature)
        scope = strip_scope(
            scope, phoenix_spec.transfer_learning_spec.transfer_learning_type,
            str_signature)
        with tf.compat.v1.variable_scope(scope):
            tower_spec = create_tower_spec(phoenix_spec, output, architecture,
                                           logits_dimension, is_frozen,
                                           lengths, allow_auxiliary_head)

    set_architecture(architecture, tower_name)
    set_parameter(tower_name, DROPOUTS,
                  (-1.0 if dropout_rate is None else dropout_rate), tf.float32)
    set_parameter(tower_name, IS_FROZEN, int(is_frozen))
    return tower_spec
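
A hedged call sketch for construct_tower. The default-constructed PhoenixSpec, the block type, and the argument values below are assumptions for illustration, not a verified configuration:

import numpy as np

spec = phoenix_spec_pb2.PhoenixSpec()  # assumption: proto defaults suffice for a toy run
images = tf.zeros([8, 32, 32, 3])
arch = np.array([blocks.BlockType.FIXED_CHANNEL_CONVOLUTION_16])
tower_spec = construct_tower(
    phoenix_spec=spec,
    input_tensor=images,
    tower_name="tower_0",
    architecture=arch,
    is_training=True,
    lengths=None,
    logits_dimension=10,
    is_frozen=0)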
Example no. 6
    raise NotImplementedError(
        "ConcatCombiner does not know how to deal with inputs of these shapes. "
        "Input shapes: {}".format(input_shapes))


class CombinerType(enum.IntEnum):
  CONCAT = 0
  IDENTITY = 1


COMBINER_MAP = {
    CombinerType.CONCAT: ConcatCombiner(),
    CombinerType.IDENTITY: IdentityCombiner(),
}

BLOCK_BUILDER_MAP = blocks_builder.Blocks()


class Node(
    collections.namedtuple("Node",
                           ["block_type", "input_indices", "combiner_type"])):
  """Container for the repeated units in an Architecture."""

  def __new__(cls, block_type, input_indices=None, combiner_type=None):
    """Constructs an Node.

    Args:
      block_type: int for the Block type.
      input_indices: List of ints that determines which previous layers to pass
        to the next Combiner. For example, [-1] means to pass only the
        output of the previous block, [-2, -1] means to pass the outputs of the