Example #1
    def __call__(self, inputs):
        self.inputs = layer_utils.format_inputs(inputs, self.name)
        for input_node in self.inputs:
            input_node.add_out_hypermodel(self)
        self.outputs = []
        for _ in range(self._num_output_node):
            output_node = hyper_node.Node()
            output_node.add_in_hypermodel(self)
            self.outputs.append(output_node)
        return self.outputs
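
Note: this __call__ is what lets blocks chain functionally, Keras-style. A minimal usage sketch (DenseBlock and ClassificationHead are placeholder block names, not from the snippet):

    input_node = hyper_node.Node()
    hidden = DenseBlock()(input_node)        # returns a list with one Node
    output = ClassificationHead()(hidden)    # format_inputs unwraps the list
    # Each call records graph edges via add_out_hypermodel /
    # add_in_hypermodel and returns the block's fresh output nodes.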
Example #2
    def build(self, hp, inputs=None):
        # TODO: make it more advanced, selecting from multiple models, e.g.,
        #  ResNet.
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node

        for i in range(hp.Choice('num_layers', [1, 2, 3], default=2)):
            output_node = tf.keras.layers.Conv2D(
                hp.Choice('units_{i}'.format(i=i), [16, 32, 64], default=32),
                hp.Choice('kernel_size_{i}'.format(i=i), [3, 5, 7],
                          default=3))(output_node)
        return output_node
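
Outside of a running search, each hp.Choice call just registers the hyperparameter and returns its default, which makes the space above easy to inspect. A quick sketch against the keras-tuner HyperParameters API (assuming the pre-1.0 kerastuner package name):

    import kerastuner as kt

    hp = kt.HyperParameters()
    num_layers = hp.Choice('num_layers', [1, 2, 3], default=2)  # -> 2
    units_0 = hp.Choice('units_0', [16, 32, 64], default=32)    # -> 32
    print(hp.values)  # {'num_layers': 2, 'units_0': 32}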
Example #3
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node
        if len(self.output_shape) == 1:
            output_node = hyper_block.Flatten().build(hp, output_node)
            output_node = tf.keras.layers.Dense(
                self.output_shape[0])(output_node)
            output_node = tf.keras.layers.Softmax()(output_node)
            return output_node

        # TODO: Add hp.Choice to use sigmoid

        return hyper_block.Reshape(self.output_shape).build(hp, output_node)
Example #4
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node

        block_type = hp.Choice('block_type', ['resnet', 'xception', 'vanilla'],
                               default='resnet')

        if block_type == 'resnet':
            output_node = ResNetBlock().build(hp, output_node)
        elif block_type == 'xception':
            output_node = XceptionBlock().build(hp, output_node)
        elif block_type == 'vanilla':
            output_node = ConvBlock().build(hp, output_node)
        return output_node
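
One caveat with this branch-per-block pattern: hyperparameters registered inside unselected branches share one flat namespace. Later keras-tuner releases provide hp.conditional_scope to activate child parameters only for a given parent value; a sketch (the 'version' parameter is illustrative, not from the snippet):

    import kerastuner as kt

    hp = kt.HyperParameters()
    block_type = hp.Choice('block_type', ['resnet', 'xception', 'vanilla'],
                           default='resnet')
    with hp.conditional_scope('block_type', ['resnet']):
        # Only active when block_type == 'resnet'.
        version = hp.Choice('version', ['v1', 'v2'], default='v2')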
Example #5
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node
        if len(output_node.shape) > 5:
            raise ValueError(
                'Expect the input tensor to have at most 5 '
                'dimensions, but got {shape}'.format(shape=output_node.shape))
        # Reduce the tensor to rank 2 with global average pooling.
        # TODO: Add hp.Choice to use Flatten()
        if len(output_node.shape) > 2:
            global_average_pooling = \
                layer_utils.get_global_average_pooling_layer_class(
                    output_node.shape)
            output_node = global_average_pooling()(output_node)
        return output_node
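
The layer_utils helper is not shown in the snippet; a plausible implementation simply dispatches on tensor rank (hypothetical, the real helper may differ):

    import tensorflow as tf

    def get_global_average_pooling_layer_class(shape):
        # shape includes the batch dimension: rank 3 -> 1D, 4 -> 2D, 5 -> 3D.
        return {
            3: tf.keras.layers.GlobalAveragePooling1D,
            4: tf.keras.layers.GlobalAveragePooling2D,
            5: tf.keras.layers.GlobalAveragePooling3D,
        }[len(shape)]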
Example #6
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        shape = input_node.shape.as_list()
        if len(shape) < 3:
            raise ValueError("Expect the input tensor to have "
                             "at least 3 dimensions for rnn models, "
                             "but got {shape}".format(shape=input_node.shape))

        # Flatten feature_list to a single dimension.
        # Final shape 3-D (num_sample , time_steps , features)
        feature_size = np.prod(shape[2:])
        input_node = tf.reshape(input_node, [-1, shape[1], feature_size])
        output_node = input_node

        in_layer = const.Constant.RNN_LAYERS[hp.Choice('rnn_type',
                                                       ['gru', 'lstm'],
                                                       default='lstm')]
        choice_of_layers = hp.Choice('num_layers', [1, 2, 3], default=2)

        bidirectional = self.bidirectional
        if bidirectional is None:
            bidirectional = hp.Choice('bidirectional', [True, False],
                                      default=True)

        return_sequences = self.return_sequences
        if return_sequences is None:
            return_sequences = hp.Choice('return_sequences', [True, False],
                                         default=True)

        for i in range(choice_of_layers):
            temp_return_sequences = True
            if i == choice_of_layers - 1:
                temp_return_sequences = return_sequences
            if bidirectional:
                output_node = tf.keras.layers.Bidirectional(
                    in_layer(
                        feature_size,
                        return_sequences=temp_return_sequences))(output_node)
            else:
                output_node = in_layer(
                    feature_size,
                    return_sequences=temp_return_sequences)(output_node)

        return output_node
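
const.Constant.RNN_LAYERS is not shown either; given how in_layer is instantiated, it is presumably a name-to-layer-class map along these lines (an assumption, not from the source):

    import tensorflow as tf

    class Constant:
        RNN_LAYERS = {
            'gru': tf.keras.layers.GRU,
            'lstm': tf.keras.layers.LSTM,
        }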
Example #7
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node

        # No need to reduce.
        if len(output_node.shape) <= 2:
            return output_node

        reduction_type = hp.Choice('reduction_type',
                                   ['flatten', 'global_max', 'global_ave'],
                                   default='global_ave')
        if reduction_type == 'flatten':
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == 'global_max':
            output_node = layer_utils.get_global_max_pooling_layer(
                output_node.shape)()(output_node)
        elif reduction_type == 'global_ave':
            output_node = layer_utils.get_global_average_pooling_layer(
                output_node.shape)()(output_node)
        return output_node
Example #8
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node
        output_node = Flatten().build(hp, output_node)
        active_category = hp.Choice('activate_category',
                                    ['softmax', 'relu', 'tanh', 'sigmoid'],
                                    default='relu')
        layer_stack = hp.Choice('layer_stack',
                                ['dense-bn-act', 'dense-act', 'act-bn-dense'],
                                default='act-bn-dense')
        for i in range(hp.Choice('num_layers', [1, 2, 3], default=2)):
            if layer_stack == 'dense-bn-act':
                output_node = tf.keras.layers.Dense(
                    hp.Choice('units_{i}'.format(i=i),
                              [16, 32, 64, 128, 256, 512, 1024],
                              default=32))(output_node)
                output_node = tf.keras.layers.BatchNormalization()(output_node)
                output_node = tf.keras.layers.Activation(active_category)(
                    output_node)
                output_node = tf.keras.layers.Dropout(rate=hp.Choice(
                    'dropout_rate', [0.0, 0.25, 0.5],
                    default=0.5))(output_node)
            elif layer_stack == 'dense-act':
                output_node = tf.keras.layers.Dense(
                    hp.Choice('units_{i}'.format(i=i),
                              [16, 32, 64, 128, 256, 512, 1024],
                              default=32))(output_node)
                output_node = tf.keras.layers.Activation(active_category)(
                    output_node)
                output_node = tf.keras.layers.Dropout(rate=hp.Choice(
                    'dropout_rate', [0.0, 0.25, 0.5],
                    default=0.5))(output_node)
            else:
                output_node = tf.keras.layers.Activation(active_category)(
                    output_node)
                output_node = tf.keras.layers.BatchNormalization()(output_node)
                output_node = tf.keras.layers.Dense(
                    hp.Choice('units_{i}'.format(i=i),
                              [16, 32, 64, 128, 256, 512, 1024],
                              default=32))(output_node)
                output_node = tf.keras.layers.Dropout(rate=hp.Choice(
                    'dropout_rate', [0.0, 0.25, 0.5],
                    default=0.5))(output_node)
        return output_node
Example #9
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node
        channel_axis = 1 \
            if tf.keras.backend.image_data_format() == 'channels_first' else -1

        # Parameters
        # [general]
        kernel_size = hp.Range("kernel_size", 3, 5)
        initial_strides = (2, 2)
        activation = hp.Choice("activation", ["relu", "selu"])
        # [Entry Flow]
        conv2d_filters = hp.Choice("conv2d_num_filters", [32, 64, 128])
        sep_filters = hp.Range("sep_num_filters", 128, 768)
        # [Middle Flow]
        residual_blocks = hp.Range("num_residual_blocks", 2, 8)
        # [Exit Flow]

        # Model
        # Initial conv2d
        dims = conv2d_filters
        output_node = self._conv(dims,
                                 kernel_size=(kernel_size, kernel_size),
                                 activation=activation,
                                 strides=initial_strides)(output_node)
        # Separable convs
        dims = sep_filters
        for _ in range(residual_blocks):
            output_node = self._residual(
                dims,
                activation=activation,
                max_pooling=False,
                channel_axis=channel_axis)(output_node)
        # Exit
        dims *= 2
        output_node = self._residual(dims,
                                     activation=activation,
                                     max_pooling=True,
                                     channel_axis=channel_axis)(output_node)

        return output_node
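
hp.Range comes from early keras-tuner; later releases renamed it to hp.Int with the same min/max semantics. The declarations above would translate as follows (a sketch against the newer API):

    import kerastuner as kt

    hp = kt.HyperParameters()
    kernel_size = hp.Int('kernel_size', 3, 5)              # was hp.Range
    sep_filters = hp.Int('sep_num_filters', 128, 768)
    residual_blocks = hp.Int('num_residual_blocks', 2, 8)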
Example #10
    def build(self, hp, inputs=None):
        inputs = layer_utils.format_inputs(inputs, self.name)
        if len(inputs) == 1:
            return inputs

        if not all([
                shape_compatible(input_node.shape, inputs[0].shape)
                for input_node in inputs
        ]):
            new_inputs = []
            for input_node in inputs:
                new_inputs.append(Flatten().build(hp, input_node))
            inputs = new_inputs

        # TODO: Even inputs have different shape[-1], they can still be Add(
        #  ) after another layer. Check if the inputs are all of the same
        #  shape
        if all([input_node.shape == inputs[0].shape for input_node in inputs]):
            if hp.Choice('merge_type', ['Add', 'Concatenate'],
                         default='Add') == 'Add':
                return tf.keras.layers.Add()(inputs)

        # Fall back to Concatenate, which also covers inputs whose
        # feature dimensions differ.
        return tf.keras.layers.Concatenate()(inputs)
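
shape_compatible is referenced but not defined here; one plausible definition, matching the TODO's intent that flattened inputs may differ only in the feature dimension (an assumption):

    def shape_compatible(shape1, shape2):
        if len(shape1) != len(shape2):
            return False
        # Allow a mismatch only in the last (feature) dimension.
        return shape1[:-1] == shape2[:-1]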
Example #11
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node

        # No need to reduce.
        if len(output_node.shape) <= 2:
            return output_node

        reduction_type = hp.Choice('reduction_type',
                                   ['flatten', 'max', 'ave', 'min'],
                                   default='ave')

        if reduction_type == 'flatten':
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == 'max':
            output_node = tf.math.reduce_max(output_node, axis=-2)
        elif reduction_type == 'ave':
            output_node = tf.math.reduce_mean(output_node, axis=-2)
        elif reduction_type == 'min':
            output_node = tf.math.reduce_min(output_node, axis=-2)

        return output_node
Example #12
    def build(self, hp):
        real_nodes = {}
        for input_node in self._model_inputs:
            node_id = self._node_to_id[input_node]
            real_nodes[node_id] = input_node.build()
        for hypermodel in self._hypermodels:
            if isinstance(hypermodel, processor.HyperPreprocessor):
                continue
            temp_inputs = [real_nodes[self._node_to_id[input_node]]
                           for input_node in hypermodel.inputs]
            outputs = hypermodel.build(hp, inputs=temp_inputs)
            outputs = layer_utils.format_inputs(outputs, hypermodel.name)
            for output_node, real_output_node in zip(hypermodel.outputs,
                                                     outputs):
                real_nodes[self._node_to_id[output_node]] = real_output_node
        model = tf.keras.Model(
            [real_nodes[self._node_to_id[input_node]] for input_node in
             self._model_inputs],
            [real_nodes[self._node_to_id[output_node]] for output_node in
             self.outputs])

        return self._compiled(hp, model)
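
A usage sketch for this graph-level build (the graph instance name and the behavior of _compiled are assumed from context): passing a fresh HyperParameters yields a compiled tf.keras.Model at the search-space defaults.

    import kerastuner as kt

    hp = kt.HyperParameters()
    model = graph.build(hp)  # a compiled tf.keras.Model
    model.summary()
    print(hp.values)         # every hyperparameter the build registered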
Example #13
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        output_node = input_node
        output_node = Flatten().build(hp, output_node)
        layer_stack = hp.Choice('layer_stack', ['dense-bn-act', 'dense-act'],
                                default='dense-bn-act')
        dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0.5)
        for i in range(hp.Choice('num_layers', [1, 2, 3], default=2)):
            units = hp.Choice('units_{i}'.format(i=i),
                              [16, 32, 64, 128, 256, 512, 1024],
                              default=32)
            if layer_stack == 'dense-bn-act':
                output_node = tf.keras.layers.Dense(units)(output_node)
                output_node = tf.keras.layers.BatchNormalization()(output_node)
                output_node = tf.keras.layers.ReLU()(output_node)
                output_node = tf.keras.layers.Dropout(dropout_rate)(
                    output_node)
            elif layer_stack == 'dense-act':
                output_node = tf.keras.layers.Dense(units)(output_node)
                output_node = tf.keras.layers.ReLU()(output_node)
                output_node = tf.keras.layers.Dropout(dropout_rate)(
                    output_node)
        return output_node
Example #14
    def build(self, hp, inputs=None):
        input_node = layer_utils.format_inputs(inputs, self.name, num=1)[0]
        if len(input_node.shape) > 2:
            return tf.keras.layers.Flatten()(input_node)
        return input_node
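
A quick shape check of the rank guard, using plain Keras symbolic inputs:

    import tensorflow as tf

    img = tf.keras.Input(shape=(28, 28, 3))      # rank 4 -> gets flattened
    print(tf.keras.layers.Flatten()(img).shape)  # (None, 2352)
    vec = tf.keras.Input(shape=(10,))            # rank 2 -> passed through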