Example #1
File: graph.py  Project: rbn42/autokeras
    def to_add_skip_model(self, start, end):
        """Add a weighted add skip connection from start node to end node.

        Returns:
            A new Keras model with the added connection.
        """
        conv_input_id = self.node_to_id[start.input]
        relu_input_id = self.adj_list[self.node_to_id[end.output]][0][0]

        # Replicate the pooling layers between start and end so the skip path
        # is downsampled to the same spatial size as the main path.
        pooling_layer_list = self.get_pooling_layers(conv_input_id,
                                                     relu_input_id)
        skip_output_id = conv_input_id
        for index, layer_id in enumerate(pooling_layer_list):
            layer = self.layer_list[layer_id]
            self._add_node(index)
            new_node_id = self.node_to_id[index]
            self._add_edge(copy_layer(layer), skip_output_id, new_node_id,
                           False)
            skip_output_id = new_node_id

        # Add the weighted add layer.
        self._add_node('a')
        new_node_id = self.node_to_id['a']
        layer = WeightedAdd()
        single_input_shape = get_int_tuple(start.output_shape)
        layer.build([single_input_shape, single_input_shape])

        relu_output_id = self.adj_list[relu_input_id][0][0]
        self._redirect_edge(relu_input_id, relu_output_id, new_node_id)
        self._add_edge(layer, new_node_id, relu_output_id, False)
        self._add_edge(layer, skip_output_id, relu_output_id, False)

        return self.produce_model()
Example #2
from keras.layers import (Activation, BatchNormalization, Concatenate, Dense,
                          Dropout, Flatten)
from keras.regularizers import l2


# `is_layer` and `WeightedAdd` are autokeras-internal helpers.
def to_real_layer(layer):
    """Map an abstract (stub) layer description onto a concrete Keras layer."""
    if is_layer(layer, 'Dense'):
        return Dense(layer.units, activation=layer.activation)
    if is_layer(layer, 'Conv'):
        # layer.func holds the concrete Keras class (e.g. Conv2D).
        return layer.func(layer.filters,
                          kernel_size=layer.kernel_size,
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(1e-4))
    if is_layer(layer, 'Pooling'):
        return layer.func(padding='same')
    if is_layer(layer, 'BatchNormalization'):
        return BatchNormalization()
    if is_layer(layer, 'Concatenate'):
        return Concatenate()
    if is_layer(layer, 'WeightedAdd'):
        return WeightedAdd()
    if is_layer(layer, 'Dropout'):
        return Dropout(layer.rate)
    if is_layer(layer, 'Activation'):
        return Activation(layer.func)
    if is_layer(layer, 'Flatten'):
        return Flatten()
    if is_layer(layer, 'GlobalAveragePooling'):
        return layer.func()
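
As a purely hypothetical usage sketch, assuming is_layer matches on the class name of the stub object (StubDense below is an illustrative stand-in, not necessarily an actual autokeras class):

from collections import namedtuple

# Illustrative stand-in for an abstract layer description.
StubDense = namedtuple('StubDense', ['units', 'activation'])

stub = StubDense(units=64, activation='relu')
real = to_real_layer(stub)  # -> keras.layers.Dense(64, activation='relu')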
Example #3
def wider_weighted_add(layer, n_add):
    """Return a wider version of a WeightedAdd layer.

    Args:
        layer: The WeightedAdd layer to widen.
        n_add: The number of channels to add to the last input dimension.

    Returns:
        The new, wider WeightedAdd layer.
    """
    input_shape, _ = get_int_tuple(layer.input_shape)
    input_shape = list(input_shape)
    input_shape[-1] += n_add
    new_layer = WeightedAdd()
    # new_layer.build([input_shape, input_shape])
    # The weights of WeightedAdd do not depend on the channel count, so the
    # old weights can be reused unchanged.
    new_layer.set_weights(layer.get_weights())
    return new_layer
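
A hypothetical usage sketch (shapes and wiring are illustrative, not taken from autokeras): the layer must have been called inside a model so that its input_shape is defined, after which a widened copy can be requested.

from keras.layers import Input
from keras.models import Model

a = Input(shape=(5, 5, 3))
b = Input(shape=(5, 5, 3))
c = WeightedAdd()([a, b])
model = Model(inputs=[a, b], outputs=c)

old_layer = model.layers[-1]
new_layer = wider_weighted_add(old_layer, n_add=3)  # expects 3 extra channels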
Example #4
import numpy as np
from keras.layers import Input
from keras.losses import mean_squared_error
from keras.models import Model


# `WeightedAdd` is the autokeras-internal layer under test.
def my_layer():
    """Test a single WeightedAdd layer in isolation."""
    a = Input(shape=(3, 3, 2))
    b = WeightedAdd()(a)
    model = Model(inputs=a, outputs=b)
    data = np.ones((1, 3, 3, 2))
    print(model.predict_on_batch(data))
    model.compile(optimizer='Adam', loss=mean_squared_error)
    model.fit(data, data, epochs=1000)
    print(model.predict_on_batch(data))
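
The WeightedAdd layer itself is internal to autokeras and never shown on this page. As a rough, illustrative sketch only (the weight name, initializer, and single-scalar parameterization are assumptions, and the real layer apparently also accepts a single input, as the test above shows), a minimal Keras custom layer computing a learnable weighted sum of two same-shape tensors could look like this:

from keras.layers import Layer


class WeightedAdd(Layer):
    """Sketch only: a learnable weighted sum of two same-shape tensors."""

    def build(self, input_shape):
        # One scalar that interpolates between the two inputs (assumed).
        self.one = self.add_weight(name='one',
                                   shape=(1,),
                                   initializer='ones',
                                   trainable=True)
        super(WeightedAdd, self).build(input_shape)

    def call(self, inputs):
        a, b = inputs
        return self.one * a + (1 - self.one) * b

    def compute_output_shape(self, input_shape):
        return input_shape[0]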
Example #5
def get_add_skip_model():
    """Build a small pre-activation CNN with two WeightedAdd skip connections."""
    output_tensor = input_tensor = Input(shape=(5, 5, 3))
    output_tensor = BatchNormalization()(output_tensor)
    output_tensor = Activation('relu')(output_tensor)
    output_tensor = Conv2D(3,
                           kernel_size=(3, 3),
                           padding='same',
                           activation='linear')(output_tensor)
    output_tensor = Dropout(constant.CONV_DROPOUT_RATE)(output_tensor)

    output_tensor = BatchNormalization()(output_tensor)
    output_tensor = Activation('relu')(output_tensor)
    output_tensor = Conv2D(3,
                           kernel_size=(3, 3),
                           padding='same',
                           activation='linear')(output_tensor)
    output_tensor = Dropout(constant.CONV_DROPOUT_RATE)(output_tensor)

    add_input = output_tensor  # start of the first skip connection
    output_tensor = BatchNormalization()(output_tensor)
    output_tensor = Activation('relu')(output_tensor)
    output_tensor = Conv2D(3,
                           kernel_size=(3, 3),
                           padding='same',
                           activation='linear')(output_tensor)
    output_tensor = Dropout(constant.CONV_DROPOUT_RATE)(output_tensor)

    output_tensor = WeightedAdd()([output_tensor, add_input])
    add_input = output_tensor  # start of the second skip connection
    output_tensor = BatchNormalization()(output_tensor)
    output_tensor = Activation('relu')(output_tensor)
    output_tensor = Conv2D(3,
                           kernel_size=(3, 3),
                           padding='same',
                           activation='linear')(output_tensor)
    output_tensor = Dropout(constant.CONV_DROPOUT_RATE)(output_tensor)

    output_tensor = WeightedAdd()([output_tensor, add_input])
    output_tensor = Flatten()(output_tensor)
    output_tensor = Dense(5, activation='relu')(output_tensor)
    output_tensor = Dropout(constant.DENSE_DROPOUT_RATE)(output_tensor)
    output_tensor = Dense(5, activation='softmax')(output_tensor)
    return Model(inputs=input_tensor, outputs=output_tensor)
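
A minimal usage sketch: instantiate the fixture and inspect the graph; both WeightedAdd layers should appear in the layer listing.

model = get_add_skip_model()
model.summary()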
Example #6
    def to_add_skip_model(self, start_id, end_id):
        """Add a weighted add skip connection from before start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.

        Returns:
            A new Keras model with the added connection.
        """
        self.operation_history.append(('to_add_skip_model', start_id, end_id))
        conv_layer_ids = self._conv_layer_ids_in_order()
        # The skip path starts in front of the block of the *next* conv layer.
        start_id = conv_layer_ids[conv_layer_ids.index(start_id) + 1]
        conv_block_input_id = self.layer_id_to_input_node_ids[start_id][0]
        # Walk two edges back, past the ReLU and BatchNormalization nodes, to
        # reach the input of the whole conv block.
        conv_block_input_id = self.reverse_adj_list[conv_block_input_id][0][0]
        conv_block_input_id = self.reverse_adj_list[conv_block_input_id][0][0]

        dropout_input_id = self._conv_block_end_node(end_id)

        # Add the pooling layer chain.
        pooling_layer_list = self._get_pooling_layers(conv_block_input_id,
                                                      dropout_input_id)
        skip_output_id = conv_block_input_id
        for index, layer_id in enumerate(pooling_layer_list):
            layer = self.layer_list[layer_id]
            new_node_id = self._add_new_node()
            self._add_edge(deepcopy(layer), skip_output_id, new_node_id)
            skip_output_id = new_node_id

        # Add the weighted add layer.
        new_node_id = self._add_new_node()
        layer = StubWeightedAdd()
        if self.weighted:
            layer.set_weights(WeightedAdd().get_weights())

        dropout_output_id = self.adj_list[dropout_input_id][0][0]
        self._redirect_edge(dropout_input_id, dropout_output_id, new_node_id)
        self._add_edge(layer, new_node_id, dropout_output_id)
        self._add_edge(layer, skip_output_id, dropout_output_id)
Example #7
    def _new_weighted_add_layer(self):
        return WeightedAdd()