Code Example #1
File: graph.py Project: rbn42/autokeras
    def produce_model(self):
        """Build a new Keras model based on the current graph."""
        input_tensor = Input(
            shape=get_int_tuple(self.model.inputs[0].shape[1:]))
        input_id = self.node_to_id[self.model.inputs[0]]
        output_id = self.node_to_id[self.model.outputs[0]]

        id_to_tensor = {input_id: input_tensor}
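        # Visit nodes in topological order so that every input tensor has
        # been produced before the layers that consume it.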
        for v in self._topological_order():
            for u, layer_id in self.reverse_adj_list[v]:
                layer = self.layer_list[layer_id]

                if isinstance(layer, (WeightedAdd, Concatenate)):
                    edge_input_tensor = list(
                        map(lambda x: id_to_tensor[x],
                            self.layer_id_to_input_node_ids[layer_id]))
                else:
                    edge_input_tensor = id_to_tensor[u]

                if layer_id in self.old_layer_ids:
                    new_layer = copy_layer(layer)
                else:
                    new_layer = layer

                temp_tensor = new_layer(edge_input_tensor)
                id_to_tensor[v] = temp_tensor
        return Model(input_tensor, id_to_tensor[output_id])
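Two details are worth noting: multi-input layers (WeightedAdd, Concatenate) collect all of their input tensors through layer_id_to_input_node_ids rather than from the single edge source u, and any layer already recorded in old_layer_ids is duplicated with copy_layer, presumably so the rebuilt model does not share layer objects with the previous one.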
Code Example #2
File: graph.py Project: rbn42/autokeras
    def to_add_skip_model(self, start, end):
        """Add a weighted add skip connection from start node to end node.

        Returns:
            A new Keras model with the added connection.
        """
        conv_input_id = self.node_to_id[start.input]
        relu_input_id = self.adj_list[self.node_to_id[end.output]][0][0]

        # Add the pooling layer chain.
        pooling_layer_list = self.get_pooling_layers(conv_input_id,
                                                     relu_input_id)
        skip_output_id = conv_input_id
        for index, layer_id in enumerate(pooling_layer_list):
            layer = self.layer_list[layer_id]
            self._add_node(index)
            new_node_id = self.node_to_id[index]
            self._add_edge(copy_layer(layer), skip_output_id, new_node_id,
                           False)
            skip_output_id = new_node_id

        # Add the weighted add layer.
        self._add_node('a')
        new_node_id = self.node_to_id['a']
        layer = WeightedAdd()
        single_input_shape = get_int_tuple(start.output_shape)
        layer.build([single_input_shape, single_input_shape])

        relu_output_id = self.adj_list[relu_input_id][0][0]
        self._redirect_edge(relu_input_id, relu_output_id, new_node_id)
        self._add_edge(layer, new_node_id, relu_output_id, False)
        self._add_edge(layer, skip_output_id, relu_output_id, False)

        return self.produce_model()
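The wiring works in three steps: the pooling layers on the main path between start and end are copied onto the skip path so both branches reach the junction with the same spatial shape; the existing edge out of relu_input_id is redirected into the new node 'a'; and the WeightedAdd layer is then given two incoming edges, one from the redirected main path via node 'a' and one from the end of the skip path.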
Code Example #3
def wider_bn(layer, start_dim, total_dim, n_add):
    """Get new layer with wider batch normalization for current layer

   Args:
       layer: the layer from which we get new layer with wider batch normalization
       start_dim: the started dimension
       total_dim: the total dimension
       n_add: the output shape

   Returns:
       The new layer with wider batch normalization
   """
    weights = layer.get_weights()

    input_shape = list((None, ) * layer.input_spec.ndim)
    input_shape[-1] = get_int_tuple(layer.gamma.shape)[0]
    input_shape[-1] += n_add

    temp_layer = BatchNormalization()
    add_input_shape = list(input_shape)
    add_input_shape[-1] = n_add
    temp_layer.build(tuple(add_input_shape))
    new_weights = temp_layer.get_weights()

    student_w = tuple()
    for weight, new_weight in zip(weights, new_weights):
        temp_w = weight.copy()
        temp_w = np.concatenate(
            (temp_w[:start_dim], new_weight, temp_w[start_dim:total_dim]))
        student_w += (temp_w, )
    new_layer = BatchNormalization()
    new_layer.build(input_shape)
    new_layer.set_weights(student_w)
    return new_layer
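A minimal usage sketch for wider_bn, assuming Keras 2-style BatchNormalization and that get_int_tuple and numpy are importable as in the snippet above; the shapes (8 existing channels, 4 added) are invented for illustration:

from keras.layers import BatchNormalization

# Build a BatchNormalization layer over 8 channels (hypothetical shape).
old_bn = BatchNormalization()
old_bn.build((None, 32, 32, 8))

# Insert 4 freshly initialized channels at position 4; channels outside
# that slot keep their old gamma/beta/moving statistics.
new_bn = wider_bn(old_bn, start_dim=4, total_dim=8, n_add=4)
assert all(w.shape == (12,) for w in new_bn.get_weights())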
Code Example #4
    def _refresh(self):
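        """Rebuild the Keras model from the current graph and store it in self.model."""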
        input_tensor = Input(
            shape=get_int_tuple(self.model.inputs[0].shape[1:]))
        input_id = self.node_to_id[self.model.inputs[0]]
        output_id = self.node_to_id[self.model.outputs[0]]

        self.node_list[input_id] = input_tensor
        self.node_to_id[input_tensor] = input_id
        for v in self._topological_order():
            for u, layer_id in self.reverse_adj_list[v]:
                layer = self.layer_list[layer_id]

                if isinstance(layer, (WeightedAdd, Concatenate)):
                    edge_input_tensor = list(
                        map(lambda x: self.node_list[x],
                            self.layer_id_to_input_node_ids[layer_id]))
                else:
                    edge_input_tensor = self.node_list[u]

                if layer_id in self.old_layer_ids:
                    new_layer = copy_layer(layer)
                else:
                    new_layer = layer
                    self.old_layer_ids[layer_id] = True

                temp_tensor = new_layer(edge_input_tensor)
                self.node_list[v] = temp_tensor
                self.node_to_id[temp_tensor] = v
        self.model = Model(input_tensor, self.node_list[output_id])
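_refresh is produce_model applied in place: instead of a fresh id_to_tensor map it overwrites the graph's own node_list and node_to_id, and every layer it uses directly is recorded in old_layer_ids so that the next rebuild will copy it instead of reusing it.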
Code Example #5
def to_stub_model(model, weighted=False):
    node_count = 0
    tensor_dict = {}
    ret = StubModel()
    ret.input_shape = model.input_shape
    for layer in model.layers:
        if isinstance(layer.input, list):
            input_nodes = layer.input
        else:
            input_nodes = [layer.input]

        for node in input_nodes + [layer.output]:
            if node not in tensor_dict:
                tensor_dict[node] = StubTensor(get_int_tuple(node.shape))
                node_count += 1

        if isinstance(layer.input, list):
            input_id = []
            for node in layer.input:
                input_id.append(tensor_dict[node])
        else:
            input_id = tensor_dict[layer.input]
        output_id = tensor_dict[layer.output]

        if is_conv_layer(layer):
            temp_stub_layer = StubConv(layer.filters, layer.kernel_size,
                                       layer.__class__, input_id, output_id)
        elif isinstance(layer, Dense):
            temp_stub_layer = StubDense(layer.units, layer.activation,
                                        input_id, output_id)
        elif isinstance(layer, WeightedAdd):
            temp_stub_layer = StubWeightedAdd(input_id, output_id)
        elif isinstance(layer, Concatenate):
            temp_stub_layer = StubConcatenate(input_id, output_id)
        elif isinstance(layer, BatchNormalization):
            temp_stub_layer = StubBatchNormalization(input_id, output_id)
        elif isinstance(layer, Activation):
            temp_stub_layer = StubActivation(layer.activation, input_id,
                                             output_id)
        elif isinstance(layer, InputLayer):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif isinstance(layer, Flatten):
            temp_stub_layer = StubFlatten(input_id, output_id)
        elif isinstance(layer, Dropout):
            temp_stub_layer = StubDropout(layer.rate, input_id, output_id)
        elif is_pooling_layer(layer):
            temp_stub_layer = StubPooling(layer.__class__, input_id, output_id)
        elif is_global_pooling_layer(layer):
            temp_stub_layer = StubGlobalPooling(layer.__class__, input_id,
                                                output_id)
        else:
            raise TypeError("The layer {} is illegal.".format(layer))
        if weighted:
            temp_stub_layer.set_weights(layer.get_weights())
        ret.add_layer(temp_stub_layer)
    ret.inputs = [tensor_dict[model.inputs[0]]]
    ret.outputs = [tensor_dict[model.outputs[0]]]
    return ret
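A short usage sketch, assuming the stub classes and helpers above are importable; the model is a made-up Keras 2 functional model:

from keras.models import Model
from keras.layers import Input, Conv2D, Flatten, Dense

# A tiny hypothetical model to convert.
inputs = Input(shape=(32, 32, 3))
x = Conv2D(8, (3, 3))(inputs)
x = Flatten()(x)
outputs = Dense(10)(x)
model = Model(inputs, outputs)

# weighted=True also copies each layer's weights into its stub.
stub = to_stub_model(model, weighted=True)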
Code Example #6
File: graph.py Project: rbn42/autokeras
    def to_concat_skip_model(self, start, end):
        """Add a weighted add concatenate connection from start node to end node.

        Returns:
            A new Keras model with the added connection.
        """
        conv_input_id = self.node_to_id[start.input]
        relu_input_id = self.adj_list[self.node_to_id[end.output]][0][0]

        # Add the pooling layer chain.
        pooling_layer_list = self.get_pooling_layers(conv_input_id,
                                                     relu_input_id)
        skip_output_id = conv_input_id
        for index, layer_id in enumerate(pooling_layer_list):
            layer = self.layer_list[layer_id]
            self._add_node(index)
            new_node_id = self.node_to_id[index]
            self._add_edge(copy_layer(layer), skip_output_id, new_node_id,
                           False)
            skip_output_id = new_node_id

        # Add the concatenate layer.
        self._add_node('a')
        new_node_id = self.node_to_id['a']
        layer = Concatenate()
        left_input_shape = get_int_tuple(end.output_shape)
        right_input_shape = np.concatenate(
            (left_input_shape[:-1], get_int_tuple(start.output_shape[-1:])))
        layer.build([left_input_shape, right_input_shape])

        relu_output_id = self.adj_list[relu_input_id][0][0]
        self._redirect_edge(relu_input_id, relu_output_id, new_node_id)
        self._add_edge(layer, new_node_id, relu_output_id, False)
        self._add_edge(layer, skip_output_id, relu_output_id, False)

        # Widen the related layers.
        self.next_vis = [False] * self.n_nodes
        self.pre_vis = [False] * self.n_nodes
        self.middle_layer_vis = [False] * len(self.layer_list)

        self.pre_vis[relu_output_id] = True
        dim = get_int_tuple(end.output_shape)[-1]
        n_add = get_int_tuple(start.output_shape)[-1]
        self._search_next(relu_output_id, dim, dim, n_add)
        return self.produce_model()
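Unlike to_add_skip_model, concatenation changes the channel count of the tensor flowing on past the junction, so the method finishes by marking relu_output_id as visited and calling _search_next to widen every affected downstream layer by n_add, the number of channels contributed by the skip branch.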
Code Example #7
File: graph.py Project: rbn42/autokeras
    def to_wider_model(self, pre_layer, n_add):
        """Widen the last dimension of the output of the pre_layer.

        Args:
            pre_layer: A convolutional layer or dense layer.
            n_add: The number of dimensions to add.

        Returns:
            A new Keras model with the widened layers.
        """
        output_id = self.node_to_id[pre_layer.output]
        self.next_vis = [False] * self.n_nodes
        self.pre_vis = [False] * self.n_nodes
        self.middle_layer_vis = [False] * len(self.layer_list)
        dim = get_int_tuple(pre_layer.output_shape)[-1]
        self._search_next(output_id, dim, dim, n_add)
        return self.produce_model()
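A hypothetical usage sketch; the Graph(model) constructor and the layer index are assumptions made for illustration:

# Wrap an existing Keras model in this project's graph representation
# (Graph(model) is assumed here), then widen the output of one of its
# convolutional layers by 32 filters.
graph = Graph(model)
conv_layer = model.layers[1]
wider_model = graph.to_wider_model(conv_layer, n_add=32)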
Code Example #8
def wider_weighted_add(layer, n_add):
    """Return wider weighted add layer

    Args:
        layer: the layer from which we get wider weighted add layer
        n_add: output shape

    Returns:
        The wider weighted add layer
    """
    input_shape, _ = get_int_tuple(layer.input_shape)
    input_shape = list(input_shape)
    input_shape[-1] += n_add
    new_layer = WeightedAdd()
    # Build before set_weights: Keras cannot copy weights into an unbuilt layer.
    new_layer.build([input_shape, input_shape])
    new_layer.set_weights(layer.get_weights())
    return new_layer
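Unlike wider_bn, no weight surgery happens here: the old weights are copied across unchanged, which only works because, as the reuse of get_weights suggests, WeightedAdd's parameters do not grow with the channel count; widening the layer therefore amounts to rebuilding it with the larger input shapes.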
Code Example #9
File: graph.py Project: bradbann/autokeras
    def produce_model(self):
        """Build a new Keras model based on the current graph."""
        input_tensor = Input(shape=get_int_tuple(self.input.shape[1:]))
        input_id = self.node_to_id[self.input]
        output_id = self.node_to_id[self.output]

        new_to_old_layer = {}

        node_list = deepcopy(self.node_list)
        node_list[input_id] = input_tensor

        node_to_id = deepcopy(self.node_to_id)
        node_to_id[input_tensor] = input_id

        for v in self._topological_order():
            for u, layer_id in self.reverse_adj_list[v]:
                layer = self.layer_list[layer_id]

                if isinstance(layer, (StubWeightedAdd, StubConcatenate)):
                    edge_input_tensor = list(
                        map(lambda x: node_list[x],
                            self.layer_id_to_input_node_ids[layer_id]))
                else:
                    edge_input_tensor = node_list[u]

                new_layer = to_real_layer(layer)
                new_to_old_layer[new_layer] = layer

                temp_tensor = new_layer(edge_input_tensor)
                node_list[v] = temp_tensor
                node_to_id[temp_tensor] = v
        model = Model(input_tensor, node_list[output_id])
        for layer in model.layers[1:]:
            if not isinstance(layer, (Activation, Dropout, Concatenate)):
                old_layer = new_to_old_layer[layer]
                if self.weighted:
                    layer.set_weights(old_layer.get_weights())
        return model
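This later version of produce_model works on a graph of stub layers: to_real_layer turns each stub into a real Keras layer, and the new_to_old_layer map is kept so that, when the graph is weighted, the stored weights can be copied into every rebuilt layer except the weight-free Activation, Dropout, and Concatenate layers.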