Example #1
def to_skip_connection_graph(graph):
    """Add a randomly chosen skip connection to the graph.

    Args:
        graph: the model graph to which the skip connection is added.

    Returns:
        The graph with the new skip connection.
    """
    # The last conv layer cannot be widened, since the wider operator
    # cannot cross the flatten layer.
    weighted_layers = list(filter(is_conv_layer, graph.layer_list))[:-1]
    index_a = randint(0, len(weighted_layers) - 1)
    index_b = randint(0, len(weighted_layers) - 1)
    if index_a == index_b:
        if index_b == 0:
            index_a = index_b + 1
        else:
            index_a = index_b - 1
    if index_a > index_b:
        index_a, index_b = index_b, index_a
    a = weighted_layers[index_a]
    b = weighted_layers[index_b]
    a_id = graph.layer_to_id[a]
    b_id = graph.layer_to_id[b]
    if a.output_shape[-1] != b.output_shape[-1]:
        graph.to_concat_skip_model(a_id, b_id)
    elif random() < 0.5:
        graph.to_add_skip_model(a_id, b_id)
    else:
        graph.to_concat_skip_model(a_id, b_id)
    return graph
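A minimal usage sketch for this mutation, for context. The Graph constructor follows Examples #9 and #14, which build a Graph from a Keras model; produce_model is a hypothetical call that rebuilds a trainable model from the mutated graph, so the exact API may differ.

from copy import deepcopy

def mutate_with_skip(model):
    # Wrap the Keras model in a graph, as in Examples #9 and #14.
    graph = Graph(model)
    # Mutate a copy so the original graph is left untouched.
    new_graph = to_skip_connection_graph(deepcopy(graph))
    # Hypothetical: turn the mutated graph back into a Keras model.
    return new_graph.produce_model()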
Example #2
    def _search_pre(self, u, start_dim, total_dim, n_add):
        """Search the graph upward (toward the inputs) to widen layers.

        Args:
            u: The starting node identifier.
            start_dim: The index at which the additional dimensions are inserted.
            total_dim: The total number of dimensions the layer has before widening.
            n_add: The number of dimensions to add.
        """
        if self.pre_vis[u]:
            return
        self.pre_vis[u] = True
        self._search_next(u, start_dim, total_dim, n_add)
        for v, layer_id in self.reverse_adj_list[u]:
            layer = self.layer_list[layer_id]
            if is_conv_layer(layer):
                new_layer = wider_pre_conv(layer, n_add)
                self._replace_layer(layer_id, new_layer)
            elif isinstance(layer, Dense):
                new_layer = wider_pre_dense(layer, n_add)
                self._replace_layer(layer_id, new_layer)
            elif isinstance(layer, BatchNormalization):
                self._search_pre(v, start_dim, total_dim, n_add)
            elif isinstance(layer, Concatenate):
                if self.node_list[v] is layer.input[1]:
                    # v is on the right
                    pre_total_dim = layer.input_shape[1][-1]
                    pre_start_dim = start_dim - (total_dim - pre_total_dim)
                    self._search_pre(v, pre_start_dim, pre_total_dim, n_add)
            else:
                self._search_pre(v, start_dim, total_dim, n_add)
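The pre_start_dim arithmetic in the Concatenate branch maps an insertion index expressed in the concatenated output's coordinates back into the right-hand input's coordinates. A worked example with illustrative numbers:

# A Concatenate joins 32 left channels and 64 right channels into 96.
total_dim = 96       # channels of the concatenated output
pre_total_dim = 64   # channels of the right-hand input
start_dim = 74       # insertion index, output coordinates (inside the right part)
pre_start_dim = start_dim - (total_dim - pre_total_dim)
assert pre_start_dim == 42  # output index 74 is index 42 of the right input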
Example #3
def to_stub_model(model, weighted=False):
    """Convert a Keras model into a lightweight stub model.

    When `weighted` is True, the original layer weights are copied into
    the stub layers.
    """
    node_count = 0
    tensor_dict = {}
    ret = StubModel()
    ret.input_shape = model.input_shape
    for layer in model.layers:
        if isinstance(layer.input, list):
            input_nodes = layer.input
        else:
            input_nodes = [layer.input]

        for node in input_nodes + [layer.output]:
            if node not in tensor_dict:
                tensor_dict[node] = StubTensor(get_int_tuple(node.shape))
                node_count += 1

        if isinstance(layer.input, list):
            input_id = []
            for node in layer.input:
                input_id.append(tensor_dict[node])
        else:
            input_id = tensor_dict[layer.input]
        output_id = tensor_dict[layer.output]

        if is_conv_layer(layer):
            temp_stub_layer = StubConv(layer.filters, layer.kernel_size,
                                       layer.__class__, input_id, output_id)
        elif isinstance(layer, Dense):
            temp_stub_layer = StubDense(layer.units, layer.activation,
                                        input_id, output_id)
        elif isinstance(layer, WeightedAdd):
            temp_stub_layer = StubWeightedAdd(input_id, output_id)
        elif isinstance(layer, Concatenate):
            temp_stub_layer = StubConcatenate(input_id, output_id)
        elif isinstance(layer, BatchNormalization):
            temp_stub_layer = StubBatchNormalization(input_id, output_id)
        elif isinstance(layer, Activation):
            temp_stub_layer = StubActivation(layer.activation, input_id,
                                             output_id)
        elif isinstance(layer, InputLayer):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif isinstance(layer, Flatten):
            temp_stub_layer = StubFlatten(input_id, output_id)
        elif isinstance(layer, Dropout):
            temp_stub_layer = StubDropout(layer.rate, input_id, output_id)
        elif is_pooling_layer(layer):
            temp_stub_layer = StubPooling(layer.__class__, input_id, output_id)
        elif is_global_pooling_layer(layer):
            temp_stub_layer = StubGlobalPooling(layer.__class__, input_id,
                                                output_id)
        else:
            raise TypeError("The layer {} is illegal.".format(layer))
        if weighted:
            temp_stub_layer.set_weights(layer.get_weights())
        ret.add_layer(temp_stub_layer)
    ret.inputs = [tensor_dict[model.inputs[0]]]
    ret.outputs = [tensor_dict[model.outputs[0]]]
    return ret
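Example #4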
def transform(graph):
    """Enumerate the neighbor graphs reachable by one morphism operation.

    Args:
        graph: the source graph.

    Returns:
        A list of legal graphs, each obtained from a copy of the input by a
        single widen, deepen, or skip-connection operation.
    """
    graphs = []
    for target_id in graph.wide_layer_ids():
        temp_graph = deepcopy(graph)
        layer = temp_graph.layer_list[target_id]
        # Double the width: add as many filters/units as the layer already has.
        n_add = layer.filters if is_conv_layer(layer) else layer.units
        temp_graph.to_wider_model(target_id, n_add)
        graphs.append(temp_graph)

    for target_id in graph.deep_layer_ids():
        temp_graph = deepcopy(graph)
        if is_conv_layer(temp_graph.layer_list[target_id]):
            temp_graph.to_conv_deeper_model(target_id, randint(1, 2) * 2 + 1)
        else:
            temp_graph.to_dense_deeper_model(target_id)
        graphs.append(temp_graph)

    skip_ids = graph.skip_connection_layer_ids()
    for index_a, a_id in enumerate(skip_ids):
        for b_id in skip_ids[index_a + 1:]:
            temp_graph = deepcopy(graph)
            temp_graph.to_concat_skip_model(a_id, b_id)
            # if temp_graph.layer_list[a_id].filters != temp_graph.layer_list[b_id].filters:
            #     temp_graph.to_concat_skip_model(a_id, b_id)
            # else:
            #     temp_graph.to_add_skip_model(a_id, b_id)
            graphs.append(temp_graph)

    graphs = list(filter(legal_graph, graphs))

    return graphs
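A sketch of how transform might drive one step of the search: enumerate all legal neighbors and keep the most promising. The score function is a hypothetical stand-in for whatever performance estimator the surrounding search uses.

def search_step(graph, score):
    # One hill-climbing step over the morphism neighborhood (sketch).
    neighbors = transform(graph)      # wider, deeper, and skip variants
    if not neighbors:
        return graph
    return max(neighbors, key=score)  # score: hypothetical estimator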
Example #5
    def _is_layer(self, layer, layer_type):
        if layer_type == 'Conv':
            return is_conv_layer(layer)
        if layer_type == 'Dense':
            return isinstance(layer, Dense)
        if layer_type == 'BatchNormalization':
            return isinstance(layer, BatchNormalization)
        if layer_type == 'Concatenate':
            return isinstance(layer, Concatenate)
        if layer_type == 'WeightedAdd':
            return isinstance(layer, WeightedAdd)
        if layer_type == 'Pooling':
            return is_pooling_layer(layer)
Example #6
    def _search_next(self, u, start_dim, total_dim, n_add):
        """Search the graph downward (toward the outputs) to widen layers.

        Args:
            u: The starting node identifier.
            start_dim: The index at which the additional dimensions are inserted.
            total_dim: The total number of dimensions the layer has before widening.
            n_add: The number of dimensions to add.
        """
        if self.next_vis[u]:
            return
        self.next_vis[u] = True
        self._search_pre(u, start_dim, total_dim, n_add)
        for v, layer_id in self.adj_list[u]:
            layer = self.layer_list[layer_id]

            if is_conv_layer(layer):
                new_layer = wider_next_conv(layer, start_dim, total_dim, n_add)
                self._replace_layer(layer_id, new_layer)

            elif isinstance(layer, Dense):
                new_layer = wider_next_dense(layer, start_dim, total_dim,
                                             n_add)
                self._replace_layer(layer_id, new_layer)

            elif isinstance(layer, BatchNormalization):
                if not self.middle_layer_vis[layer_id]:
                    self.middle_layer_vis[layer_id] = True
                    new_layer = wider_bn(layer, start_dim, total_dim, n_add)
                    self._replace_layer(layer_id, new_layer)
                self._search_next(v, start_dim, total_dim, n_add)

            elif isinstance(layer, WeightedAdd):
                if not self.middle_layer_vis[layer_id]:
                    self.middle_layer_vis[layer_id] = True
                    new_layer = wider_weighted_add(layer, n_add)
                    self._replace_layer(layer_id, new_layer)
                self._search_next(v, start_dim, total_dim, n_add)

            elif isinstance(layer, Concatenate):
                next_start_dim = start_dim
                next_total_dim = layer.output_shape[-1]
                if self.node_list[u] is layer.input[1]:
                    # u is on the right of the concat
                    next_start_dim += next_total_dim - total_dim
                self._search_next(v, next_start_dim, next_total_dim, n_add)

            else:
                self._search_next(v, start_dim, total_dim, n_add)
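The Concatenate branch here is the inverse of the one in _search_pre: an insertion index in one input's coordinates is translated into the coordinates of the wider concatenated output. With the same illustrative numbers:

# u is the right-hand input (64 channels) of a Concatenate whose
# output has 96 channels (32 left + 64 right).
total_dim = 64               # channels of u's layer before widening
start_dim = 10               # insertion index, u's coordinates
next_total_dim = 96          # channels of the concatenated output
next_start_dim = start_dim + (next_total_dim - total_dim)
assert next_start_dim == 42  # index 10 of the right input is output index 42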
Example #7
def to_stub_model(model):
    """Convert a Keras model into a stub model that keeps only the topology.

    Tensors are mapped to plain integer ids and no weights are copied.
    """
    node_count = 0
    node_to_id = {}
    ret = StubModel()
    ret.input_shape = model.input_shape
    for layer in model.layers:
        if isinstance(layer.input, list):
            input_nodes = layer.input
        else:
            input_nodes = [layer.input]

        for node in input_nodes + [layer.output]:
            if node not in node_to_id:
                node_to_id[node] = node_count
                node_count += 1

        if isinstance(layer.input, list):
            input_id = []
            for node in layer.input:
                input_id.append(node_to_id[node])
        else:
            input_id = node_to_id[layer.input]
        output_id = node_to_id[layer.output]

        if is_conv_layer(layer):
            temp_stub_layer = StubConv(layer.filters, input_id, output_id)
        elif isinstance(layer, Dense):
            temp_stub_layer = StubDense(layer.units, input_id, output_id)
        elif isinstance(layer, WeightedAdd):
            temp_stub_layer = StubWeightedAdd(input_id, output_id)
        elif isinstance(layer, Concatenate):
            temp_stub_layer = StubConcatenate(input_id, output_id)
        elif isinstance(layer, BatchNormalization):
            temp_stub_layer = StubBatchNormalization(input_id, output_id)
        elif isinstance(layer, Activation):
            temp_stub_layer = StubActivation(input_id, output_id)
        elif isinstance(layer, InputLayer):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif isinstance(layer, Flatten):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif isinstance(layer, Dropout):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif is_pooling_layer(layer):
            temp_stub_layer = StubPooling(input_id, output_id)
        else:
            raise TypeError("The layer {} is illegal.".format(layer))
        ret.add_layer(temp_stub_layer)

    return ret
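A minimal sketch of calling this converter on a small Keras model. The layer classes and imports are standard Keras 2.x; whether this exact snippet runs depends on the Keras version the surrounding code targets.

from keras.layers import Dense, Flatten
from keras.models import Sequential

model = Sequential([
    Flatten(input_shape=(8, 8, 3)),
    Dense(16, activation='relu'),
    Dense(10, activation='softmax'),
])
stub = to_stub_model(model)
# The stub mirrors the topology layer by layer:
# Flatten -> StubLayer, each Dense -> StubDense; ids only, no weights.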
Example #8
def to_wider_graph(graph):
    """Widen a randomly chosen layer of the graph.

    Args:
        graph: the model graph in which a layer is widened.

    Returns:
        The graph with one widened layer.
    """
    # The last conv layer cannot be widened, since the wider operator
    # cannot cross the flatten layer.
    conv_layers = list(filter(is_conv_layer, graph.layer_list))[:-1]
    # The first dense layer cannot be widened for the same reason, and the
    # last dense layer is the softmax output, which cannot be widened either.
    dense_layers = list(filter(is_dense_layer, graph.layer_list))[1:-1]

    if len(dense_layers) == 0:
        weighted_layers = conv_layers
    elif randint(0, 1) == 0:
        weighted_layers = conv_layers
    else:
        weighted_layers = dense_layers

    if len(weighted_layers) <= 1:
        target = weighted_layers[0]
    else:
        target = weighted_layers[randint(0, len(weighted_layers) - 1)]

    if is_conv_layer(target):
        n_add = randint(1, 4 * target.filters)
    else:
        n_add = randint(1, 4 * target.units)

    graph.to_wider_model(graph.layer_to_id[target], n_add)
    return graph
Example #9
def to_deeper_model(model):
    """Insert a new layer after a randomly chosen weighted layer.

    Args:
        model: the model to deepen.

    Returns:
        The deeper model.
    """
    graph = Graph(model)
    weighted_layers = list(filter(lambda x: isinstance(x, tuple(WEIGHTED_LAYER_FUNC_LIST)), model.layers))[:-1]
    target = weighted_layers[randint(0, len(weighted_layers) - 1)]
    if is_conv_layer(target):
        # Insert a conv layer with a random odd kernel size (3 or 5),
        # matching the conv/dense dispatch in Examples #10 and #12.
        return graph.to_conv_deeper_model(target, randint(1, 2) * 2 + 1)
    return graph.to_dense_deeper_model(target)
Example #10
def to_deeper_graph(graph):
    """Insert a new layer after a randomly chosen layer of the graph.

    Args:
        graph: the model graph to deepen.

    Returns:
        The deeper graph.
    """
    weighted_layer_ids = graph.deep_layer_ids()
    target_id = weighted_layer_ids[randint(0, len(weighted_layer_ids) - 1)]
    if is_conv_layer(graph.layer_list[target_id]):
        graph.to_conv_deeper_model(target_id, randint(1, 2) * 2 + 1)
    else:
        graph.to_dense_deeper_model(target_id)
    return graph
Example #11
def to_wider_model(model):
    """Widen a randomly chosen weighted layer of the model.

    Args:
        model: the model to widen.

    Returns:
        The wider model.
    """
    graph = Graph(model)
    weighted_layers = list(filter(lambda x: isinstance(x, tuple(WEIGHTED_LAYER_FUNC_LIST)), model.layers))[:-1]
    target = weighted_layers[randint(0, len(weighted_layers) - 1)]
    if is_conv_layer(target):
        n_add = randint(1, 4 * target.filters)
    else:
        n_add = randint(1, 4 * target.units)
    return graph.to_wider_model(target, n_add)
Example #12
def to_deeper_graph(graph):
    """Insert a new layer after a randomly chosen weighted layer.

    Args:
        graph: the model graph to deepen.

    Returns:
        The deeper graph.
    """
    weighted_layers = list(
        filter(lambda x: isinstance(x, tuple(WEIGHTED_LAYER_FUNC_LIST)),
               graph.layer_list))[:-1]
    target = weighted_layers[randint(0, len(weighted_layers) - 1)]
    if is_conv_layer(target):
        graph.to_conv_deeper_model(graph.layer_to_id[target],
                                   randint(1, 2) * 2 + 1)
    else:
        graph.to_dense_deeper_model(graph.layer_to_id[target])
    return graph
Example #13
def to_wider_graph(graph):
    """Widen a randomly chosen layer of the graph.

    Args:
        graph: the model graph in which a layer is widened.

    Returns:
        The graph with one widened layer.
    """
    weighted_layer_ids = graph.wide_layer_ids()
    if len(weighted_layer_ids) <= 1:
        target_id = weighted_layer_ids[0]
    else:
        target_id = weighted_layer_ids[randint(0, len(weighted_layer_ids) - 1)]

    if is_conv_layer(graph.layer_list[target_id]):
        n_add = randint(1, 4 * graph.layer_list[target_id].filters)
    else:
        n_add = randint(1, 4 * graph.layer_list[target_id].units)

    graph.to_wider_model(target_id, n_add)
    return graph
Example #14
def to_skip_connection_model(model):
    """Add a randomly chosen skip connection to the model.

    Args:
        model: the model to which the skip connection is added.

    Returns:
        The model with the new skip connection.
    """
    graph = Graph(model)
    weighted_layers = list(filter(is_conv_layer, model.layers))
    index_a = randint(0, len(weighted_layers) - 1)
    index_b = randint(0, len(weighted_layers) - 1)
    if index_a > index_b:
        index_a, index_b = index_b, index_a
    a = weighted_layers[index_a]
    b = weighted_layers[index_b]
    if a.input.shape != b.output.shape:
        # Mismatched shapes rule out an element-wise add; concatenate
        # instead, as in Example #1.
        return graph.to_concat_skip_model(a, b)
    elif random() < 0.5:
        return graph.to_add_skip_model(a, b)
    else:
        return graph.to_concat_skip_model(a, b)