def deeper_conv_block(conv_layer, kernel_size, weighted=True):
    # Build an identity kernel in Keras layout (height, width, in, out):
    # each output filter has a single 1 at the spatial center of its
    # matching input channel, so the new conv initially passes data through.
    filter_shape = (kernel_size, ) * 2
    n_filters = conv_layer.filters
    weight = np.zeros(filter_shape + (n_filters, n_filters))
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        filter_weight = np.zeros(filter_shape + (n_filters, ))
        index = center + (i, )
        filter_weight[index] = 1
        weight[..., i] = filter_weight
    bias = np.zeros(n_filters)
    new_conv_layer = StubConv(n_filters,
                              kernel_size=filter_shape,
                              func=conv_layer.func)
    bn = StubBatchNormalization()

    if weighted:
        new_conv_layer.set_weights(
            (add_noise(weight,
                       np.array([0, 1])), add_noise(bias, np.array([0, 1]))))
        # BN weights in [gamma, beta, moving_mean, moving_variance] order,
        # i.e. an identity batch-norm transform.
        new_weights = [
            np.ones(n_filters, dtype=np.float32),
            np.zeros(n_filters, dtype=np.float32),
            np.zeros(n_filters, dtype=np.float32),
            np.ones(n_filters, dtype=np.float32)
        ]
        bn.set_weights(new_weights)

    return [
        StubActivation('relu'), new_conv_layer, bn,
        StubDropout(constant.CONV_DROPOUT_RATE)
    ]
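All of the weighted branches in these snippets call add_noise, which is defined elsewhere in the AutoKeras source and not shown here. A minimal sketch of its behavior, assuming the usual uniform-noise formulation with a small noise-ratio constant:

import numpy as np

NOISE_RATIO = 1e-4  # assumed value; the real constant lives in autokeras' Constant class

def add_noise(weights, other_weights):
    # The noise amplitude is scaled by the value range of the reference
    # array; the snippets pass np.array([0, 1]), giving a range of 1.
    w_range = np.ptp(other_weights.flatten())
    noise_range = NOISE_RATIO * w_range
    noise = np.random.uniform(-noise_range / 2.0, noise_range / 2.0, weights.shape)
    return weights + noise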
Example 2
def deeper_conv_block(conv_layer, kernel_size, weighted=True):
    filter_shape = (kernel_size, ) * 2
    n_filters = conv_layer.filters
    weight = np.zeros((n_filters, n_filters) + filter_shape)
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        filter_weight = np.zeros((n_filters, ) + filter_shape)
        index = (i, ) + center
        filter_weight[index] = 1
        weight[i, ...] = filter_weight
    bias = np.zeros(n_filters)
    new_conv_layer = StubConv(conv_layer.filters,
                              n_filters,
                              kernel_size=kernel_size)
    bn = StubBatchNormalization(n_filters)

    if weighted:
        new_conv_layer.set_weights(
            (add_noise(weight,
                       np.array([0, 1])), add_noise(bias, np.array([0, 1]))))
        new_weights = [
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1]))
        ]
        bn.set_weights(new_weights)

    return [StubReLU(), new_conv_layer, bn]
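Note the layout difference from the first example: here the weight tensor is (out_channels, in_channels, height, width), PyTorch-style. The loop still builds a channel-wise identity kernel; a quick standalone check confirms each output filter is zero except for a single 1 at the spatial center of its matching input channel:

import numpy as np

n_filters, kernel_size = 3, 3
filter_shape = (kernel_size,) * 2
center = tuple((k - 1) // 2 for k in filter_shape)

weight = np.zeros((n_filters, n_filters) + filter_shape)
for i in range(n_filters):
    weight[(i, i) + center] = 1  # output channel i copies input channel i

assert weight.sum() == n_filters  # exactly one 1 per output filter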
Example 3
def wider_bn(layer, start_dim, total_dim, n_add, weighted=True):
    """Get new layer with wider batch normalization for current layer

   Args:
       weighted:
       layer: the layer from which we get new layer with wider batch normalization
       start_dim: the started dimension
       total_dim: the total dimension
       n_add: the output shape

   Returns:
       The new layer with wider batch normalization
   """
    if not weighted:
        return StubBatchNormalization()

    weights = layer.get_weights()

    new_weights = [
        np.ones(n_add, dtype=np.float32),
        np.zeros(n_add, dtype=np.float32),
        np.zeros(n_add, dtype=np.float32),
        np.ones(n_add, dtype=np.float32)
    ]

    student_w = tuple()
    for weight, new_weight in zip(weights, new_weights):
        temp_w = weight.copy()
        temp_w = np.concatenate(
            (temp_w[:start_dim], new_weight, temp_w[start_dim:total_dim]))
        student_w += (temp_w, )
    new_layer = StubBatchNormalization()
    new_layer.set_weights(student_w)
    return new_layer
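The splice in the loop above inserts the fresh values for the n_add new channels at start_dim and keeps the original statistics on either side. A worked run with a hypothetical 4-channel gamma widened by 2 channels:

import numpy as np

gamma = np.array([1.0, 1.1, 1.2, 1.3], dtype=np.float32)  # hypothetical old gamma
start_dim, total_dim, n_add = 2, 4, 2
new_gamma = np.ones(n_add, dtype=np.float32)

widened = np.concatenate((gamma[:start_dim], new_gamma, gamma[start_dim:total_dim]))
print(widened)  # [1.  1.1 1.  1.  1.2 1.3] -> new channels sit at positions 2 and 3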
Example 4
def deeper_conv_block(conv_layer, kernel_size, weighted=True):
    filter_shape = (kernel_size,) * 2
    n_filters = conv_layer.filters
    weight = np.zeros((n_filters, n_filters) + filter_shape)
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        filter_weight = np.zeros((n_filters,) + filter_shape)
        index = (i,) + center
        filter_weight[index] = 1
        weight[i, ...] = filter_weight
    bias = np.zeros(n_filters)
    new_conv_layer = StubConv(conv_layer.filters, n_filters, kernel_size=kernel_size)
    bn = StubBatchNormalization(n_filters)

    if weighted:
        new_conv_layer.set_weights((add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))
        new_weights = [add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
                       add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
                       add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
                       add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1]))]
        bn.set_weights(new_weights)

    return [StubReLU(),
            new_conv_layer,
            bn]
Example 5
def get_conv_dense_model():
    graph = Graph((32, 32, 3), False)
    output_node_id = 0

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    output_node_id = graph.add_layer(StubFlatten(), output_node_id)
    output_node_id = graph.add_layer(StubDropout(Constant.DENSE_DROPOUT_RATE), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], 5),
                                     output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(5, 5), output_node_id)
    graph.add_layer(StubSoftmax(), output_node_id)

    graph.produce_model().set_weight_to_graph()

    return graph
Example 6
def get_add_skip_model():
    graph = Graph((5, 5, 3), False)
    output_node_id = 0

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    temp_node_id = output_node_id

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    temp_node_id = graph.add_layer(StubConv(3, 3, 1), temp_node_id)
    output_node_id = graph.add_layer(StubAdd(), [output_node_id, temp_node_id])

    temp_node_id = output_node_id

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    temp_node_id = graph.add_layer(StubConv(3, 3, 1), temp_node_id)
    output_node_id = graph.add_layer(StubAdd(), [output_node_id, temp_node_id])

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    output_node_id = graph.add_layer(StubFlatten(), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], 5),
                                     output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.DENSE_DROPOUT_RATE), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(5, 5), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.DENSE_DROPOUT_RATE), output_node_id)
    graph.add_layer(StubSoftmax(), output_node_id)

    graph.produce_model().set_weight_to_graph()

    return graph
Example 7
    def to_add_skip_model(self, start_id, end_id):
        """Add a weighted add skip-connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(('to_add_skip_model', start_id, end_id))
        conv_block_input_id = self._conv_block_end_node(start_id)
        conv_block_input_id = self.adj_list[conv_block_input_id][0][0]

        block_last_layer_input_id = self._conv_block_end_node(end_id)

        # Add the pooling layer chain.
        layer_list = self._get_pooling_layers(conv_block_input_id, block_last_layer_input_id)
        skip_output_id = conv_block_input_id
        for index, layer_id in enumerate(layer_list):
            skip_output_id = self.add_layer(deepcopy(self.layer_list[layer_id]), skip_output_id)

        # Add the ReLU, 1x1 conv, and BN layers on the skip path.
        new_relu_layer = StubReLU()
        skip_output_id = self.add_layer(new_relu_layer, skip_output_id)
        new_conv_layer = StubConv(self.layer_list[start_id].filters, self.layer_list[end_id].filters, 1)
        skip_output_id = self.add_layer(new_conv_layer, skip_output_id)
        new_bn_layer = StubBatchNormalization(self.layer_list[end_id].filters)
        skip_output_id = self.add_layer(new_bn_layer, skip_output_id)

        # Add the add layer.
        block_last_layer_output_id = self.adj_list[block_last_layer_input_id][0][0]
        add_input_node_id = self._add_node(deepcopy(self.node_list[block_last_layer_output_id]))
        add_layer = StubAdd()

        self._redirect_edge(block_last_layer_input_id, block_last_layer_output_id, add_input_node_id)
        self._add_edge(add_layer, add_input_node_id, block_last_layer_output_id)
        self._add_edge(add_layer, skip_output_id, block_last_layer_output_id)
        add_layer.input = [self.node_list[add_input_node_id], self.node_list[skip_output_id]]
        add_layer.output = self.node_list[block_last_layer_output_id]
        self.node_list[block_last_layer_output_id].shape = add_layer.output_shape

        # Set the weights of the additional conv and BN layers.
        if self.weighted:
            filters_end = self.layer_list[end_id].filters
            filters_start = self.layer_list[start_id].filters
            filter_shape = (1,) * (len(self.layer_list[end_id].get_weights()[0].shape) - 2)
            weights = np.zeros((filters_end, filters_start) + filter_shape)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))

            n_filters = filters_end
            new_weights = [add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
                           add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
                           add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
                           add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1]))]
            new_bn_layer.set_weights(new_weights)
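Function preservation here hinges on the 1x1 skip conv being initialized to (noisy) zeros: at the moment of the morphism, the StubAdd contributes essentially nothing to the main path. A self-contained sketch of that argument, ignoring the noise term:

import numpy as np

filters_start, filters_end = 3, 3
skip_kernel = np.zeros((filters_end, filters_start, 1, 1))  # zero-initialized 1x1 conv

x_main = np.random.rand(filters_end, 5, 5)  # hypothetical main-path feature map
x_in = np.random.rand(filters_start, 5, 5)  # hypothetical skip-path input
x_skip = np.tensordot(skip_kernel[:, :, 0, 0], x_in, axes=1)  # 1x1 conv as matrix product

assert np.allclose(x_main + x_skip, x_main)  # the add is a no-op at initialization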
Example 8
    def generate(self,
                 model_len=constant.MODEL_LEN,
                 model_width=constant.MODEL_WIDTH):
        pool = self._get_pool_layer_func()
        conv = get_conv_layer_func(len(self._get_shape(3)))
        ave = get_ave_layer_func(len(self._get_shape(3)))

        pooling_len = int(model_len / 4)
        model = StubModel()
        model.input_shape = self.input_shape
        model.inputs = [0]
        model.layers.append(StubInput())
        for i in range(model_len):
            model.layers += [
                StubActivation('relu'),
                StubConv(model_width, kernel_size=3, func=conv),
                StubBatchNormalization(),
                StubDropout(constant.CONV_DROPOUT_RATE)
            ]
            if pooling_len == 0 or ((i + 1) % pooling_len == 0
                                    and i != model_len - 1):
                model.layers.append(StubPooling(func=pool))

        model.layers.append(StubGlobalPooling(ave))
        model.layers.append(StubDense(self.n_classes, activation='softmax'))
        model.outputs = [len(model.layers)]
        for index, layer in enumerate(model.layers):
            layer.input = index
            layer.output = index + 1
        return Graph(model, False)
Example 9
    def generate(self,
                 model_len=Constant.MODEL_LEN,
                 model_width=Constant.MODEL_WIDTH):
        pooling_len = int(model_len / 4)
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0
        for i in range(model_len):
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            output_node_id = graph.add_layer(
                StubConv(temp_input_channel, model_width, kernel_size=3),
                output_node_id)
            output_node_id = graph.add_layer(
                StubBatchNormalization(model_width), output_node_id)
            temp_input_channel = model_width
            if pooling_len == 0 or ((i + 1) % pooling_len == 0
                                    and i != model_len - 1):
                output_node_id = graph.add_layer(StubPooling(), output_node_id)

        output_node_id = graph.add_layer(StubFlatten(), output_node_id)
        output_node_id = graph.add_layer(
            StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)
        output_node_id = graph.add_layer(
            StubDense(graph.node_list[output_node_id].shape[0], model_width),
            output_node_id)
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        output_node_id = graph.add_layer(
            StubDense(model_width, self.n_classes), output_node_id)
        graph.add_layer(StubSoftmax(), output_node_id)
        return graph
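The pooling schedule in both generate variants is easy to misread: with pooling_len = model_len // 4, a pooling layer follows every pooling_len-th conv block except the very last one. A small standalone trace:

model_len = 8
pooling_len = int(model_len / 4)  # 2
pool_after = [i for i in range(model_len)
              if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1)]
print(pool_after)  # [1, 3, 5] -> pooling after the 2nd, 4th and 6th blocks, never the last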
Example 10
def to_stub_model(model, weighted=False):
    node_count = 0
    tensor_dict = {}
    ret = StubModel()
    ret.input_shape = model.input_shape
    for layer in model.layers:
        if isinstance(layer.input, list):
            input_nodes = layer.input
        else:
            input_nodes = [layer.input]

        for node in input_nodes + [layer.output]:
            if node not in tensor_dict:
                tensor_dict[node] = StubTensor(get_int_tuple(node.shape))
                node_count += 1

        if isinstance(layer.input, list):
            input_id = []
            for node in layer.input:
                input_id.append(tensor_dict[node])
        else:
            input_id = tensor_dict[layer.input]
        output_id = tensor_dict[layer.output]

        if is_conv_layer(layer):
            temp_stub_layer = StubConv(layer.filters, layer.kernel_size,
                                       layer.__class__, input_id, output_id)
        elif isinstance(layer, Dense):
            temp_stub_layer = StubDense(layer.units, layer.activation,
                                        input_id, output_id)
        elif isinstance(layer, WeightedAdd):
            temp_stub_layer = StubWeightedAdd(input_id, output_id)
        elif isinstance(layer, Concatenate):
            temp_stub_layer = StubConcatenate(input_id, output_id)
        elif isinstance(layer, BatchNormalization):
            temp_stub_layer = StubBatchNormalization(input_id, output_id)
        elif isinstance(layer, Activation):
            temp_stub_layer = StubActivation(layer.activation, input_id,
                                             output_id)
        elif isinstance(layer, InputLayer):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif isinstance(layer, Flatten):
            temp_stub_layer = StubFlatten(input_id, output_id)
        elif isinstance(layer, Dropout):
            temp_stub_layer = StubDropout(layer.rate, input_id, output_id)
        elif is_pooling_layer(layer):
            temp_stub_layer = StubPooling(layer.__class__, input_id, output_id)
        elif is_global_pooling_layer(layer):
            temp_stub_layer = StubGlobalPooling(layer.__class__, input_id,
                                                output_id)
        else:
            raise TypeError("The layer {} is illegal.".format(layer))
        if weighted:
            temp_stub_layer.set_weights(layer.get_weights())
        ret.add_layer(temp_stub_layer)
    ret.inputs = [tensor_dict[model.inputs[0]]]
    ret.outputs = [tensor_dict[model.outputs[0]]]
    return ret
Example 11
def wider_bn(layer, start_dim, total_dim, n_add, weighted=True):
    if not weighted:
        return StubBatchNormalization(layer.num_features + n_add)

    weights = layer.get_weights()

    new_weights = [add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])),
                   add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
                   add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
                   add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1]))]

    student_w = tuple()
    for weight, new_weight in zip(weights, new_weights):
        temp_w = weight.copy()
        temp_w = np.concatenate((temp_w[:start_dim], new_weight, temp_w[start_dim:total_dim]))
        student_w += (temp_w,)
    new_layer = StubBatchNormalization(layer.num_features + n_add)
    new_layer.set_weights(student_w)
    return new_layer
Example 12
def to_stub_model(model):
    node_count = 0
    node_to_id = {}
    ret = StubModel()
    ret.input_shape = model.input_shape
    for layer in model.layers:
        if isinstance(layer.input, list):
            input_nodes = layer.input
        else:
            input_nodes = [layer.input]

        for node in input_nodes + [layer.output]:
            if node not in node_to_id:
                node_to_id[node] = node_count
                node_count += 1

        if isinstance(layer.input, list):
            input_id = []
            for node in layer.input:
                input_id.append(node_to_id[node])
        else:
            input_id = node_to_id[layer.input]
        output_id = node_to_id[layer.output]

        if is_conv_layer(layer):
            temp_stub_layer = StubConv(layer.filters, input_id, output_id)
        elif isinstance(layer, Dense):
            temp_stub_layer = StubDense(layer.units, input_id, output_id)
        elif isinstance(layer, WeightedAdd):
            temp_stub_layer = StubWeightedAdd(input_id, output_id)
        elif isinstance(layer, Concatenate):
            temp_stub_layer = StubConcatenate(input_id, output_id)
        elif isinstance(layer, BatchNormalization):
            temp_stub_layer = StubBatchNormalization(input_id, output_id)
        elif isinstance(layer, Activation):
            temp_stub_layer = StubActivation(input_id, output_id)
        elif isinstance(layer, InputLayer):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif isinstance(layer, Flatten):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif isinstance(layer, Dropout):
            temp_stub_layer = StubLayer(input_id, output_id)
        elif is_pooling_layer(layer):
            temp_stub_layer = StubPooling(input_id, output_id)
        else:
            raise TypeError("The layer {} is illegal.".format(layer))
        ret.add_layer(temp_stub_layer)

    return ret
Example 13
    def _wider_bn(self, layer, start_dim, total_dim, n_add):
        return StubBatchNormalization()
Example 14
    def to_concat_skip_model(self, start_id, end_id):
        """Add a weighted add concatenate connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(
            ('to_concat_skip_model', start_id, end_id))
        conv_block_input_id = self._conv_block_end_node(start_id)
        conv_block_input_id = self.adj_list[conv_block_input_id][0][0]

        block_last_layer_input_id = self._conv_block_end_node(end_id)

        # Add the pooling layer chain.
        pooling_layer_list = self._get_pooling_layers(
            conv_block_input_id, block_last_layer_input_id)
        skip_output_id = conv_block_input_id
        for index, layer_id in enumerate(pooling_layer_list):
            skip_output_id = self.add_layer(
                deepcopy(self.layer_list[layer_id]), skip_output_id)

        block_last_layer_output_id = self.adj_list[block_last_layer_input_id][0][0]
        concat_input_node_id = self._add_node(
            deepcopy(self.node_list[block_last_layer_output_id]))
        self._redirect_edge(block_last_layer_input_id,
                            block_last_layer_output_id, concat_input_node_id)

        concat_layer = StubConcatenate()
        concat_layer.input = [
            self.node_list[concat_input_node_id],
            self.node_list[skip_output_id]
        ]
        concat_output_node_id = self._add_node(Node(concat_layer.output_shape))
        self._add_edge(concat_layer, concat_input_node_id,
                       concat_output_node_id)
        self._add_edge(concat_layer, skip_output_id, concat_output_node_id)
        concat_layer.output = self.node_list[concat_output_node_id]
        self.node_list[concat_output_node_id].shape = concat_layer.output_shape

        # Add the ReLU, 1x1 conv, and BN layers after the concatenation.
        new_relu_layer = StubReLU()
        concat_output_node_id = self.add_layer(new_relu_layer,
                                               concat_output_node_id)
        new_conv_layer = StubConv(
            self.layer_list[start_id].filters + self.layer_list[end_id].filters,
            self.layer_list[end_id].filters,
            1)
        concat_output_node_id = self.add_layer(new_conv_layer,
                                               concat_output_node_id)
        new_bn_layer = StubBatchNormalization(self.layer_list[end_id].filters)

        self._add_edge(new_bn_layer, concat_output_node_id,
                       block_last_layer_output_id)
        new_bn_layer.input = self.node_list[concat_output_node_id]
        new_bn_layer.output = self.node_list[block_last_layer_output_id]
        self.node_list[block_last_layer_output_id].shape = new_bn_layer.output_shape

        if self.weighted:
            filters_end = self.layer_list[end_id].filters
            filters_start = self.layer_list[start_id].filters
            filter_shape = (1, ) * (
                len(self.layer_list[end_id].get_weights()[0].shape) - 2)
            weights = np.zeros((filters_end, filters_end) + filter_shape)
            for i in range(filters_end):
                filter_weight = np.zeros((filters_end, ) + filter_shape)
                filter_weight[(i, 0, 0)] = 1
                weights[i, ...] = filter_weight
            weights = np.concatenate(
                (weights,
                 np.zeros((filters_end, filters_start) + filter_shape)),
                axis=1)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])),
                                        add_noise(bias, np.array([0, 1]))))

            n_filters = filters_end
            new_weights = [
                add_noise(np.ones(n_filters, dtype=np.float32),
                          np.array([0, 1])),
                add_noise(np.zeros(n_filters, dtype=np.float32),
                          np.array([0, 1])),
                add_noise(np.zeros(n_filters, dtype=np.float32),
                          np.array([0, 1])),
                add_noise(np.ones(n_filters, dtype=np.float32),
                          np.array([0, 1]))
            ]
            new_bn_layer.set_weights(new_weights)
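Unlike the add-skip case, the 1x1 conv built in the weighted branch above is not zero: it is an identity over the original filters_end channels concatenated with zeros over the skip path's filters_start channels, so at initialization the output still equals the main path. A compact check of that claim:

import numpy as np

filters_end, filters_start = 3, 2
eye = np.eye(filters_end).reshape(filters_end, filters_end, 1, 1)
zeros = np.zeros((filters_end, filters_start, 1, 1))
w = np.concatenate((eye, zeros), axis=1)  # same layout the loop above produces

main = np.random.rand(filters_end, 4, 4)
skip = np.random.rand(filters_start, 4, 4)
stacked = np.concatenate((main, skip), axis=0)      # what StubConcatenate feeds the conv
out = np.tensordot(w[:, :, 0, 0], stacked, axes=1)  # 1x1 conv as a matrix product

assert np.allclose(out, main)  # skip channels are ignored at initialization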
Example 15
    def to_add_skip_model(self, start_id, end_id):
        """Add a weighted add skip-connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(('to_add_skip_model', start_id, end_id))
        conv_block_input_id = self._conv_block_end_node(start_id)
        conv_block_input_id = self.adj_list[conv_block_input_id][0][0]

        block_last_layer_input_id = self._conv_block_end_node(end_id)

        # Add the pooling layer chain.
        layer_list = self._get_pooling_layers(conv_block_input_id,
                                              block_last_layer_input_id)
        skip_output_id = conv_block_input_id
        for index, layer_id in enumerate(layer_list):
            skip_output_id = self.add_layer(
                deepcopy(self.layer_list[layer_id]), skip_output_id)

        # Add the ReLU, 1x1 conv, and BN layers on the skip path.
        new_relu_layer = StubReLU()
        skip_output_id = self.add_layer(new_relu_layer, skip_output_id)
        new_conv_layer = StubConv(self.layer_list[start_id].filters,
                                  self.layer_list[end_id].filters, 1)
        skip_output_id = self.add_layer(new_conv_layer, skip_output_id)
        new_bn_layer = StubBatchNormalization(self.layer_list[end_id].filters)
        skip_output_id = self.add_layer(new_bn_layer, skip_output_id)

        # Add the add layer.
        block_last_layer_output_id = self.adj_list[block_last_layer_input_id][0][0]
        add_input_node_id = self._add_node(
            deepcopy(self.node_list[block_last_layer_output_id]))
        add_layer = StubAdd()

        self._redirect_edge(block_last_layer_input_id,
                            block_last_layer_output_id, add_input_node_id)
        self._add_edge(add_layer, add_input_node_id,
                       block_last_layer_output_id)
        self._add_edge(add_layer, skip_output_id, block_last_layer_output_id)
        add_layer.input = [
            self.node_list[add_input_node_id], self.node_list[skip_output_id]
        ]
        add_layer.output = self.node_list[block_last_layer_output_id]
        self.node_list[block_last_layer_output_id].shape = add_layer.output_shape

        # Set the weights of the additional conv and BN layers.
        if self.weighted:
            filters_end = self.layer_list[end_id].filters
            filters_start = self.layer_list[start_id].filters
            filter_shape = (1, ) * (
                len(self.layer_list[end_id].get_weights()[0].shape) - 2)
            weights = np.zeros((filters_end, filters_start) + filter_shape)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])),
                                        add_noise(bias, np.array([0, 1]))))

            n_filters = filters_end
            new_weights = [
                add_noise(np.ones(n_filters, dtype=np.float32),
                          np.array([0, 1])),
                add_noise(np.zeros(n_filters, dtype=np.float32),
                          np.array([0, 1])),
                add_noise(np.zeros(n_filters, dtype=np.float32),
                          np.array([0, 1])),
                add_noise(np.ones(n_filters, dtype=np.float32),
                          np.array([0, 1]))
            ]
            new_bn_layer.set_weights(new_weights)
Example 16
    def to_concat_skip_model(self, start_id, end_id):
        """Add a weighted add concatenate connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(('to_concat_skip_model', start_id, end_id))
        conv_block_input_id = self._conv_block_end_node(start_id)
        conv_block_input_id = self.adj_list[conv_block_input_id][0][0]

        block_last_layer_input_id = self._conv_block_end_node(end_id)

        # Add the pooling layer chain.
        pooling_layer_list = self._get_pooling_layers(conv_block_input_id, block_last_layer_input_id)
        skip_output_id = conv_block_input_id
        for index, layer_id in enumerate(pooling_layer_list):
            skip_output_id = self.add_layer(deepcopy(self.layer_list[layer_id]), skip_output_id)

        block_last_layer_output_id = self.adj_list[block_last_layer_input_id][0][0]
        concat_input_node_id = self._add_node(deepcopy(self.node_list[block_last_layer_output_id]))
        self._redirect_edge(block_last_layer_input_id, block_last_layer_output_id, concat_input_node_id)

        concat_layer = StubConcatenate()
        concat_layer.input = [self.node_list[concat_input_node_id], self.node_list[skip_output_id]]
        concat_output_node_id = self._add_node(Node(concat_layer.output_shape))
        self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id)
        self._add_edge(concat_layer, skip_output_id, concat_output_node_id)
        concat_layer.output = self.node_list[concat_output_node_id]
        self.node_list[concat_output_node_id].shape = concat_layer.output_shape

        # Add the ReLU, 1x1 conv, and BN layers after the concatenation.
        new_relu_layer = StubReLU()
        concat_output_node_id = self.add_layer(new_relu_layer, concat_output_node_id)
        new_conv_layer = StubConv(self.layer_list[start_id].filters + self.layer_list[end_id].filters,
                                  self.layer_list[end_id].filters, 1)
        concat_output_node_id = self.add_layer(new_conv_layer, concat_output_node_id)
        new_bn_layer = StubBatchNormalization(self.layer_list[end_id].filters)

        self._add_edge(new_bn_layer, concat_output_node_id, block_last_layer_output_id)
        new_bn_layer.input = self.node_list[concat_output_node_id]
        new_bn_layer.output = self.node_list[block_last_layer_output_id]
        self.node_list[block_last_layer_output_id].shape = new_bn_layer.output_shape

        if self.weighted:
            filters_end = self.layer_list[end_id].filters
            filters_start = self.layer_list[start_id].filters
            filter_shape = (1,) * (len(self.layer_list[end_id].get_weights()[0].shape) - 2)
            weights = np.zeros((filters_end, filters_end) + filter_shape)
            for i in range(filters_end):
                filter_weight = np.zeros((filters_end,) + filter_shape)
                filter_weight[(i, 0, 0)] = 1
                weights[i, ...] = filter_weight
            weights = np.concatenate((weights,
                                      np.zeros((filters_end, filters_start) + filter_shape)), axis=1)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))

            n_filters = filters_end
            new_weights = [add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
                           add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
                           add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
                           add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1]))]
            new_bn_layer.set_weights(new_weights)
Example 17
    def _deeper_conv_block(self, target, kernel_size):
        return [
            StubConv(self._layer_width(target)),
            StubBatchNormalization(),
            StubActivation()
        ]