Example #1
def create_new_layer(input_shape, n_dim):
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [
        get_conv_class(n_dim),
        get_batch_norm_class(n_dim), StubReLU
    ]
    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        # It is in the conv layer part.
        layer_class = sample(conv_deeper_classes, 1)[0]

    if layer_class == StubDense:
        new_layer = StubDense(input_shape[0], input_shape[0])

    elif layer_class == get_dropout_class(n_dim):
        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)

    elif layer_class == get_conv_class(n_dim):
        new_layer = layer_class(input_shape[-1],
                                input_shape[-1],
                                sample((1, 3, 5), 1)[0],
                                stride=1)

    elif layer_class == get_batch_norm_class(n_dim):
        new_layer = layer_class(input_shape[-1])

    elif layer_class == get_pooling_class(n_dim):
        new_layer = layer_class(sample((1, 3, 5), 1)[0])

    else:
        new_layer = layer_class()

    return new_layer
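A note on the helpers these examples rely on but the listing never shows: `sample` behaves like Python's `random.sample`, and the `get_*_class(n_dim)` helpers map a dimensionality of 1, 2, or 3 to the matching stub layer class (e.g. a 2-D conv stub for `n_dim == 2`). That reading is an assumption from usage, not the autokeras source. A minimal self-contained sketch of the random kernel-size draw:

# Minimal sketch of the kernel-size draw used above, assuming `sample`
# is random.sample (a stand-in, not the actual autokeras import).
from random import sample

kernel_size = sample((1, 3, 5), 1)[0]  # draw one of 1, 3, 5 uniformly
assert kernel_size in (1, 3, 5)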
Example #2
def wider_pre_conv(layer, n_add_filters, weighted=True):
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel,
                                     layer.filters + n_add_filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride)

    n_pre_filters = layer.filters
    rand = np.random.randint(n_pre_filters, size=n_add_filters)
    teacher_w, teacher_b = layer.get_weights()

    student_w = teacher_w.copy()
    student_b = teacher_b.copy()
    # duplicate randomly chosen teacher filters to widen the target layer
    for i in range(len(rand)):
        teacher_index = rand[i]
        new_weight = teacher_w[teacher_index, ...]
        new_weight = new_weight[np.newaxis, ...]
        student_w = np.concatenate((student_w, new_weight), axis=0)
        student_b = np.append(student_b, teacher_b[teacher_index])
    new_pre_layer = get_conv_class(n_dim)(layer.input_channel,
                                          n_pre_filters + n_add_filters,
                                          kernel_size=layer.kernel_size,
                                          stride=layer.stride)
    new_pre_layer.set_weights((add_noise(student_w, teacher_w), add_noise(student_b, teacher_b)))
    return new_pre_layer
Example #3
def wider_pre_conv(layer, n_add_filters, weighted=True):
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel,
                                     layer.filters + n_add_filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride,
                                     padding=layer.padding,
                                     groups=layer.groups)

    n_pre_filters = layer.filters
    rand = np.random.randint(n_pre_filters, size=n_add_filters)
    teacher_w, teacher_b = layer.get_weights()

    student_w = teacher_w.copy()
    student_b = teacher_b.copy()
    # duplicate randomly chosen teacher filters to widen the target layer
    for i in range(len(rand)):
        teacher_index = rand[i]
        new_weight = teacher_w[teacher_index, ...]
        new_weight = new_weight[np.newaxis, ...]
        student_w = np.concatenate((student_w, new_weight), axis=0)
        student_b = np.append(student_b, teacher_b[teacher_index])
    new_pre_layer = get_conv_class(n_dim)(layer.input_channel,
                                          n_pre_filters + n_add_filters,
                                          kernel_size=layer.kernel_size,
                                          stride=layer.stride,
                                          padding=layer.padding,
                                          groups=layer.groups)
    new_pre_layer.set_weights((add_noise(student_w, teacher_w), add_noise(student_b, teacher_b)))
    return new_pre_layer
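The loop above is the Net2Net "wider" step on the layer being widened: randomly chosen teacher filters are duplicated and appended, and `add_noise` perturbs the result to break symmetry. A standalone NumPy sketch of just that bookkeeping, assuming the usual (out_channels, in_channels, kh, kw) weight layout:

import numpy as np

# Standalone sketch of the filter-duplication step above.
rng = np.random.default_rng(0)
teacher_w = rng.standard_normal((4, 3, 3, 3))   # 4 filters over 3 input channels
teacher_b = rng.standard_normal(4)

n_add_filters = 2
rand = rng.integers(0, teacher_w.shape[0], size=n_add_filters)
student_w, student_b = teacher_w.copy(), teacher_b.copy()
for teacher_index in rand:
    student_w = np.concatenate((student_w, teacher_w[teacher_index][np.newaxis, ...]), axis=0)
    student_b = np.append(student_b, teacher_b[teacher_index])

assert student_w.shape == (6, 3, 3, 3) and student_b.shape == (6,)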
Example #4
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
    n_dim = get_n_dim(layer)
    # for a grouped (e.g. depthwise) conv, grow the group count with the new channels
    groups = layer.groups + n_add if layer.groups is not None and layer.groups > 1 else 1
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel + n_add,
                                     layer.filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride,
                                     padding=layer.padding,
                                     groups=groups)
    n_filters = layer.filters
    new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
                                      n_filters,
                                      kernel_size=layer.kernel_size,
                                      stride=layer.stride,
                                      padding=layer.padding,
                                      groups=groups)
    teacher_w, teacher_b = layer.get_weights()

    if layer.groups is not None and layer.groups > 1:
        new_layer.set_weights((teacher_w, teacher_b))
    else:
        new_weight_shape = list(teacher_w.shape)
        new_weight_shape[1] = n_add
        new_weight = np.zeros(tuple(new_weight_shape))
        student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
                                    add_noise(new_weight, teacher_w),
                                    teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
        new_layer.set_weights((student_w, teacher_b))

    return new_layer
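On the consuming side, the widened input channels receive zero weights (before `add_noise`), so immediately after the transformation the next layer's output is essentially unchanged. A small NumPy sketch of the splice:

import numpy as np

# Sketch of the input-channel splice: n_add zero-weight channels are inserted
# at start_dim, so before noise the new channels contribute nothing.
teacher_w = np.arange(2 * 3 * 1 * 1, dtype=float).reshape(2, 3, 1, 1)
start_dim, total_dim, n_add = 2, 3, 2
new_weight = np.zeros((teacher_w.shape[0], n_add) + teacher_w.shape[2:])
student_w = np.concatenate((teacher_w[:, :start_dim, ...],
                            new_weight,
                            teacher_w[:, start_dim:total_dim, ...]), axis=1)
assert student_w.shape == (2, 5, 1, 1)
assert not student_w[:, start_dim:start_dim + n_add].any()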
Example #5
def deeper_conv_block(conv_layer, kernel_size, weighted=True):
    n_dim = get_n_dim(conv_layer)
    filter_shape = (kernel_size, ) * 2  # note: hard-coded for the 2-D case; n_dim is not used here
    n_filters = conv_layer.filters
    weight = np.zeros((n_filters, n_filters) + filter_shape)
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        filter_weight = np.zeros((n_filters, ) + filter_shape)
        index = (i, ) + center
        filter_weight[index] = 1
        weight[i, ...] = filter_weight
    bias = np.zeros(n_filters)
    new_conv_layer = get_conv_class(n_dim)(conv_layer.filters,
                                           n_filters,
                                           kernel_size=kernel_size)
    bn = get_batch_norm_class(n_dim)(n_filters)

    if weighted:
        new_conv_layer.set_weights(
            (add_noise(weight,
                       np.array([0, 1])), add_noise(bias, np.array([0, 1]))))
        new_weights = [
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1]))
        ]
        bn.set_weights(new_weights)

    return [StubReLU(), new_conv_layer, bn]
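The weight tensor built above is an identity mapping: filter i carries a single 1 at the kernel center of input channel i, so the freshly inserted conv initially passes its input through (up to the added noise). A quick standalone check of that property:

import numpy as np

# Verify the identity initialization: at the kernel center, the weight tensor
# restricted to (out_channel, in_channel) is the identity matrix.
n_filters, kernel_size = 4, 3
filter_shape = (kernel_size,) * 2
center = tuple(int((k - 1) / 2) for k in filter_shape)
weight = np.zeros((n_filters, n_filters) + filter_shape)
for i in range(n_filters):
    weight[(i, i) + center] = 1

assert np.array_equal(weight[:, :, center[0], center[1]], np.eye(n_filters))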
Example #6
    def __init__(self, input_shape, weighted=True):
        """Initializer for Graph.

        Args:
            input_shape: A tuple describing the input tensor shape, not including the number of instances.
            weighted: A boolean marking if there are actual values in the weights of the layers.
                Sometimes we only need the neural architecture information of a graph. In that case,
                we do not save the weights, to save memory and time.
        """
        self.input_shape = input_shape
        self.weighted = weighted
        self.node_list = []
        self.layer_list = []
        # node ids start from 0
        self.node_to_id = {}
        self.layer_to_id = {}
        self.layer_id_to_input_node_ids = {}
        self.layer_id_to_output_node_ids = {}
        self.adj_list = {}
        self.reverse_adj_list = {}
        self.operation_history = []
        self.n_dim = len(input_shape) - 1
        self.conv = get_conv_class(self.n_dim)
        self.batch_norm = get_batch_norm_class(self.n_dim)

        self.vis = None
        self._add_node(Node(input_shape))
Example #7
 def __init__(self, n_output_node, input_shape):
     super(MobileNetV2Generator, self).__init__(n_output_node, input_shape)
       
     """ configuration for complete net:
     self.cfg = [(1,  16, 1, 1),
        (6,  24, 2, 1) ,  # NOTE: change stride 2 -> 1 for CIFAR10
        (6,  32, 3, 2),
        (6,  64, 4, 2),
        (6,  96, 3, 1),
        (6, 160, 3, 2),
        (6, 320, 1, 1)]
     """
     
     # we try smaller net configuration (so autokeras will be able to expand the net)
     self.cfg = [(1,  16, 1, 1),
        (6,  24, 2, 1)] # ,  # NOTE: change stride 2 -> 1 for CIFAR10
        #(6,  32, 3, 2) ,
        #(6,  64, 4, 2),
        #(6,  96, 3, 1),
        #(6, 160, 3, 2),
        #(6, 320, 1, 1)]
     
     self.in_planes = 32
     self.block_expansion = 1
     self.n_dim = len(self.input_shape) - 1
     
     if len(self.input_shape) > 4:
         raise ValueError('The input dimension is too high.')
     elif len(self.input_shape) < 2:
         raise ValueError('The input dimension is too low.')
     self.conv = get_conv_class(self.n_dim)
     self.dropout = get_dropout_class(self.n_dim)
     self.global_avg_pooling = get_global_avg_pooling_class(self.n_dim)
     self.adaptive_avg_pooling = get_global_avg_pooling_class(self.n_dim)
     self.batch_norm = get_batch_norm_class(self.n_dim)
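For orientation, each `cfg` tuple presumably follows the common MobileNetV2 convention `(expansion, out_planes, num_blocks, stride)`; that reading is inferred from the values above, not stated by the listing. A tiny sketch of unpacking it under that assumption:

# Presumed meaning of each cfg entry: (expansion, out_planes, num_blocks, stride).
cfg = [(1, 16, 1, 1), (6, 24, 2, 1)]
for expansion, out_planes, num_blocks, stride in cfg:
    print(f"{num_blocks} block(s): expand x{expansion}, "
          f"{out_planes} output channels, first stride {stride}")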
Example #8
    def to_concat_skip_model(self, start_id, end_id):
        """Add a weighted add concatenate connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(
            ('to_concat_skip_model', start_id, end_id))
        filters_end = self.layer_list[end_id].output.shape[-1]
        filters_start = self.layer_list[start_id].output.shape[-1]
        start_node_id = self.layer_id_to_output_node_ids[start_id][0]

        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]
        end_node_id = self.layer_id_to_output_node_ids[end_id][0]

        skip_output_id = self._insert_pooling_layer_chain(
            start_node_id, end_node_id)

        concat_input_node_id = self._add_node(
            deepcopy(self.node_list[end_node_id]))
        self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id)

        concat_layer = StubConcatenate()
        concat_layer.input = [
            self.node_list[concat_input_node_id],
            self.node_list[skip_output_id]
        ]
        concat_output_node_id = self._add_node(Node(concat_layer.output_shape))
        self._add_edge(concat_layer, concat_input_node_id,
                       concat_output_node_id)
        self._add_edge(concat_layer, skip_output_id, concat_output_node_id)
        concat_layer.output = self.node_list[concat_output_node_id]
        self.node_list[concat_output_node_id].shape = concat_layer.output_shape

        # Add the concatenate layer.
        new_conv_layer = get_conv_class(self.n_dim)(
            filters_start + filters_end, filters_end, 1)
        self._add_edge(new_conv_layer, concat_output_node_id, end_node_id)
        new_conv_layer.input = self.node_list[concat_output_node_id]
        new_conv_layer.output = self.node_list[end_node_id]
        self.node_list[end_node_id].shape = new_conv_layer.output_shape

        if self.weighted:
            filter_shape = (1, ) * self.n_dim
            weights = np.zeros((filters_end, filters_end) + filter_shape)
            for i in range(filters_end):
                filter_weight = np.zeros((filters_end, ) + filter_shape)
                center_index = (i, ) + (0, ) * self.n_dim
                filter_weight[center_index] = 1
                weights[i, ...] = filter_weight
            weights = np.concatenate(
                (weights,
                 np.zeros((filters_end, filters_start) + filter_shape)),
                axis=1)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])),
                                        add_noise(bias, np.array([0, 1]))))
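The weighted branch initializes the 1x1 conv after the concatenation as the identity over the original `filters_end` channels and zero over the `filters_start` skip channels, so the new skip path starts as a no-op. A standalone check of that layout (2-D case):

import numpy as np

# Re-create the weight layout for the post-concat 1x1 conv: identity on the
# first filters_end input channels, zeros on the skip channels.
filters_end, filters_start, n_dim = 3, 2, 2
filter_shape = (1,) * n_dim
weights = np.zeros((filters_end, filters_end) + filter_shape)
for i in range(filters_end):
    weights[(i, i) + (0,) * n_dim] = 1
weights = np.concatenate(
    (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1)

assert weights.shape == (3, 5, 1, 1)
assert np.array_equal(weights[:, :filters_end, 0, 0], np.eye(filters_end))
assert not weights[:, filters_end:].any()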
Example #9
 def _insert_pooling_layer_chain(self, start_node_id, end_node_id):
     skip_output_id = start_node_id
     for layer in self._get_pooling_layers(start_node_id, end_node_id):
         new_layer = deepcopy(layer)
         if is_layer(new_layer, 'Conv'):
             filters = self.node_list[start_node_id].shape[-1]
             new_layer = get_conv_class(self.n_dim)(filters, filters, 1, layer.stride)
         else:
             new_layer = deepcopy(layer)
         skip_output_id = self.add_layer(new_layer, skip_output_id)
     skip_output_id = self.add_layer(StubReLU(), skip_output_id)
     return skip_output_id
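In words: the helper walks the pooling and conv layers on the main path between the two nodes and replays copies of them on the skip path, swapping each conv for a 1x1 conv with the skip branch's channel count, so the skip output ends up with the same spatial shape as the destination node.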
Example #10
 def __init__(self, n_output_node, input_shape):
     super(CnnGenerator, self).__init__(n_output_node, input_shape)
     self.n_dim = len(self.input_shape) - 1
     if len(self.input_shape) > 4:
         raise ValueError('The input dimension is too high.')
     if len(self.input_shape) < 2:
         raise ValueError('The input dimension is too low.')
     self.conv = get_conv_class(self.n_dim)
     self.dropout = get_dropout_class(self.n_dim)
     self.global_avg_pooling = get_global_avg_pooling_class(self.n_dim)
     self.pooling = get_pooling_class(self.n_dim)
     self.batch_norm = get_batch_norm_class(self.n_dim)
Example #11
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel + n_add,
                                     layer.filters,
                                     kernel_size=layer.kernel_size)
    n_filters = layer.filters
    teacher_w, teacher_b = layer.get_weights()

    new_weight_shape = list(teacher_w.shape)
    new_weight_shape[1] = n_add
    new_weight = np.zeros(tuple(new_weight_shape))

    student_w = np.concatenate(
        (teacher_w[:, :start_dim, ...].copy(), add_noise(
            new_weight, teacher_w), teacher_w[:, start_dim:total_dim,
                                              ...].copy()),
        axis=1)
    new_layer = get_conv_class(n_dim)(layer.input_channel + n_add, n_filters,
                                      layer.kernel_size)
    new_layer.set_weights((student_w, teacher_b))
    return new_layer
Example #12
def create_new_layer(layer, n_dim):
    input_shape = layer.output.shape
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim), StubReLU]
    if is_layer(layer, LayerType.RELU):
        conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim)]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, LayerType.DROPOUT):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, LayerType.BATCH_NORM):
        conv_deeper_classes = [get_conv_class(n_dim), StubReLU]

    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        # It is in the conv layer part.
        layer_class = sample(conv_deeper_classes, 1)[0]

    if layer_class == StubDense:
        new_layer = StubDense(input_shape[0], input_shape[0])

    elif layer_class == get_dropout_class(n_dim):
        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)

    elif layer_class == get_conv_class(n_dim):
        new_layer = layer_class(input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1)

    elif layer_class == get_batch_norm_class(n_dim):
        new_layer = layer_class(input_shape[-1])

    elif layer_class == get_pooling_class(n_dim):
        new_layer = layer_class(sample((1, 3, 5), 1)[0])

    else:
        new_layer = layer_class()

    return new_layer
Example #13
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel + n_add,
                                     layer.filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride)
    n_filters = layer.filters
    teacher_w, teacher_b = layer.get_weights()

    new_weight_shape = list(teacher_w.shape)
    new_weight_shape[1] = n_add
    new_weight = np.zeros(tuple(new_weight_shape))

    student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
                                add_noise(new_weight, teacher_w),
                                teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
    new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
                                      n_filters,
                                      kernel_size=layer.kernel_size,
                                      stride=layer.stride)
    new_layer.set_weights((student_w, teacher_b))
    return new_layer
Example #14
 def __init__(self, n_output_node, input_shape):
     super(ResNetGenerator, self).__init__(n_output_node, input_shape)
     # self.layers = [2, 2, 2, 2]
     self.in_planes = 64
     self.block_expansion = 1
     self.n_dim = len(self.input_shape) - 1
     if len(self.input_shape) > 4:
         raise ValueError('The input dimension is too high.')
     elif len(self.input_shape) < 2:
         raise ValueError('The input dimension is too low.')
     self.conv = get_conv_class(self.n_dim)
     self.dropout = get_dropout_class(self.n_dim)
     self.global_avg_pooling = get_global_avg_pooling_class(self.n_dim)
     self.adaptive_avg_pooling = get_global_avg_pooling_class(self.n_dim)
     self.batch_norm = get_batch_norm_class(self.n_dim)
Example #15
File: graph.py  Project: Saiuz/autokeras
    def to_concat_skip_model(self, start_id, end_id):
        """Add a weighted add concatenate connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(('to_concat_skip_model', start_id, end_id))
        filters_end = self.layer_list[end_id].output.shape[-1]
        filters_start = self.layer_list[start_id].output.shape[-1]
        start_node_id = self.layer_id_to_output_node_ids[start_id][0]

        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]
        end_node_id = self.layer_id_to_output_node_ids[end_id][0]

        skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id)

        concat_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))
        self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id)

        concat_layer = StubConcatenate()
        concat_layer.input = [self.node_list[concat_input_node_id], self.node_list[skip_output_id]]
        concat_output_node_id = self._add_node(Node(concat_layer.output_shape))
        self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id)
        self._add_edge(concat_layer, skip_output_id, concat_output_node_id)
        concat_layer.output = self.node_list[concat_output_node_id]
        self.node_list[concat_output_node_id].shape = concat_layer.output_shape

        # Add the concatenate layer.
        new_conv_layer = get_conv_class(self.n_dim)(filters_start + filters_end,
                                                    filters_end, 1)
        self._add_edge(new_conv_layer, concat_output_node_id, end_node_id)
        new_conv_layer.input = self.node_list[concat_output_node_id]
        new_conv_layer.output = self.node_list[end_node_id]
        self.node_list[end_node_id].shape = new_conv_layer.output_shape

        if self.weighted:
            filter_shape = (1,) * self.n_dim
            weights = np.zeros((filters_end, filters_end) + filter_shape)
            for i in range(filters_end):
                filter_weight = np.zeros((filters_end,) + filter_shape)
                center_index = (i,) + (0,) * self.n_dim
                filter_weight[center_index] = 1
                weights[i, ...] = filter_weight
            weights = np.concatenate((weights,
                                      np.zeros((filters_end, filters_start) + filter_shape)), axis=1)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))
Example #16
 def __init__(self, n_output_node, input_shape):
     super().__init__(n_output_node, input_shape)
     # DenseNet Constant
     self.num_init_features = 64
     self.growth_rate = 32
     self.block_config = (6, 12, 24, 16)
     self.bn_size = 4
     self.drop_rate = 0
     # Stub layers
     self.n_dim = len(self.input_shape) - 1
     self.conv = get_conv_class(self.n_dim)
     self.dropout = get_dropout_class(self.n_dim)
     self.global_avg_pooling = get_global_avg_pooling_class(self.n_dim)
     self.adaptive_avg_pooling = get_global_avg_pooling_class(self.n_dim)
     self.max_pooling = get_pooling_class(self.n_dim)
     self.avg_pooling = get_avg_pooling_class(self.n_dim)
     self.batch_norm = get_batch_norm_class(self.n_dim)
Example #17
 def _insert_pooling_layer_chain(self, start_node_id, end_node_id):
     skip_output_id = start_node_id
     for layer in self._get_pooling_layers(start_node_id, end_node_id):
         new_layer = deepcopy(layer)
         if is_layer(new_layer, 'Conv'):
             filters = self.node_list[start_node_id].shape[-1]
              # a shape-preserving conv (stride 1, SAME padding) shrinks to 1x1
              is_shape_preserving = (layer.stride == 1 and
                                     layer.padding == int(layer.kernel_size / 2))
              kernel_size = 1 if is_shape_preserving else layer.kernel_size
             new_layer = get_conv_class(self.n_dim)(filters, filters, kernel_size, layer.stride,
                                                    padding=layer.padding)
             if self.weighted:
                 init_conv_weight(new_layer)
         else:
             new_layer = deepcopy(layer)
         skip_output_id = self.add_layer(new_layer, skip_output_id)
     skip_output_id = self.add_layer(StubReLU(), skip_output_id)
     return skip_output_id
Example #18
File: graph.py  Project: Saiuz/autokeras
 def _insert_pooling_layer_chain(self, start_node_id, end_node_id):
     skip_output_id = start_node_id
     for layer in self._get_pooling_layers(start_node_id, end_node_id):
         new_layer = deepcopy(layer)
         if is_layer(new_layer, LayerType.CONV):
             filters = self.node_list[start_node_id].shape[-1]
              # a shape-preserving conv (stride 1, SAME padding) shrinks to 1x1
              is_shape_preserving = (layer.stride == 1 and
                                     layer.padding == int(layer.kernel_size / 2))
              kernel_size = 1 if is_shape_preserving else layer.kernel_size
             new_layer = get_conv_class(self.n_dim)(filters, filters, kernel_size, layer.stride,
                                                    padding=layer.padding)
             if self.weighted:
                 init_conv_weight(new_layer)
         else:
             new_layer = deepcopy(layer)
         skip_output_id = self.add_layer(new_layer, skip_output_id)
     skip_output_id = self.add_layer(StubReLU(), skip_output_id)
     return skip_output_id
Example #19
    def __init__(self, input_shape, weighted=True):
        self.weighted = weighted
        self.node_list = []
        self.layer_list = []
        # node ids start from 0
        self.node_to_id = {}
        self.layer_to_id = {}
        self.layer_id_to_input_node_ids = {}
        self.layer_id_to_output_node_ids = {}
        self.adj_list = {}
        self.reverse_adj_list = {}
        self.operation_history = []
        self.n_dim = len(input_shape) - 1
        self.conv = get_conv_class(self.n_dim)
        self.batch_norm = get_batch_norm_class(self.n_dim)

        self.vis = None
        self._add_node(Node(input_shape))
Example #20
    def __init__(self, n_output_node, input_shape):
        """Initialize the instance.

        Args:
            n_output_node: An integer. Number of output nodes in the network.
            input_shape: A tuple. Input shape of the network.
        """
        super(CnnGenerator, self).__init__(n_output_node, input_shape)
        self.n_dim = len(self.input_shape) - 1
        if len(self.input_shape) > 4:
            raise ValueError('The input dimension is too high.')
        if len(self.input_shape) < 2:
            raise ValueError('The input dimension is too low.')
        self.conv = get_conv_class(self.n_dim)
        self.dropout = get_dropout_class(self.n_dim)
        self.global_avg_pooling = get_global_avg_pooling_class(self.n_dim)
        self.pooling = get_pooling_class(self.n_dim)
        self.batch_norm = get_batch_norm_class(self.n_dim)
Example #22
    def to_add_skip_model(self, start_id, end_id):
        """Add a weighted add skip-connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(('to_add_skip_model', start_id, end_id))
        filters_end = self.layer_list[end_id].output.shape[-1]
        filters_start = self.layer_list[start_id].output.shape[-1]
        start_node_id = self.layer_id_to_output_node_ids[start_id][0]

        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]
        end_node_id = self.layer_id_to_output_node_ids[end_id][0]

        skip_output_id = self._insert_pooling_layer_chain(
            start_node_id, end_node_id)

        # Add the conv layer
        new_conv_layer = get_conv_class(self.n_dim)(filters_start, filters_end,
                                                    1)
        skip_output_id = self.add_layer(new_conv_layer, skip_output_id)

        # Add the add layer.
        add_input_node_id = self._add_node(
            deepcopy(self.node_list[end_node_id]))
        add_layer = StubAdd()

        self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id)
        self._add_edge(add_layer, add_input_node_id, end_node_id)
        self._add_edge(add_layer, skip_output_id, end_node_id)
        add_layer.input = [
            self.node_list[add_input_node_id], self.node_list[skip_output_id]
        ]
        add_layer.output = self.node_list[end_node_id]
        self.node_list[end_node_id].shape = add_layer.output_shape

        # Set weights to the additional conv layer.
        if self.weighted:
            filter_shape = (1, ) * self.n_dim
            weights = np.zeros((filters_end, filters_start) + filter_shape)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])),
                                        add_noise(bias, np.array([0, 1]))))
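Unlike the concat variant, the add-skip conv is initialized to all zeros (then perturbed by `add_noise`), so the new branch initially adds almost nothing and the network's function is preserved. A one-liner sketch of that initialization (2-D case):

import numpy as np

# Zero initialization of the 1x1 conv feeding the StubAdd: the skip branch
# starts as a near no-op once add_noise perturbs these slightly.
filters_start, filters_end, n_dim = 2, 3, 2
weights = np.zeros((filters_end, filters_start) + (1,) * n_dim)
bias = np.zeros(filters_end)
assert weights.shape == (3, 2, 1, 1) and not weights.any() and not bias.any()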
Example #23
File: graph.py  Project: Saiuz/autokeras
    def to_add_skip_model(self, start_id, end_id):
        """Add a weighted add skip-connection from after start node to end node.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(('to_add_skip_model', start_id, end_id))
        filters_end = self.layer_list[end_id].output.shape[-1]
        filters_start = self.layer_list[start_id].output.shape[-1]
        start_node_id = self.layer_id_to_output_node_ids[start_id][0]

        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]
        end_node_id = self.layer_id_to_output_node_ids[end_id][0]

        skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id)

        # Add the conv layer
        new_conv_layer = get_conv_class(self.n_dim)(filters_start,
                                                    filters_end,
                                                    1)
        skip_output_id = self.add_layer(new_conv_layer, skip_output_id)

        # Add the add layer.
        add_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))
        add_layer = StubAdd()

        self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id)
        self._add_edge(add_layer, add_input_node_id, end_node_id)
        self._add_edge(add_layer, skip_output_id, end_node_id)
        add_layer.input = [self.node_list[add_input_node_id], self.node_list[skip_output_id]]
        add_layer.output = self.node_list[end_node_id]
        self.node_list[end_node_id].shape = add_layer.output_shape

        # Set weights to the additional conv layer.
        if self.weighted:
            filter_shape = (1,) * self.n_dim
            weights = np.zeros((filters_end, filters_start) + filter_shape)
            bias = np.zeros(filters_end)
            new_conv_layer.set_weights((add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))
Example #24
def create_new_layer(layer, n_dim):
    input_shape = layer.output.shape
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [
        get_conv_class(n_dim),
        get_batch_norm_class(n_dim), StubReLU
    ]
    if is_layer(layer, LayerType.RELU):
        conv_deeper_classes = [
            get_conv_class(n_dim),
            get_batch_norm_class(n_dim)
        ]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, LayerType.DROPOUT):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, LayerType.BATCH_NORM):
        conv_deeper_classes = [get_conv_class(n_dim)]  #, StubReLU]

    new_layers = []
    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        # It is in the conv layer part.
        layer_class = sample(conv_deeper_classes, 1)[0]

    if layer_class == StubDense:
        new_layer = StubDense(input_shape[0], input_shape[0])
        new_layers.append(new_layer)

    elif layer_class == get_dropout_class(n_dim):
        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)
        new_layers.append(new_layer)

    elif layer_class == get_conv_class(n_dim):
        # Instead of adding a single conv layer, e.g.
        # new_layer = layer_class(input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1)
        # add a MobileNetV2-style block: expand 1x1 -> depthwise 3x3 -> project 1x1.
        in_planes = input_shape[-1]
        expansion = sample((1, 6), 1)[0]
        stride = sample((1, 2), 1)[0]
        planes = expansion * in_planes

        new_layer = layer_class(in_planes, planes, 1, stride=1, padding=0)
        new_layers.append(new_layer)

        new_layer = get_batch_norm_class(n_dim)(planes)
        new_layers.append(new_layer)

        new_layer = StubReLU()
        new_layers.append(new_layer)

        new_layer = layer_class(planes,
                                planes,
                                3,
                                stride=stride,
                                padding=1,
                                groups=planes)
        new_layers.append(new_layer)

        new_layer = get_batch_norm_class(n_dim)(planes)
        new_layers.append(new_layer)

        new_layer = StubReLU()
        new_layers.append(new_layer)

        new_layer = layer_class(planes, in_planes, 1, stride=1, padding=0)
        new_layers.append(new_layer)

        new_layer = get_batch_norm_class(n_dim)(in_planes)
        new_layers.append(new_layer)

    elif layer_class == get_batch_norm_class(n_dim):
        new_layer = layer_class(input_shape[-1])
        new_layers.append(new_layer)

    elif layer_class == get_pooling_class(n_dim):
        new_layer = layer_class(sample((1, 3, 5), 1)[0])
        new_layers.append(new_layer)

    else:
        new_layer = layer_class()
        new_layers.append(new_layer)

    return new_layers
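The conv branch above replaces the single conv with a MobileNetV2-style inverted residual: a 1x1 expansion to `planes = expansion * in_planes` channels, a depthwise 3x3 (`groups=planes`), and a 1x1 projection back to `in_planes`, each followed by batch norm. Because the block maps `in_planes` back to `in_planes`, it can be spliced into the graph without touching neighbouring layers. The channel bookkeeping, as a sketch:

# Channel bookkeeping for the inverted-residual block assembled above.
in_planes, expansion = 16, 6
planes = expansion * in_planes        # 96 channels after the 1x1 expansion
# depthwise 3x3: groups == planes, i.e. one filter per channel
# 1x1 projection: planes -> in_planes, restoring the original channel count
assert planes % in_planes == 0
print(f"{in_planes} -> {planes} (expand) -> {planes} (depthwise) -> {in_planes} (project)")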