Example #1
def deeper_conv_block(conv_layer, kernel_size):
    """Get deeper layer for convolution layer

    Args:
        conv_layer: the convolution layer from which we get deeper layer
        kernel_size: the size of kernel

    Returns:
        The deeper convolution layer
    """
    filter_shape = (kernel_size, ) * (len(conv_layer.kernel_size))
    n_filters = conv_layer.filters
    weight = np.zeros(filter_shape + (n_filters, n_filters))
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
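    # Identity initialization: each filter gets a single 1 at its spatial
    # center, connecting input channel i straight to output channel i.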
    for i in range(n_filters):
        filter_weight = np.zeros(filter_shape + (n_filters, ))
        index = center + (i, )
        filter_weight[index] = 1
        weight[..., i] = filter_weight
    bias = np.zeros(n_filters)
    conv_func = get_conv_layer_func(len(filter_shape))
    new_conv_layer = conv_func(n_filters,
                               kernel_size=filter_shape,
                               padding='same')
    new_conv_layer.build((None, ) * (len(filter_shape) + 1) + (n_filters, ))
    new_conv_layer.set_weights((weight, bias))
    return [new_conv_layer, BatchNormalization(), Activation('relu')]
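
Because the only nonzero kernel entry is a 1 at the spatial center connecting channel i to itself, the block above is function-preserving: inserting it leaves the network's output unchanged until training resumes. A minimal, self-contained sketch of that property, assuming get_conv_layer_func(2) resolves to a standard Conv2D (the helper is project-local, so this is an assumption):

import numpy as np
import tensorflow as tf

n_filters, k = 4, 3
weight = np.zeros((k, k, n_filters, n_filters))
center = ((k - 1) // 2,) * 2
for i in range(n_filters):
    weight[center + (i, i)] = 1.0          # 1 at the spatial center, channel i -> i

layer = tf.keras.layers.Conv2D(n_filters, kernel_size=(k, k), padding='same')
layer.build((None, None, None, n_filters))
layer.set_weights([weight, np.zeros(n_filters)])

x = np.random.rand(1, 8, 8, n_filters).astype('float32')
assert np.allclose(layer(x).numpy(), x, atol=1e-5)   # acts as the identity
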
Example #2
def wider_pre_conv(layer, n_add_filters):
    """Get previous convolution layer for current layer

   Args:
       layer: layer from which we get wider previous convolution layer
       n_add_filters: the filters size of convolution layer

   Returns:
       The previous convolution layer
   """
    pre_filter_shape = layer.kernel_size
    n_pre_filters = layer.filters
    rand = np.random.randint(n_pre_filters, size=n_add_filters)
    conv_func = get_conv_layer_func(len(pre_filter_shape))
    teacher_w, teacher_b = layer.get_weights()
    student_w = teacher_w.copy()
    student_b = teacher_b.copy()
    # Widen the layer by copying randomly chosen teacher filters and biases.
    for i in range(len(rand)):
        teacher_index = rand[i]
        new_weight = teacher_w[..., teacher_index]
        new_weight = new_weight[..., np.newaxis]
        student_w = np.concatenate((student_w, new_weight), axis=-1)
        student_b = np.append(student_b, teacher_b[teacher_index])
    new_pre_layer = conv_func(n_pre_filters + n_add_filters,
                              kernel_size=pre_filter_shape,
                              padding='same')
    new_pre_layer.build((None, ) * (len(pre_filter_shape) + 1) +
                        (student_w.shape[-2], ))
    new_pre_layer.set_weights((student_w, student_b))
    return new_pre_layer
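
The widening step appends copies of randomly chosen teacher filters (and their biases) along the output-filter axis; the companion function in Example #3 then pads the next layer with zeros so the copies do not change the network's output. The array mechanics in isolation, with hypothetical shapes:

import numpy as np

teacher_w = np.random.rand(3, 3, 4, 4)     # (kh, kw, in_channels, filters)
teacher_b = np.random.rand(4)
rand = np.random.randint(4, size=2)        # teacher filters chosen for copying

student_w, student_b = teacher_w.copy(), teacher_b.copy()
for teacher_index in rand:
    new_weight = teacher_w[..., teacher_index][..., np.newaxis]
    student_w = np.concatenate((student_w, new_weight), axis=-1)
    student_b = np.append(student_b, teacher_b[teacher_index])

assert student_w.shape == (3, 3, 4, 6)     # two copied filters appended
assert student_b.shape == (6,)
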
Example #3
def wider_next_conv(layer, start_dim, total_dim, n_add):
    """Get next wider convolution layer for current layer

   Args:
       layer: the layer from which we get wider next convolution layer
       start_dim: the started dimension
       total_dim: the total dimension
       n_add: the filters size of convolution layer

   Returns:
       The next wider convolution layer
   """
    filter_shape = layer.kernel_size
    conv_func = get_conv_layer_func(len(filter_shape))
    n_filters = layer.filters
    teacher_w, teacher_b = layer.get_weights()

    new_weight_shape = list(teacher_w.shape)
    new_weight_shape[-2] = n_add
    new_weight = np.zeros(tuple(new_weight_shape))

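    # Splice the zero block into the input-channel axis so the channels added
    # upstream initially contribute nothing to this layer's output.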
    student_w = np.concatenate(
        (teacher_w[..., :start_dim, :].copy(), new_weight,
         teacher_w[..., start_dim:total_dim, :].copy()),
        axis=-2)
    new_layer = conv_func(n_filters, kernel_size=filter_shape, padding='same')
    input_shape = list((None, ) * (len(filter_shape) + 1) +
                       (student_w.shape[-2], ))
    new_layer.build(tuple(input_shape))
    new_layer.set_weights((student_w, teacher_b))
    return new_layer
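
Padding axis -2 (the input-channel axis of the kernel) with zeros means the new upstream channels are ignored at first, so the layer's output is preserved exactly. The slicing in isolation, again with hypothetical shapes:

import numpy as np

teacher_w = np.random.rand(3, 3, 4, 8)     # (kh, kw, in_channels, filters)
start_dim, total_dim, n_add = 2, 4, 2      # 2 new channels inserted at index 2

new_weight = np.zeros((3, 3, n_add, 8))    # zero weights for the new channels
student_w = np.concatenate(
    (teacher_w[..., :start_dim, :], new_weight,
     teacher_w[..., start_dim:total_dim, :]),
    axis=-2)

assert student_w.shape == (3, 3, 6, 8)     # the filters themselves are unchanged
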
Example #4
    def generate(self):
        """Return the default classifier model that has been compiled."""
        pool = self._get_pool_layer_func()
        conv = get_conv_layer_func(len(self._get_shape(3)))

        input_tensor = Input(shape=self.input_shape)
        output_tensor = conv(32,
                             kernel_size=self._get_shape(3),
                             padding='same')(input_tensor)
        output_tensor = BatchNormalization()(output_tensor)
        output_tensor = Activation('relu')(output_tensor)

        output_tensor = conv(64, self._get_shape(3),
                             padding='same')(output_tensor)
        output_tensor = BatchNormalization()(output_tensor)
        output_tensor = Activation('relu')(output_tensor)

        output_tensor = pool(pool_size=self._get_shape(2),
                             padding='same')(output_tensor)
        output_tensor = Dropout(0.25)(output_tensor)
        output_tensor = Flatten()(output_tensor)
        output_tensor = Dense(128, activation='relu')(output_tensor)
        output_tensor = Dropout(0.5)(output_tensor)
        output_tensor = Dense(self.n_classes,
                              activation='softmax')(output_tensor)

        model = Model(input_tensor, output_tensor)
        model.compile(loss=categorical_crossentropy,
                      optimizer=Adadelta(),
                      metrics=['accuracy'])
        return model
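
For reference, here is a concrete 2-D instantiation of the model this method builds, for 28x28 grayscale input and 10 classes, assuming (hypothetically) that _get_shape(3) yields (3, 3), _get_shape(2) yields (2, 2), and _get_pool_layer_func returns MaxPooling2D; the helpers are project-local, so this is a sketch rather than the original code:

from tensorflow.keras import layers, Model

inputs = layers.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, (3, 3), padding='same')(inputs)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(64, (3, 3), padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2), padding='same')(x)
x = layers.Dropout(0.25)(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10, activation='softmax')(x)

model = Model(inputs, outputs)
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
model.summary()
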
Example #5
    def generate(self):
        """Return the default classifier model that has been compiled."""
        pool = self._get_pool_layer_func()
        conv = get_conv_layer_func(len(self._get_shape(3)))

        input_tensor = Input(shape=self.input_shape)
        output_tensor = conv(32,
                             kernel_size=self._get_shape(3),
                             padding='same',
                             activation='linear')(input_tensor)
        output_tensor = BatchNormalization()(output_tensor)
        output_tensor = Activation('relu')(output_tensor)

        output_tensor = pool(padding='same')(output_tensor)

        output_tensor = conv(64,
                             kernel_size=self._get_shape(3),
                             padding='same',
                             activation='linear')(output_tensor)
        output_tensor = BatchNormalization()(output_tensor)
        output_tensor = Activation('relu')(output_tensor)

        output_tensor = pool(padding='same')(output_tensor)

        output_tensor = conv(64,
                             kernel_size=self._get_shape(3),
                             padding='same',
                             activation='linear')(output_tensor)
        output_tensor = BatchNormalization()(output_tensor)
        output_tensor = Activation('relu')(output_tensor)

        output_tensor = pool(padding='same')(output_tensor)

        output_tensor = conv(64,
                             kernel_size=self._get_shape(3),
                             padding='same',
                             activation='linear')(output_tensor)
        output_tensor = BatchNormalization()(output_tensor)
        output_tensor = Activation('relu')(output_tensor)

        output_tensor = Flatten()(output_tensor)
        output_tensor = Dense(128, activation='relu')(output_tensor)
        output_tensor = Dense(128, activation='relu')(output_tensor)
        output_tensor = Dense(self.n_classes,
                              activation='softmax')(output_tensor)
        return Model(inputs=input_tensor, outputs=output_tensor)
Example #6
    def generate(self):
        """Return the random generated CNN model."""
        conv_num = randint(1, 10)
        dense_num = randint(1, 10)
        dropout_rate = random()
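        # randint is inclusive, so filter_size is 3 or 5 and pool_size is 2 or 3.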
        filter_size = randint(1, 2) * 2 + 1
        pool_size = randint(2, 3)
        filter_shape = self._get_shape(filter_size)
        pool_shape = self._get_shape(pool_size)
        pool = self._get_pool_layer_func()
        conv = get_conv_layer_func(len(filter_shape))

        input_tensor = Input(shape=self.input_shape)
        output_tensor = input_tensor
        for i in range(conv_num):
            kernel_num = randint(10, 30)
            output_tensor = conv(kernel_num, filter_shape,
                                 padding='same')(output_tensor)
            output_tensor = BatchNormalization()(output_tensor)
            output_tensor = Activation('relu')(output_tensor)
            if random() > 0.5:
                output_tensor = pool(pool_size=pool_shape,
                                     padding='same')(output_tensor)
            if random() > 0.5:
                output_tensor = Dropout(dropout_rate)(output_tensor)
        output_tensor = Flatten()(output_tensor)
        for i in range(dense_num):
            node_num = randint(128, 1024)
            output_tensor = Dense(node_num, activation='relu')(output_tensor)
            if random() > 0.5:
                output_tensor = Dropout(dropout_rate)(output_tensor)
        output_tensor = Dense(self.n_classes,
                              activation='softmax')(output_tensor)
        model = Model(input_tensor, output_tensor)
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(),
                      metrics=['accuracy'])
        return model
Example #7
    def generate(self,
                 model_len=constant.MODEL_LEN,
                 model_width=constant.MODEL_WIDTH):
        """Return the default classifier model that has been compiled."""
        pool = self._get_pool_layer_func()
        conv = get_conv_layer_func(len(self._get_shape(3)))
        ave = get_ave_layer_func(len(self._get_shape(3)))

        pooling_len = int(model_len / 4)
        output_tensor = input_tensor = Input(shape=self.input_shape)
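        # Pre-activation ordering (BN -> ReLU -> Conv), with a pooling layer
        # roughly after every quarter of the convolutions except the last.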
        for i in range(model_len):
            output_tensor = BatchNormalization()(output_tensor)
            output_tensor = Activation('relu')(output_tensor)
            output_tensor = conv(model_width,
                                 kernel_size=self._get_shape(3),
                                 padding='same')(output_tensor)
            output_tensor = Dropout(constant.CONV_DROPOUT_RATE)(output_tensor)
            if (i + 1) % pooling_len == 0 and i != model_len - 1:
                output_tensor = pool(padding='same')(output_tensor)

        output_tensor = ave()(output_tensor)
        output_tensor = Dense(self.n_classes,
                              activation='softmax')(output_tensor)
        return Model(inputs=input_tensor, outputs=output_tensor)