# Beispiel #1 (example separator from the original paste)
# 0
def cloneLayerFromLayer(pLayer):
    """Return a fresh, unweighted clone of `pLayer` built from its config.

    The clone has the same configuration as the original layer but none
    of its weights.

    Args:
        pLayer: a Keras layer instance.

    Returns:
        A new layer of the matching class, or None when the layer type
        is not supported.
    """
    # Classes are checked in order and the first isinstance match wins,
    # mirroring the original if/elif chain.
    # Bug fix: MaxPooling1D previously produced a MaxPooling2D clone.
    supported = (
        # Convolutions:
        Convolution1D, Convolution2D, Convolution3D,
        # Max-pooling:
        MaxPooling1D, MaxPooling2D, MaxPooling3D,
        # Average-pooling:
        AveragePooling1D, AveragePooling2D, AveragePooling3D,
        # Misc:
        Flatten, Merge, Activation, Dropout,
        Dense,
    )
    for layer_cls in supported:
        if isinstance(pLayer, layer_cls):
            return layer_cls.from_config(pLayer.get_config())
    return None
def generate_conv_model(model):
    """Rebuild the sub-model found at index 1 of `model` as a new Model.

    Layers are recreated from their configs; learned weights are carried
    over only for Conv2D and BatchNormalization layers.

    Args:
        model: a Keras model whose layer at index 1 is itself a model
               with a 4-D input (batch, dim1, dim2, dim3) -- presumably
               channels_last (H, W, C); TODO confirm.

    Returns:
        A new functional `Model` mirroring the nested model's layers.
    """

    # Work on the nested model, not the outer wrapper.
    model = model.get_layer(index=1)

    # Input shape without the batch dimension.
    inp = (model.inputs[0].shape.dims[1].value,
           model.inputs[0].shape.dims[2].value,
           model.inputs[0].shape.dims[3].value)

    H = Input(inp)
    inp = H

    # Index 0 is skipped -- assumed to be the input layer; verify.
    for layer_idx in range(1, len(model.layers)):

        layer = model.get_layer(index=layer_idx)
        config = layer.get_config()

        # NOTE(review): the first three checks are independent `if`s;
        # only BatchNormalization/Conv2D form an if/elif pair, so a
        # BatchNormalization layer suppresses the Conv2D check only.
        if isinstance(layer, MaxPooling2D):
            H = MaxPooling2D.from_config(config)(H)

        if isinstance(layer, Dropout):
            H = Dropout.from_config(config)(H)

        if isinstance(layer, Activation):
            H = Activation.from_config(config)(H)

        if isinstance(layer, BatchNormalization):
            # Transfer the learned BN parameters/statistics directly.
            weights = layer.get_weights()
            H = BatchNormalization(weights=weights)(H)

        elif isinstance(layer, Conv2D):
            weights = layer.get_weights()

            # weights[1] is the bias vector; its length equals the filter
            # count (assumes use_bias=True -- TODO confirm).
            config['filters'] = weights[1].shape[0]
            H = Conv2D(activation=config['activation'],
                       activity_regularizer=config['activity_regularizer'],
                       bias_constraint=config['bias_constraint'],
                       bias_regularizer=config['bias_regularizer'],
                       data_format=config['data_format'],
                       dilation_rate=config['dilation_rate'],
                       filters=config['filters'],
                       kernel_constraint=config['kernel_constraint'],
                       kernel_regularizer=config['kernel_regularizer'],
                       kernel_size=config['kernel_size'],
                       name=config['name'],
                       padding=config['padding'],
                       strides=config['strides'],
                       trainable=config['trainable'],
                       use_bias=config['use_bias'],
                       weights=weights)(H)

    return Model(inp, H)
# Beispiel #3 (example separator from the original paste)
# 0
    def build_model(self, layers, compile={}):
        """Build `self.__model` from configuration items.

        Args:
            layers ([dict]): layer definitions, one dict per layer.  Each
                definition carries a `type` key ('dense', 'lstm',
                'dropout' or 'cudnnlstm') selecting the layer class; the
                remaining keys are forwarded to that class's
                `from_config` (see `Dense`_ and `LSTM`_).
            compile (dict): keyword arguments forwarded to `compile`_.
                Note: the default {} is never mutated here.

        Returns:
            None

        Raises:
            KeyError: if a layer definition lacks the `type` key.

        .. _Dense:
        https://keras.io/zh/layers/core/#dense
        .. _LSTM:
        https://keras.io/zh/layers/recurrent/#lstm
        .. _compile:
        https://keras.io/zh/models/model/#compile

        """
        # Tag -> Keras layer class.  Unknown tags are silently ignored,
        # matching the original if/elif chain's behavior.
        layer_classes = {
            'dense': Dense,
            'lstm': LSTM,
            'dropout': Dropout,
            'cudnnlstm': CuDNNLSTM,
        }
        for layer in layers:
            # Bug fix: copy before popping so the caller's dicts are not
            # mutated (the old `layer.pop('type')` made a second call
            # with the same list raise KeyError).
            config = dict(layer)
            t = config.pop('type')
            layer_cls = layer_classes.get(t)
            if layer_cls is not None:
                self.__model.add(layer_cls.from_config(config))

        # https://keras.io/zh/models/model/#compile
        self.__model.compile(**compile)
# Beispiel #4 (example separator from the original paste)
# 0
def read_model(src_model):
    """Load the model saved at path `src_model` and rebuild it layer by layer.

    Conv2D layers keep their weights and are forced trainable; Dense
    layers are re-initialized (glorot_uniform, weights NOT copied) and
    renamed with a "1" suffix.

    Args:
        src_model: file path accepted by `load_model`.

    Returns:
        A new functional `Model` mirroring the loaded model.
    """
    ## Rebuild loop over every layer of the loaded model.

    ## Conv2D weights are preserved; Dense weights are discarded.
    model = load_model(src_model)
    # Input shape without the batch dimension (assumes a 4-D input,
    # presumably channels_last -- TODO confirm).
    inp = (model.inputs[0].shape.dims[1].value,
           model.inputs[0].shape.dims[2].value,
           model.inputs[0].shape.dims[3].value)

    H = Input(inp)
    inp = H

    # Starts at 0 (unlike the sibling rebuild functions); non-matching
    # layers such as InputLayer simply fall through every branch.
    for i in range(len(model.layers)):
        layer = model.get_layer(index=i)
        config = layer.get_config()

        # NOTE(review): MaxPooling2D/Dropout are independent `if`s;
        # Activation/Conv2D/Flatten/Dense form one if/elif chain.
        if isinstance(layer, MaxPooling2D):
            H = MaxPooling2D.from_config(config)(H)

        if isinstance(layer, Dropout):
            H = Dropout.from_config(config)(H)

        if isinstance(layer, Activation):
            H = Activation.from_config(config)(H)
        elif isinstance(layer, Conv2D):
            # Keep the learned weights, but force the layer trainable.
            weights = layer.get_weights()
            config['trainable'] = True
            H = Conv2D(
                activation=config['activation'],
                activity_regularizer=config['activity_regularizer'],
                bias_constraint=config['bias_constraint'],
                bias_regularizer=config['bias_regularizer'],
                data_format=config['data_format'],
                dilation_rate=config['dilation_rate'],
                filters=config['filters'],
                kernel_constraint=config['kernel_constraint'],
                # config=config['config'],
                # scale=config['scale'],
                kernel_regularizer=config['kernel_regularizer'],
                kernel_size=config['kernel_size'],
                name=config['name'],
                padding=config['padding'],
                strides=config['strides'],
                trainable=config['trainable'],
                use_bias=config['use_bias'],
                weights=weights)(H)

        elif isinstance(layer, Flatten):
            H = Flatten()(H)

        elif isinstance(layer, Dense):
            # `weights` is fetched but intentionally NOT passed below:
            # the Dense layer restarts from a glorot_uniform init and is
            # renamed to avoid a clash with the original layer's name.
            weights = layer.get_weights()
            config['trainable'] = True
            config['name'] = config['name'] + "1"
            H = Dense(units=config['units'],
                      activation=config['activation'],
                      activity_regularizer=config['activity_regularizer'],
                      bias_constraint=config['bias_constraint'],
                      bias_regularizer=config['bias_regularizer'],
                      kernel_constraint=config['kernel_constraint'],
                      kernel_regularizer=config['kernel_regularizer'],
                      kernel_initializer='glorot_uniform',
                      name=config['name'],
                      trainable=config['trainable'],
                      use_bias=config['use_bias'])(H)

    ## it returns the model changed
    return Model(inp, H)
# Beispiel #5 (example separator from the original paste)
# 0
def PrunWeight(model, model_name, x_prune, y_prune, x_test, y_test, pruning_rate, compile_info , fine_tune):
    """Magnitude-prune conv/dense kernel weights of `model` below a global threshold.

    All conv and dense kernel magnitudes are pooled and sorted; the value
    at rank `pruning_rate * len(parameters)` becomes the cutoff.  A new
    model is built in which conv/dense layers are replaced by
    MaskedConv2D / MaskedDense carrying a 0/1 mask, the surviving weights
    are copied in (multiplied by the mask), and the result is optionally
    fine-tuned.

    Args:
        model: trained Keras model (4-D input, layers matched by name
               substrings: "conv2d", "dense", "activation", etc.).
        model_name: unused in this function (kept for interface
               compatibility -- NOTE(review)).
        x_prune, y_prune: fine-tuning data.
        x_test, y_test: validation data for fine-tuning.
        pruning_rate (float): fraction of weights to prune, in [0, 1).
        compile_info (dict): 'loss', 'optimizer', 'metrics' for compile.
        fine_tune (bool): when True, fit for up to 10 epochs with early
               stopping before returning.

    Returns:
        The compiled (and optionally fine-tuned) pruned model.
    """
    
    ############ Calculating weight limit for pruning ##### 
    ############ We do not consider biases in the pruning process #####
    parameters = []
    conv_layers_weights = []
    # Layers are identified by name substring, not isinstance.
    for layer in model.layers:
        if layer.get_config()['name'].find("conv") != -1:
            conv_layers_weights.append(layer.get_weights())

    # weights[0] is the kernel tensor; biases (weights[1]) are excluded.
    for _, layer_weights in enumerate(conv_layers_weights):
        parameters.append(K.flatten(K.abs(layer_weights[0])))

    dense_layers_weights = []
    for layer in model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            dense_layers_weights.append(layer.get_weights())

    for _, layer_weights in enumerate(dense_layers_weights):
        parameters.append(K.flatten(K.abs(layer_weights[0])))
    
    # Global magnitude threshold: the pruning_rate-quantile of all
    # absolute kernel values.
    parameters =  K.concatenate(parameters)
    parameters = sorted(K.get_value(parameters).tolist())
    weight_limit = parameters[int(pruning_rate*len(parameters))]
    print("Pruning weight threshhold : ", weight_limit)
    ##################################################################
    # Rebuild the graph.  Tensors are tracked by the original layers'
    # input/output tensor names so branches (e.g. residual adds) resolve.
    dense_layers_weights = []
    conv_filter_weights = []
    batch_norm_params = []
    # Masks are appended in layer order and consumed in the same order
    # by the weight-setting loop below.
    kernel_masks_for_dense_and_conv_layers = []
    model_tensors_dict = {}
    input_height,input_width,input_channels = model.input.shape[1:]

    pruned_model_input = Input(shape=(int(input_height),int(input_width),int(input_channels)))

    # Seed the tensor dict so the first real layer finds its input.
    if model.layers[0].name.find('input') == -1:
        model_tensors_dict[str(model.layers[0].input.name)] = pruned_model_input
    else:
        model_tensors_dict[str(model.layers[0].output.name)] = pruned_model_input
        
    Flow = pruned_model_input
    
    for _,layer in enumerate(model.layers):
        if layer.get_config()['name'].find("conv2d") != -1:
            # 1 where the weight survives (|w| >= limit), 0 where pruned.
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]) ,'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow  = MaskedConv2D(filters=layer.get_config()['filters'], kernel_size=layer.get_config()['kernel_size'],kernel_initializer=layer.get_config()['kernel_initializer'], 
            kernel_regularizer= layer.get_config()['kernel_regularizer'], strides=layer.get_config()['strides'],
            padding=layer.get_config()['padding'], activation=layer.get_config()['activation'], use_bias=layer.get_config()['use_bias'], Masked=True , kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            conv_filter_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow
            
        elif layer.get_config()['name'].find("dense") != -1:
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]) ,'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow = MaskedDense(units=layer.get_config()['units'], activation=layer.get_config()['activation'],
            use_bias=layer.get_config()['use_bias'], kernel_initializer = layer.get_config()['kernel_initializer'],
            Masked=True , kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            dense_layers_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("activation") != -1:
            Flow = Activation.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
            
        elif layer.get_config()['name'].find("max_pooling") != -1:
            Flow = MaxPooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("average_pooling") != -1:
            Flow = AveragePooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("dropout") != -1:
            Flow = Dropout.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("flatten") != -1:
            Flow = Flatten.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("add") != -1:
            # Merge layer: gather every incoming tensor by name.
            input_tensors_list = []
            for idx in range(len(layer.input)):
                input_tensors_list.append(model_tensors_dict[layer.input[idx].name])
            Flow = add(input_tensors_list)
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("batch_normalization") != -1:
            batch_norm_params.append(layer.get_weights())
            Flow = BatchNormalization.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("input") != -1:
            pass
            
    pruned_model  = Model(pruned_model_input, Flow)
    ########################## setting the weights of the new layers #############################
    # Saved weights/masks are consumed FIFO; the pruned model's layer
    # order matches the original's, so indices line up.
    for layer in pruned_model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            # Zero out the pruned kernel entries; biases are untouched.
            pruned_weights = [dense_layers_weights[0][0]*K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(dense_layers_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del dense_layers_weights[0]
        
        elif layer.get_config()['name'].find("conv2d") != -1:
            pruned_weights = [conv_filter_weights[0][0]*K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(conv_filter_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del conv_filter_weights[0]
            
        elif layer.get_config()['name'].find("batch") != -1:
            layer.set_weights(batch_norm_params[0])
            del batch_norm_params[0]
    ############################### Fine-tuning ############################################
    pruned_model.compile(loss=compile_info['loss'],
            optimizer=compile_info['optimizer'],
            metrics=compile_info['metrics'])
    
    if not fine_tune:
        return pruned_model
    else:
        early_stopping = EarlyStopping(monitor='val_acc', patience=2,verbose=0)
        callbacks = [early_stopping]
        # fine-tuning the network.
        pruned_model.fit(x_prune, y_prune,
                    batch_size=256,
                    epochs=10,
                    validation_data=(x_test, y_test),
                    shuffle=True,
                    callbacks=callbacks,
                    verbose=0
                    )

        return pruned_model
# Beispiel #6 (example separator from the original paste)
# 0
    def rebuild_model(self, kernel_list=None):
        """Rebuild `self.model` with the kernels in `kernel_list` removed.

        The Conv2D layer at `self.target_layer` loses the listed output
        filters; the layer at `self.changed_layer` (the next layer that
        consumes them, Conv2D or Dense) has its matching input channels
        removed as well.

        Args:
            kernel_list: indices of the filters to delete from the target
                Conv2D layer (passed to `np.delete`).

        Returns:
            A new `Model` with the pruned architecture and weights.
        """

        # Input shape without the batch dimension (assumes a 4-D input,
        # presumably channels_last -- TODO confirm).
        inp = (self.model.inputs[0].shape.dims[1].value,
               self.model.inputs[0].shape.dims[2].value,
               self.model.inputs[0].shape.dims[3].value)

        H = Input(inp)
        inp = H

        # Starts at 0; layer 0 (input layer) falls through every branch.
        for i in range(len(self.model.layers)):
            layer = self.model.get_layer(index=i)
            config = layer.get_config()

            # NOTE(review): MaxPooling2D/Dropout are independent `if`s;
            # Activation/Conv2D/Flatten/Dense form one if/elif chain.
            if isinstance(layer, MaxPooling2D):
                H = MaxPooling2D.from_config(config)(H)

            if isinstance(layer, Dropout):
                H = Dropout.from_config(config)(H)

            if isinstance(layer, Activation):
                H = Activation.from_config(config)(H)
            elif isinstance(layer, Conv2D):
                weights = layer.get_weights()

                if i == self.target_layer:
                    # Drop the pruned output filters (kernel axis 3) and,
                    # when present, the matching bias entries.
                    weights[0] = np.delete(weights[0], kernel_list, axis=3)
                    if (len(weights) == 2):
                        weights[1] = np.delete(weights[1], kernel_list, 0)
                else:
                    if i == self.changed_layer:
                        # Following conv: drop the matching input channels.
                        weights[0] = np.delete(weights[0], kernel_list, axis=2)

                # Filter count read back from the (possibly shrunk) kernel.
                config['filters'] = weights[0].shape[3]

                H = Conv2D(
                    activation=config['activation'],
                    activity_regularizer=config['activity_regularizer'],
                    bias_constraint=config['bias_constraint'],
                    bias_regularizer=config['bias_regularizer'],
                    data_format=config['data_format'],
                    dilation_rate=config['dilation_rate'],
                    filters=config['filters'],
                    kernel_constraint=config['kernel_constraint'],
                    # config=config['config'],
                    # scale=config['scale'],
                    kernel_regularizer=config['kernel_regularizer'],
                    kernel_size=config['kernel_size'],
                    name=config['name'],
                    padding=config['padding'],
                    strides=config['strides'],
                    trainable=config['trainable'],
                    use_bias=config['use_bias'],
                    weights=weights)(H)

            elif isinstance(layer, Flatten):
                H = Flatten()(H)

            elif isinstance(layer, Dense):
                weights = layer.get_weights()
                if i == self.changed_layer:

                    # Dense after Flatten: rebuild the kernel by removing
                    # the rows that came from the pruned channels of the
                    # preceding (H, W, C) feature map.
                    shape = self.model.layers[i - 1].input_shape
                    new_weights = np.zeros(
                        (shape[1] * shape[2] * (shape[3] - len(kernel_list)),
                         weights[0].shape[1]))

                    for j in range(weights[0].shape[1]):
                        new_weights[:, j] = np.delete(
                            weights[0][:, j].reshape(
                                (shape[1], shape[2], shape[3])), kernel_list,
                            2).reshape(-1)
                    weights[0] = new_weights
                    # NOTE(review): sets units from weights[0].shape[1],
                    # the (unchanged) output size -- verify intent.
                    config['units'] = weights[0].shape[1]
                H = Dense(units=config['units'],
                          activation=config['activation'],
                          activity_regularizer=config['activity_regularizer'],
                          bias_constraint=config['bias_constraint'],
                          bias_regularizer=config['bias_regularizer'],
                          kernel_constraint=config['kernel_constraint'],
                          kernel_regularizer=config['kernel_regularizer'],
                          name=config['name'],
                          trainable=config['trainable'],
                          use_bias=config['use_bias'],
                          weights=weights)(H)

        ## it returns the model changed
        return Model(inp, H)
# Beispiel #7 (example separator from the original paste)
# 0
    def get_partial_model(self):
        """Copy `self.prunedmodel` up to (and including) `self.changed_layer`.

        A trailing MaxPooling2D immediately after the cut point is also
        included when present.

        Returns:
            (partial_model, last_index): the truncated `Model` and the
            index of its last copied layer.
        """

        # Input shape without the batch dimension (assumes a 4-D input).
        inp = (self.prunedmodel.inputs[0].shape.dims[1].value,
               self.prunedmodel.inputs[0].shape.dims[2].value,
               self.prunedmodel.inputs[0].shape.dims[3].value)

        inp = Input(inp)

        i = 0
        while (i <= self.changed_layer):
            layer = self.prunedmodel.get_layer(index=i)

            config = layer.get_config()

            # NOTE(review): `H` is first assigned in the Conv2D branch at
            # i == 1; a pooling/dropout layer before the first Conv2D
            # would hit an unbound `H` -- relies on the model starting
            # input -> Conv2D. Confirm against callers.
            if isinstance(layer, MaxPooling2D):
                H = MaxPooling2D(pool_size=config['pool_size'],
                                 strides=config['strides'],
                                 name=config['name'])(H)

            if isinstance(layer, Dropout):
                H = Dropout.from_config(config)(H)

            if isinstance(layer, Activation):
                H = Activation.from_config(config)(H)
            elif isinstance(layer, Conv2D):
                weights = self.prunedmodel.layers[i].get_weights()
                if i == 1:
                    # First conv attaches to the fresh Input tensor.
                    H = Conv2D(
                        activation=config['activation'],
                        activity_regularizer=config['activity_regularizer'],
                        bias_constraint=config['bias_constraint'],
                        bias_regularizer=config['bias_regularizer'],
                        data_format=config['data_format'],
                        dilation_rate=config['dilation_rate'],
                        filters=config['filters'],
                        kernel_constraint=config['kernel_constraint'],
                        # config=config['config'],
                        # scale=config['scale'],
                        kernel_regularizer=config['kernel_regularizer'],
                        kernel_size=config['kernel_size'],
                        name=config['name'],
                        padding=config['padding'],
                        strides=config['strides'],
                        trainable=config['trainable'],
                        use_bias=config['use_bias'],
                        weights=weights)(inp)

                else:
                    # Subsequent convs chain onto the running tensor H.
                    H = Conv2D(
                        activation=config['activation'],
                        activity_regularizer=config['activity_regularizer'],
                        bias_constraint=config['bias_constraint'],
                        bias_regularizer=config['bias_regularizer'],
                        data_format=config['data_format'],
                        dilation_rate=config['dilation_rate'],
                        filters=config['filters'],
                        kernel_constraint=config['kernel_constraint'],
                        # config=config['config'],
                        # scale=config['scale'],
                        kernel_regularizer=config['kernel_regularizer'],
                        kernel_size=config['kernel_size'],
                        name=config['name'],
                        padding=config['padding'],
                        strides=config['strides'],
                        trainable=config['trainable'],
                        use_bias=config['use_bias'],
                        weights=weights)(H)

            elif isinstance(layer, Flatten):
                H = Flatten()(H)

            elif isinstance(layer, Dense):
                weights = layer.get_weights()
                H = Dense(units=config['units'],
                          activation=config['activation'],
                          activity_regularizer=config['activity_regularizer'],
                          bias_constraint=config['bias_constraint'],
                          bias_regularizer=config['bias_regularizer'],
                          kernel_constraint=config['kernel_constraint'],
                          kernel_regularizer=config['kernel_regularizer'],
                          name=config['name'],
                          trainable=config['trainable'],
                          use_bias=config['use_bias'],
                          weights=weights)(H)

            i += 1

        # Include a pooling layer that directly follows the cut point.
        layer = self.prunedmodel.get_layer(index=i)
        if isinstance(layer, MaxPooling2D):
            config = layer.get_config()
            H = MaxPooling2D(pool_size=config['pool_size'],
                             strides=config['strides'],
                             name=config['name'])(H)
            i += 1

        return Model(inp, H), (i - 1)
def rebuild_net(model=None, layer_filters=[]):
    """Rebuild `model` with the Conv2D filters listed in `layer_filters` removed.

    Args:
        model: Keras model with a 4-D (batch, dim1, dim2, dim3) input;
            layer 0 is skipped (assumed to be the input layer).
        layer_filters: list of (layer_index, filter_indices) pairs; every
            Conv2D layer index in `model` must have an entry (a missing
            entry raises IndexError).  Read-only here, so the mutable
            default is safe.

    Returns:
        A new `Model` with the selected filters pruned from each Conv2D
        layer and the matching channels removed from following
        BatchNormalization / Conv2D layers.
    """
    n_discarded_filters = 0
    total_filters = 0
    # Input shape without the batch dimension.
    inp = (model.inputs[0].shape.dims[1].value,
           model.inputs[0].shape.dims[2].value,
           model.inputs[0].shape.dims[3].value)

    H = Input(inp)
    inp = H
    # Filter indices pruned from the most recent Conv2D; used to shrink
    # the next layers' input channels / BN parameters.
    idx_previous = []

    for i in range(1, len(model.layers)):

        layer = model.get_layer(index=i)
        config = layer.get_config()

        # MaxPooling2D/Dropout/Activation are independent checks;
        # BatchNormalization/Conv2D form an if/elif pair.
        if isinstance(layer, MaxPooling2D):
            H = MaxPooling2D.from_config(config)(H)

        if isinstance(layer, Dropout):
            H = Dropout.from_config(config)(H)

        if isinstance(layer, Activation):
            H = Activation.from_config(config)(H)

        if isinstance(layer, BatchNormalization):
            # Drop pruned channels from gamma/beta/moving mean/variance.
            weights = layer.get_weights()
            weights[0] = np.delete(weights[0], idx_previous)
            weights[1] = np.delete(weights[1], idx_previous)
            weights[2] = np.delete(weights[2], idx_previous)
            weights[3] = np.delete(weights[3], idx_previous)
            H = BatchNormalization(weights=weights)(H)

        elif isinstance(layer, Conv2D):
            weights = layer.get_weights()

            n_filters = weights[0].shape[3]
            total_filters = total_filters + n_filters

            # Filters scheduled for removal at this layer index.
            idxs = [item for item in layer_filters if item[0] == i][0][1]

            # Remove output filters (kernel axis 3) and their biases.
            weights[0] = np.delete(weights[0], idxs, axis=3)
            weights[1] = np.delete(weights[1], idxs)
            n_discarded_filters += len(idxs)
            # Remove the input channels pruned from the previous conv.
            if len(idx_previous) != 0:
                weights[0] = np.delete(weights[0], idx_previous, axis=2)

            config['filters'] = weights[1].shape[0]
            H = Conv2D(
                activation=config['activation'],
                activity_regularizer=config['activity_regularizer'],
                bias_constraint=config['bias_constraint'],
                bias_regularizer=config['bias_regularizer'],
                data_format=config['data_format'],
                dilation_rate=config['dilation_rate'],
                filters=config['filters'],
                kernel_constraint=config['kernel_constraint'],
                # config=config['config'],
                # scale=config['scale'],
                kernel_regularizer=config['kernel_regularizer'],
                kernel_size=config['kernel_size'],
                name=config['name'],
                padding=config['padding'],
                strides=config['strides'],
                trainable=config['trainable'],
                use_bias=config['use_bias'],
                weights=weights)(H)

            # Bug fix: this assignment used to sit OUTSIDE the Conv2D
            # branch, raising NameError on `idxs` whenever the first
            # processed layer was not a Conv2D.
            idx_previous = idxs

    # Guard the report against models with no Conv2D layers (previously
    # a ZeroDivisionError).
    if total_filters:
        print('Percentage of discarded filters {}'.format(n_discarded_filters /
                                                          float(total_filters)))
    return Model(inp, H)