Code Example #1
File: surgeon.py  Project: ranti-iitg/keras-surgeon
    def _apply_delete_mask(self, node, inbound_masks):
        """Apply the inbound delete mask and return the outbound delete mask

        When specific channels in a layer or layer instance are deleted, the
        mask propagates information about which channels are affected to
        downstream layers.
        If the layer contains weights, those which were previously connected
        to the deleted channels are deleted and outbound masks are set to None
        since further downstream layers aren't affected.
        If the layer does not contain weights, its output mask is calculated to
        reflect any transformations performed by the layer to ensure that
        information about the deleted channels is propagated downstream.


        Arguments:
            node(Node): The node where the delete mask is applied.
            inbound_masks: Mask(s) from inbound node(s).

        Returns:
            new_layer: Pass through `layer` if it has no weights, otherwise a
                       new `Layer` object with weights corresponding to the
                       inbound mask deleted.
            outbound_mask: Mask corresponding to `new_layer`.
        """

        # TODO: This breaks layer sharing. Write a test for this.

        # If the delete mask is None or all values are True, it does not
        # affect this layer or any layers downstream of it.
        layer = node.outbound_layer
        if all(mask is None for mask in inbound_masks):
            new_layer = layer
            outbound_mask = None
            return new_layer, outbound_mask
        elif any(mask is None for mask in inbound_masks):
            inbound_masks = [
                np.ones(shape[1:], dtype=bool)
                if inbound_masks[i] is None else inbound_masks[i]
                for i, shape in enumerate(node.input_shapes)
            ]

        output_shape = utils.single_element(node.output_shapes)
        input_shape = utils.single_element(node.input_shapes)
        data_format = getattr(layer, 'data_format', 'channels_last')
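        # Layers without a data_format attribute default to channels_last.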
        inbound_masks = utils.single_element(inbound_masks)
        # otherwise, delete_mask.shape should be: layer.input_shape[1:]
        layer_class = layer.__class__.__name__
        if layer_class == 'InputLayer':
            raise RuntimeError('This should never get here!')

        elif layer_class == 'Dense':
            if np.all(inbound_masks):
                new_layer = layer
            else:
                weights = layer.get_weights()
                weights[0] = weights[0][np.where(inbound_masks)[0], :]
                config = layer.get_config()
                config['weights'] = weights
                new_layer = type(layer).from_config(config)
            outbound_mask = None

        elif layer_class == 'Flatten':
            outbound_mask = np.reshape(inbound_masks, [-1])
            new_layer = layer

        elif layer_class in ('Conv1D', 'Conv2D', 'Conv3D'):
            if np.all(inbound_masks):
                new_layer = layer
                print("npall conv1d")
            else:
                if data_format == 'channels_first':
                    inbound_masks = np.swapaxes(inbound_masks, 0, -1)
                # Conv layer: trim down inbound_masks to filter shape
                k_size = layer.kernel_size
                index = [slice(None, dim_size, None) for dim_size in k_size]
                inbound_masks = inbound_masks[tuple(index + [slice(None)])]
                # Delete unused weights to obtain new_weights
                weights = layer.get_weights()
                # Each deleted channel was connected to all of the channels
                # in layer; therefore, the mask must be repeated for each
                # channel.
                # `delete_mask`'s size: size(inbound_mask)+[layer.filters]
                # TODO: replace repeat with tile
                delete_mask = np.repeat(inbound_masks[..., np.newaxis],
                                        weights[0].shape[-1],
                                        axis=-1)
                new_shape = list(weights[0].shape)
                new_shape[-2] = -1  # Weights always have channels_last
                weights[0] = np.reshape(weights[0][delete_mask], new_shape)
                # Instantiate new layer with new_weights
                config = layer.get_config()
                config['weights'] = weights
                new_layer = type(layer).from_config(config)
            outbound_mask = None

        elif layer_class in ('Cropping1D', 'Cropping2D', 'Cropping3D',
                             'MaxPooling1D', 'MaxPooling2D', 'MaxPooling3D',
                             'AveragePooling1D', 'AveragePooling2D',
                             'AveragePooling3D'):
            index = [slice(None, x, None) for x in output_shape[1:]]
            if data_format == 'channels_first':
                index[0] = slice(None)
            elif data_format == 'channels_last':
                index[-1] = slice(None)
            else:
                raise ValueError('Invalid data format')
            outbound_mask = inbound_masks[tuple(index)]
            new_layer = layer

        elif layer_class in ('UpSampling1D', 'UpSampling2D', 'UpSampling3D',
                             'ZeroPadding1D', 'ZeroPadding2D',
                             'ZeroPadding3D'):

            # Get slice of mask with all singleton dimensions except
            # channels dimension
            index = [slice(1)] * (len(input_shape) - 1)
            tile_shape = list(output_shape[1:])
            if data_format == 'channels_first':
                index[0] = slice(None)
                tile_shape[0] = 1
            elif data_format == 'channels_last':
                index[-1] = slice(None)
                tile_shape[-1] = 1
            else:
                raise ValueError('Invalid data format')
            channels_vector = inbound_masks[tuple(index)]
            # Tile this slice to create the outbound mask
            outbound_mask = np.tile(channels_vector, tile_shape)
            new_layer = layer

        elif layer_class in ('GlobalMaxPooling1D', 'GlobalMaxPooling2D',
                             'GlobalAveragePooling1D',
                             'GlobalAveragePooling2D'):
            # Get slice of mask with all singleton dimensions except
            # channels dimension
            index = [0] * (len(input_shape) - 1)
            if data_format == 'channels_first':
                index[0] = slice(None)
            elif data_format == 'channels_last':
                index[-1] = slice(None)
            else:
                raise ValueError('Invalid data format')
            channels_vector = inbound_masks[tuple(index)]
            # The channels vector itself is the outbound mask; global
            # pooling collapses the spatial dimensions, so no tiling is needed
            outbound_mask = channels_vector
            new_layer = layer

        elif layer_class in ('Dropout', 'Activation', 'SpatialDropout1D',
                             'SpatialDropout2D', 'SpatialDropout3D',
                             'ActivityRegularization', 'Masking', 'LeakyReLU',
                             'ELU', 'ThresholdedReLU', 'GaussianNoise',
                             'GaussianDropout', 'AlphaDropout'):
            # Pass-through layers
            outbound_mask = inbound_masks
            new_layer = layer

        elif layer_class == 'Reshape':
            outbound_mask = np.reshape(inbound_masks, layer.target_shape)
            new_layer = layer

        elif layer_class == 'Permute':
            outbound_mask = np.transpose(inbound_masks,
                                         [x - 1 for x in layer.dims])
            new_layer = layer

        elif layer_class == 'RepeatVector':
            outbound_mask = np.repeat(np.expand_dims(inbound_masks, 0),
                                      layer.n,
                                      axis=0)
            new_layer = layer

        elif layer_class == 'Embedding':
            # Embedding will always be the first layer so it doesn't need
            # to consider the inbound_delete_mask
            if inbound_masks is not None:
                raise ValueError('Channels cannot be deleted before Embedding '
                                 'layers because they change the number of '
                                 'channels.')
            outbound_mask = None
            new_layer = layer

        elif layer_class in ('Add', 'Multiply', 'Average', 'Maximum'):
            # The inputs must be the same size
            if not utils.all_equal(inbound_masks):
                raise ValueError(
                    '{0} layers must have the same size inputs. All '
                    'inbound nodes must have the same channels deleted'.format(
                        layer_class))
            outbound_mask = inbound_masks[1]
            new_layer = layer

        elif layer_class == 'Concatenate':
            axis = layer.axis
            if layer.axis < 0:
                axis = axis % len(layer.input_shape[0])
            # Below: axis=axis-1 because the mask excludes the batch dimension
            outbound_mask = np.concatenate(inbound_masks, axis=axis - 1)
            new_layer = layer

        elif layer_class in ('SimpleRNN', 'GRU', 'LSTM'):
            if np.all(inbound_masks):
                new_layer = layer
            else:
                weights = layer.get_weights()
                weights[0] = weights[0][np.where(inbound_masks[0, :])[0], :]
                config = layer.get_config()
                config['weights'] = weights
                new_layer = type(layer).from_config(config)
            outbound_mask = None

        elif layer_class == 'BatchNormalization':
            outbound_mask = inbound_masks
            # Get slice of mask with all singleton dimensions except
            # channels dimension
            index = [0] * (len(input_shape))
            index[layer.axis] = slice(None)
            index = index[1:]
            # TODO: Maybe use channel indices everywhere instead of masks?
            channel_indices = np.where(inbound_masks[tuple(index)] == False)[0]
            weights = [
                np.delete(w, channel_indices, axis=-1)
                for w in layer.get_weights()
            ]
            new_layer = BatchNormalization.from_config(layer.get_config())
            new_input_shape = list(input_shape)
            new_input_shape[new_layer.axis] -= len(channel_indices)
            new_layer.build(new_input_shape)
            new_layer.set_weights(weights)

        else:
            # Not implemented:
            # - Lambda
            # - SeparableConv2D
            # - Conv2DTranspose
            # - LocallyConnected1D
            # - LocallyConnected2D
            # - TimeDistributed
            # - Bidirectional
            # - Dot
            # - PReLU
            # Warning/error needed for Reshape if channels axis is split
            raise ValueError('"{0}" layers are currently '
                             'unsupported.'.format(layer_class))
        print(node, "node return")
        return new_layer, outbound_mask
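
To make the mask convention concrete, here is a minimal, standalone NumPy sketch (an editor's illustration with hypothetical shapes, not part of the project) of what the `Dense` branch above does: a boolean inbound mask marks surviving input channels, and kernel rows belonging to deleted channels are dropped.

import numpy as np

# Hypothetical Dense kernel: 4 input channels -> 3 units.
kernel = np.arange(12, dtype=float).reshape(4, 3)
# False marks a channel deleted upstream; True marks a survivor.
inbound_mask = np.array([True, False, True, True])
# As in the Dense branch: keep only the rows of surviving channels.
trimmed = kernel[np.where(inbound_mask)[0], :]
print(trimmed.shape)  # (3, 3) -- one row per surviving input channel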
Code Example #2
File: test.py  Project: SAOBunk/ImagingBEP
# Assumed imports for this fragment (editor's sketch); base_model is defined
# earlier in test.py and is not shown here.
from keras.layers import Dense, GlobalAveragePooling2D, BatchNormalization
from keras.models import Model
from keras import optimizers

# Add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- sigmoid to make sure all predictions lie between 0 and 1
predictions = Dense(1, activation='sigmoid')(x)

# Change the momentum for the good of the people
for i, layer in enumerate(base_model.layers):
    name = str(layer.name)
    if name.startswith("bn"):
        config = layer.get_config()
        config['momentum'] = 0.01
        base_model.layers[i] = BatchNormalization.from_config(config)
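        # Note (editor's caution, not in the original): reassigning an entry
        # of base_model.layers does not rewire the Keras graph; the new
        # BatchNormalization layer is never called on a tensor, so the model
        # compiled below still uses the original momentum.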

# This is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

# Compile the model (should be done *after* setting layers to non-trainable)
adam = optimizers.Adam(lr=0.001,
                       beta_1=0.9,
                       beta_2=0.999,
                       epsilon=None,
                       decay=0.0)
model.compile(optimizer=adam,
              loss='mean_squared_logarithmic_error',
              metrics=['mean_squared_error'])

print("Compiled model.")
Code Example #3
    def _apply_delete_mask(self, node, inbound_masks):
        """Apply the inbound delete mask and return the outbound delete mask

        When specific channels in a layer or layer instance are deleted, the
        mask propagates information about which channels are affected to
        downstream layers.
        If the layer contains weights, those which were previously connected
        to the deleted channels are deleted and outbound masks are set to None
        since further downstream layers aren't affected.
        If the layer does not contain weights, its output mask is calculated to
        reflect any transformations performed by the layer to ensure that
        information about the deleted channels is propagated downstream.


        Arguments:
            node(Node): The node where the delete mask is applied.
            inbound_masks: Mask(s) from inbound node(s).

        Returns:
            new_layer: Pass through `layer` if it has no weights, otherwise a
                       new `Layer` object with weights corresponding to the
                       inbound mask deleted.
            outbound_mask: Mask corresponding to `new_layer`.
        """

        # if delete_mask is None or all values are True, it does not affect
        # this layer or any layers above/downstream from it
        layer = node.outbound_layer
        if all(mask is None for mask in inbound_masks):
            new_layer = layer
            outbound_mask = None
            return new_layer, outbound_mask

        # If one or more of the masks are None, replace them with ones.
        if any(mask is None for mask in inbound_masks):
            inbound_masks = [
                np.ones(shape[1:], dtype=bool)
                if inbound_masks[i] is None else inbound_masks[i]
                for i, shape in enumerate(node.input_shapes)
            ]

        # If the layer is shared and has already been affected by this
        # operation, use the cached new layer.
        if (len(get_inbound_nodes(layer)) > 1
                and layer in self._replace_layers_map):
            return self._replace_layers_map[layer]

        output_shape = utils.single_element(node.output_shapes)
        input_shape = utils.single_element(node.input_shapes)
        data_format = getattr(layer, "data_format", "channels_last")
        inbound_masks = utils.single_element(inbound_masks)
        # otherwise, delete_mask.shape should be: layer.input_shape[1:]
        layer_class = layer.__class__.__name__
        if layer_class == "InputLayer":
            raise RuntimeError("This should never get here!")

        elif layer_class == "Dense":
            if np.all(inbound_masks):
                new_layer = layer
            else:
                weights = layer.get_weights()
                weights[0] = weights[0][np.where(inbound_masks)[0], :]
                config = layer.get_config()
                config["weights"] = weights
                new_layer = type(layer).from_config(config)
            outbound_mask = None

        elif layer_class == "Flatten":
            outbound_mask = np.reshape(inbound_masks, [-1])
            new_layer = layer

        elif layer_class in ("Conv1D", "Conv2D", "Conv3D"):
            if np.all(inbound_masks):
                new_layer = layer
            else:
                if data_format == "channels_first":
                    inbound_masks = np.swapaxes(inbound_masks, 0, -1)
                # Conv layer: trim down inbound_masks to filter shape
                k_size = layer.kernel_size
                index = [slice(None, 1, None) for _ in k_size]
                inbound_masks = inbound_masks[tuple(index + [slice(None)])]
                weights = layer.get_weights()
                # Delete unused weights to obtain new_weights
                # Each deleted channel was connected to all of the channels
                # in layer; therefore, the mask must be repeated for each
                # channel.
                # `delete_mask`'s size: size(weights[0])
                delete_mask = np.tile(
                    inbound_masks[..., np.newaxis],
                    list(k_size) + [1, weights[0].shape[-1]],
                )
                new_shape = list(weights[0].shape)
                new_shape[-2] = -1  # Weights always have channels_last
                weights[0] = np.reshape(weights[0][delete_mask], new_shape)
                # Instantiate new layer with new_weights
                config = layer.get_config()
                config["weights"] = weights
                new_layer = type(layer).from_config(config)
            outbound_mask = None

        elif layer_class in (
                "Cropping1D",
                "Cropping2D",
                "Cropping3D",
                "MaxPooling1D",
                "MaxPooling2D",
                "MaxPooling3D",
                "AveragePooling1D",
                "AveragePooling2D",
                "AveragePooling3D",
        ):
            index = [slice(None, x, None) for x in output_shape[1:]]
            if data_format == "channels_first":
                index[0] = slice(None)
            elif data_format == "channels_last":
                index[-1] = slice(None)
            else:
                raise ValueError("Invalid data format")
            outbound_mask = inbound_masks[tuple(index)]
            new_layer = layer

        elif layer_class in (
                "UpSampling1D",
                "UpSampling2D",
                "UpSampling3D",
                "ZeroPadding1D",
                "ZeroPadding2D",
                "ZeroPadding3D",
        ):

            # Get slice of mask with all singleton dimensions except
            # channels dimension
            index = [slice(1)] * (len(input_shape) - 1)
            tile_shape = list(output_shape[1:])
            if data_format == "channels_first":
                index[0] = slice(None)
                tile_shape[0] = 1
            elif data_format == "channels_last":
                index[-1] = slice(None)
                tile_shape[-1] = 1
            else:
                raise ValueError("Invalid data format")
            channels_vector = inbound_masks[tuple(index)]
            # Tile this slice to create the outbound mask
            outbound_mask = np.tile(channels_vector, tile_shape)
            new_layer = layer

        elif layer_class in (
                "GlobalMaxPooling1D",
                "GlobalMaxPooling2D",
                "GlobalAveragePooling1D",
                "GlobalAveragePooling2D",
        ):
            # Get slice of mask with all singleton dimensions except
            # channels dimension
            index = [0] * (len(input_shape) - 1)
            if data_format == "channels_first":
                index[0] = slice(None)
            elif data_format == "channels_last":
                index[-1] = slice(None)
            else:
                raise ValueError("Invalid data format")
            channels_vector = inbound_masks[tuple(index)]
            # The channels vector itself is the outbound mask; global
            # pooling collapses the spatial dimensions, so no tiling is needed
            outbound_mask = channels_vector
            new_layer = layer

        elif layer_class in (
                "Dropout",
                "Activation",
                "SpatialDropout1D",
                "SpatialDropout2D",
                "SpatialDropout3D",
                "ActivityRegularization",
                "Masking",
                "LeakyReLU",
                "ELU",
                "ThresholdedReLU",
                "GaussianNoise",
                "GaussianDropout",
                "AlphaDropout",
                "Rename",
        ) or (layer_class == "TensorFlowOpLayer"
              and layer.node_def.op == "ResizeBilinear"):
            # Pass-through layers
            outbound_mask = inbound_masks
            new_layer = layer

        elif layer_class == "Reshape":
            outbound_mask = np.reshape(inbound_masks, layer.target_shape)
            new_layer = layer

        elif layer_class == "Permute":
            outbound_mask = np.transpose(inbound_masks,
                                         [x - 1 for x in layer.dims])
            new_layer = layer

        elif layer_class == "RepeatVector":
            outbound_mask = np.repeat(np.expand_dims(inbound_masks, 0),
                                      layer.n,
                                      axis=0)
            new_layer = layer

        elif layer_class == "Embedding":
            # Embedding will always be the first layer so it doesn't need
            # to consider the inbound_delete_mask
            if inbound_masks is not None:
                raise ValueError("Channels cannot be deleted bedore Embedding "
                                 "layers because they change the number of "
                                 "channels.")
            outbound_mask = None
            new_layer = layer

        elif layer_class in ("Add", "Multiply", "Average", "Maximum"):
            # The inputs must be the same size
            if not utils.all_equal(inbound_masks):
                raise ValueError(
                    "{0} layers must have the same size inputs. All "
                    "inbound nodes must have the same channels deleted".format(
                        layer_class))
            outbound_mask = inbound_masks[1]
            new_layer = layer

        elif layer_class == "Concatenate":
            axis = layer.axis
            if layer.axis < 0:
                axis = axis % len(layer.input_shape[0])
            # Below: axis=axis-1 because the mask excludes the batch dimension
            outbound_mask = np.concatenate(inbound_masks, axis=axis - 1)
            new_layer = layer

        elif layer_class in ("SimpleRNN", "GRU", "LSTM"):
            if np.all(inbound_masks):
                new_layer = layer
            else:
                weights = layer.get_weights()
                weights[0] = weights[0][np.where(inbound_masks[0, :])[0], :]
                config = layer.get_config()
                config["weights"] = weights
                new_layer = type(layer).from_config(config)
            outbound_mask = None

        elif layer_class == "BatchNormalization":
            outbound_mask = inbound_masks
            # Get slice of mask with all singleton dimensions except
            # channels dimension
            index = [0] * (len(input_shape))
            index[layer.axis] = slice(None)
            index = index[1:]
            # TODO: Maybe use channel indices everywhere instead of masks?
            channel_indices = np.where(inbound_masks[tuple(index)] == False)[0]
            weights = [
                np.delete(w, channel_indices, axis=-1)
                for w in layer.get_weights()
            ]
            new_layer = BatchNormalization.from_config(layer.get_config())
            new_input_shape = list(input_shape)
            new_input_shape[new_layer.axis] -= len(channel_indices)
            new_layer.build(new_input_shape)
            new_layer.set_weights(weights)

        else:
            # Not implemented:
            # - Lambda
            # - SeparableConv2D
            # - Conv2DTranspose
            # - LocallyConnected1D
            # - LocallyConnected2D
            # - TimeDistributed
            # - Bidirectional
            # - Dot
            # - PReLU
            # Warning/error needed for Reshape if channels axis is split
            raise ValueError('"{0}" layers are currently '
                             "unsupported.".format(layer_class))

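        # Cache the replacement so other nodes of a shared layer reuse it.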
        if len(get_inbound_nodes(layer)) > 1 and new_layer != layer:
            self._replace_layers_map[layer] = (new_layer, outbound_mask)

        return new_layer, outbound_mask
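
The Conv branch above rebuilds a kernel-shaped delete mask from a single channels slice. A standalone NumPy sketch of that tiling step (editor's illustration; shapes are hypothetical):

import numpy as np

k_size = (3, 3)                 # kernel_size of a hypothetical Conv2D
in_channels, filters = 4, 8
channel_mask = np.array([True, False, True, True])
# Slice with singleton spatial dims, as taken in the Conv branch above.
mask_slice = channel_mask[np.newaxis, np.newaxis, :]       # (1, 1, 4)
# Tile over the kernel's spatial dims and the new filters axis.
delete_mask = np.tile(mask_slice[..., np.newaxis],
                      list(k_size) + [1, filters])
print(delete_mask.shape)  # (3, 3, 4, 8) -- matches the Conv2D kernel shape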
Code Example #4
File: Pruning_Utils.py  Project: IBM/GradSigns
def PrunWeight(model, model_name, x_prune, y_prune, x_test, y_test, pruning_rate, compile_info, fine_tune):
    
    ############ Calculating weight limit for pruning ##### 
    ############ We do not consider biases in the pruning process #####
    parameters = []
    conv_layers_weights = []
    for layer in model.layers:
        if layer.get_config()['name'].find("conv") != -1:
            conv_layers_weights.append(layer.get_weights())

    for layer_weights in conv_layers_weights:
        parameters.append(K.flatten(K.abs(layer_weights[0])))

    dense_layers_weights = []
    for layer in model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            dense_layers_weights.append(layer.get_weights())

    for layer_weights in dense_layers_weights:
        parameters.append(K.flatten(K.abs(layer_weights[0])))
    
    parameters = K.concatenate(parameters)
    parameters = sorted(K.get_value(parameters).tolist())
    weight_limit = parameters[int(pruning_rate * len(parameters))]
    print("Pruning weight threshold: ", weight_limit)
    ##################################################################
    dense_layers_weights = []
    conv_filter_weights = []
    batch_norm_params = []
    kernel_masks_for_dense_and_conv_layers = []
    model_tensors_dict = {}
    input_height, input_width, input_channels = model.input.shape[1:]

    pruned_model_input = Input(shape=(int(input_height), int(input_width),
                                      int(input_channels)))

    if model.layers[0].name.find('input') == -1:
        model_tensors_dict[str(model.layers[0].input.name)] = pruned_model_input
    else:
        model_tensors_dict[str(model.layers[0].output.name)] = pruned_model_input
        
    Flow = pruned_model_input
    
    for layer in model.layers:
        if layer.get_config()['name'].find("conv2d") != -1:
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]), 'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow = MaskedConv2D(filters=layer.get_config()['filters'], kernel_size=layer.get_config()['kernel_size'],
                kernel_initializer=layer.get_config()['kernel_initializer'], kernel_regularizer=layer.get_config()['kernel_regularizer'],
                strides=layer.get_config()['strides'], padding=layer.get_config()['padding'], activation=layer.get_config()['activation'],
                use_bias=layer.get_config()['use_bias'], Masked=True, kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            conv_filter_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow
            
        elif layer.get_config()['name'].find("dense") != -1:
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]), 'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow = MaskedDense(units=layer.get_config()['units'], activation=layer.get_config()['activation'],
                use_bias=layer.get_config()['use_bias'], kernel_initializer=layer.get_config()['kernel_initializer'],
                Masked=True, kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            dense_layers_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("activation") != -1:
            Flow = Activation.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
            
        elif layer.get_config()['name'].find("max_pooling") != -1:
            Flow = MaxPooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("average_pooling") != -1:
            Flow = AveragePooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("dropout") != -1:
            Flow = Dropout.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("flatten") != -1:
            Flow = Flatten.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("add") != -1:
            input_tensors_list = []
            for idx in range(len(layer.input)):
                input_tensors_list.append(model_tensors_dict[layer.input[idx].name])
            Flow = add(input_tensors_list)
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("batch_normalization") != -1:
            batch_norm_params.append(layer.get_weights())
            Flow = BatchNormalization.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("input") != -1:
            pass
            
    pruned_model = Model(pruned_model_input, Flow)
    ########################## Setting the weights of the layers #############################
    for layer in pruned_model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            pruned_weights = [dense_layers_weights[0][0]*K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(dense_layers_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del dense_layers_weights[0]
        
        elif layer.get_config()['name'].find("conv2d") != -1:
            pruned_weights = [conv_filter_weights[0][0]*K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(conv_filter_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del conv_filter_weights[0]
            
        elif layer.get_config()['name'].find("batch") != -1:
            layer.set_weights(batch_norm_params[0])
            del batch_norm_params[0]
    ############################### Fine-tuning ############################################
    pruned_model.compile(loss=compile_info['loss'],
            optimizer=compile_info['optimizer'],
            metrics=compile_info['metrics'])
    
    if not fine_tune:
        return pruned_model
    else:
        early_stopping = EarlyStopping(monitor='val_acc', patience=2, verbose=0)
        callbacks = [early_stopping]
        # fine-tuning the network.
        pruned_model.fit(x_prune, y_prune,
                    batch_size=256,
                    epochs=10,
                    validation_data=(x_test, y_test),
                    shuffle=True,
                    callbacks=callbacks,
                    verbose=0
                    )

        return pruned_model
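
The threshold selection at the top of PrunWeight sorts every weight magnitude and picks the value at the pruning-rate quantile; weights below it are masked to zero. An equivalent NumPy-only sketch (editor's illustration with hypothetical weight arrays):

import numpy as np

# Hypothetical kernels from one conv layer and one dense layer.
kernels = [np.random.randn(3, 3, 8, 16), np.random.randn(128, 10)]
all_magnitudes = np.concatenate([np.abs(k).ravel() for k in kernels])
pruning_rate = 0.5
# The smallest 50% of weights (by magnitude) fall below this threshold.
weight_limit = np.sort(all_magnitudes)[int(pruning_rate * len(all_magnitudes))]
# Binary keep-mask, as built per layer inside PrunWeight.
mask = (np.abs(kernels[0]) >= weight_limit).astype('float32')
print(mask.mean())  # roughly 0.5 of the conv weights survive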