Example #1
# Assumed imports (Keras 1.x layer names; Convolution*D and Merge predate Keras 2):
from keras.layers import (Convolution1D, Convolution2D, Convolution3D,
                          MaxPooling1D, MaxPooling2D, MaxPooling3D,
                          AveragePooling1D, AveragePooling2D, AveragePooling3D,
                          Flatten, Merge, Activation, Dropout, Dense)


def cloneLayerFromLayer(pLayer):
    # Return a fresh, unbuilt copy of pLayer: same configuration, no weights.
    # Convolutions:
    if isinstance(pLayer, Convolution1D):
        return Convolution1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution2D):
        return Convolution2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution3D):
        return Convolution3D.from_config(pLayer.get_config())
    # Max-Pooling:
    elif isinstance(pLayer, MaxPooling1D):
        return MaxPooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling2D):
        return MaxPooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling3D):
        return MaxPooling3D.from_config(pLayer.get_config())
    # Average-Pooling
    elif isinstance(pLayer, AveragePooling1D):
        return AveragePooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling2D):
        return AveragePooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling3D):
        return AveragePooling3D.from_config(pLayer.get_config())
    # Shape, merge and regularization layers:
    elif isinstance(pLayer, Flatten):
        return Flatten.from_config(pLayer.get_config())
    elif isinstance(pLayer, Merge):
        return Merge.from_config(pLayer.get_config())
    elif isinstance(pLayer, Activation):
        return Activation.from_config(pLayer.get_config())
    elif isinstance(pLayer, Dropout):
        return Dropout.from_config(pLayer.get_config())
    # Fully connected:
    elif isinstance(pLayer, Dense):
        return Dense.from_config(pLayer.get_config())
    return None  # unrecognized layer type
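
A minimal usage sketch (the `source` layer below is a made-up example, not part of the original): the clone shares the configuration but not the weights, since from_config builds a fresh, unbuilt layer.

source = Dense(64, activation='relu', name='fc1')
clone = cloneLayerFromLayer(source)

# Same configuration, but the clone is unbuilt and gets its own weights
# the first time it is called on an input.
assert clone.get_config() == source.get_config()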
Example #2
# Assumed imports:
from keras.models import Model, load_model
from keras.layers import (Input, Conv2D, MaxPooling2D, Dropout,
                          Activation, Flatten, Dense)


def read_model(src_model):
    # Load the model at src_model and rebuild it layer by layer,
    # marking its Conv2D and Dense layers as trainable.
    model = load_model(src_model)
    # Input spatial shape (H, W, C), read from the TF1-style shape dims.
    inp = (model.inputs[0].shape.dims[1].value,
           model.inputs[0].shape.dims[2].value,
           model.inputs[0].shape.dims[3].value)

    inp = Input(shape=inp)  # `inp` now holds the new input tensor
    H = inp

    for i in range(len(model.layers)):
        layer = model.get_layer(index=i)
        config = layer.get_config()

        if isinstance(layer, MaxPooling2D):
            H = MaxPooling2D.from_config(config)(H)

        elif isinstance(layer, Dropout):
            H = Dropout.from_config(config)(H)

        elif isinstance(layer, Activation):
            H = Activation.from_config(config)(H)
        elif isinstance(layer, Conv2D):
            weights = layer.get_weights()
            # Note: get_config() serializes regularizers and constraints to
            # dicts; passing them straight through only works when they are None.
            config['trainable'] = True
            H = Conv2D(
                activation=config['activation'],
                activity_regularizer=config['activity_regularizer'],
                bias_constraint=config['bias_constraint'],
                bias_regularizer=config['bias_regularizer'],
                data_format=config['data_format'],
                dilation_rate=config['dilation_rate'],
                filters=config['filters'],
                kernel_constraint=config['kernel_constraint'],
                kernel_regularizer=config['kernel_regularizer'],
                kernel_size=config['kernel_size'],
                name=config['name'],
                padding=config['padding'],
                strides=config['strides'],
                trainable=config['trainable'],
                use_bias=config['use_bias'],
                weights=weights)(H)

        elif isinstance(layer, Flatten):
            H = Flatten()(H)

        elif isinstance(layer, Dense):
            weights = layer.get_weights()
            config['trainable'] = True
            # Rename the layer to avoid a clash with the loaded model's layer.
            config['name'] = config['name'] + "1"
            H = Dense(units=config['units'],
                      activation=config['activation'],
                      activity_regularizer=config['activity_regularizer'],
                      bias_constraint=config['bias_constraint'],
                      bias_regularizer=config['bias_regularizer'],
                      kernel_constraint=config['kernel_constraint'],
                      kernel_regularizer=config['kernel_regularizer'],
                      kernel_initializer='glorot_uniform',
                      name=config['name'],
                      trainable=config['trainable'],
                      use_bias=config['use_bias'])(H)

    # Return the rebuilt model.
    return Model(inp, H)
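
A hypothetical call site ("src_model.h5" and the compile settings are placeholders): the returned model must still be compiled, since read_model only rebuilds the graph.

new_model = read_model("src_model.h5")
new_model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
new_model.summary()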
Example #3
# Assumed imports (Conv2DQ, DenseQ, DepthwiseConv2DQ, ActivationQ,
# GlobalAveragePooling2DQ, ResNetBlock, relu6, get_all_layers and fuse_bn
# come from the repository's own quantization utilities):
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import (InputLayer, Conv2D, DepthwiseConv2D, Dense,
                          BatchNormalization, MaxPooling2D, AveragePooling2D,
                          GlobalAveragePooling2D, Activation, ZeroPadding2D,
                          Flatten, Reshape, Dropout, Lambda)
from keras.activations import relu, softmax


def transform(model, bits_w, bits_x, log=False, quantize=True, verif_preproc=False,
              slalom=False, slalom_integrity=False, slalom_privacy=False, sgxutils=None, queues=None):

    if slalom:
        assert quantize

    old_ops = K.get_session().graph.get_operations()

    all_layers = get_all_layers(model)
    fuse_bn(all_layers)

    queue_ctr = 0
    layers = model.layers
    layer_map = {}
    flattened = False

    def transform_layer(layer, next_layer, queue_ctr, flattened):
        print("transform {} (next = {})".format(layer, next_layer))
        new_layers = []
        skip_next = False

        if isinstance(layer, InputLayer):
            new_layers.append(InputLayer.from_config(layer.get_config()))

        elif isinstance(layer, Conv2D) and not isinstance(layer, DepthwiseConv2D):
            conf = layer.get_config()

            act = conf['activation']

            # if the next layer is a pooling layer, create a fused activation
            maxpool_params = None
            if slalom and isinstance(next_layer, MaxPooling2D):
                mp = next_layer
                assert (layer.activation == relu)
                maxpool_params = mp.get_config()
                skip_next = True

            act_layer = None
            if act != "linear":
                conf['activation'] = "linear"

                if slalom and isinstance(next_layer, GlobalAveragePooling2D):
                    assert layer.activation in [relu, relu6]
                    act = "avgpool" + act
                    skip_next = True

                act_layer = ActivationQ(act, bits_w, bits_x, maxpool_params=maxpool_params, log=log,
                                        quantize=quantize, slalom=slalom, slalom_integrity=slalom_integrity,
                                        slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                        queue=None if queues is None else queues[queue_ctr])
                queue_ctr += 1

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            new_layer = Conv2DQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer

            if act_layer is not None:
                new_layers.append(act_layer)

        elif isinstance(layer, DepthwiseConv2D):
            conf = layer.get_config()

            assert conf['activation'] == "linear"

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            new_layer = DepthwiseConv2DQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer

        elif isinstance(layer, Dense):
            conf = layer.get_config()

            act = conf['activation']

            act_layer = None
            if act != "linear":
                conf['activation'] = "linear"
                act_layer = ActivationQ(act, bits_w, bits_x, log=log,
                                        quantize=quantize, slalom=slalom, slalom_integrity=slalom_integrity,
                                        slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                        queue=None if queues is None else queues[queue_ctr])
                queue_ctr += 1

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            # replace the dense layer by a pointwise convolution
            if verif_preproc:
                del conf['units']
                conf['filters'] = layer.units
                conf['kernel_size'] = 1
                if not flattened:
                    h_in = int(layer.input_spec.axes[-1])
                    new_layers.append(Reshape((1, 1, h_in)))
                    flattened = True
                new_layer = Conv2DQ.from_config(conf)
                new_layers.append(new_layer)
                layer_map[new_layer] = layer

            else:
                new_layer = DenseQ.from_config(conf)
                new_layers.append(new_layer)
                layer_map[new_layer] = layer

            if act_layer is not None:
                new_layers.append(act_layer)

        elif isinstance(layer, BatchNormalization):
            pass

        elif isinstance(layer, MaxPooling2D):
            assert (not slalom or not slalom_privacy)
            new_layers.append(MaxPooling2D.from_config(layer.get_config()))

        elif isinstance(layer, AveragePooling2D):
            assert (not slalom or not slalom_privacy)
            new_layers.append(AveragePooling2D.from_config(layer.get_config()))
            new_layers.append(Lambda(lambda x: K.round(x)))

        elif isinstance(layer, Activation):
            assert layer.activation in [relu6, relu, softmax]

            queue = None if queues is None else queues[queue_ctr]
            queue_ctr += 1

            act_func = "relu6" if layer.activation == relu6 else "relu" if layer.activation == relu else "softmax"
            if slalom and isinstance(next_layer, GlobalAveragePooling2D):
                #assert layer.activation == relu6
                act_func = "avgpoolrelu6"
                skip_next = True

            maxpool_params = None
            if slalom and (isinstance(next_layer, MaxPooling2D) or isinstance(next_layer, AveragePooling2D)):
                mp = next_layer
                assert (layer.activation == relu)
                maxpool_params = mp.get_config()
                skip_next = True

            new_layers.append(ActivationQ(act_func, bits_w, bits_x, log=log,
                                      maxpool_params=maxpool_params,
                                      quantize=quantize, slalom=slalom,
                                      slalom_integrity=slalom_integrity,
                                      slalom_privacy=slalom_privacy,
                                      sgxutils=sgxutils, queue=queue))

        elif isinstance(layer, ZeroPadding2D):
            if quantize:
                # merge with next layer
                conv = next_layer 
                assert isinstance(conv, Conv2D) or isinstance(conv, DepthwiseConv2D)
                assert conv.padding == 'valid'
                conv.padding = 'same'
            else:
                new_layers.append(ZeroPadding2D.from_config(layer.get_config()))

        elif isinstance(layer, Flatten):
            if not verif_preproc:
                new_layers.append(Flatten.from_config(layer.get_config()))

        elif isinstance(layer, GlobalAveragePooling2D):
            assert not slalom
            conf = layer.get_config()
            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            new_layers.append(GlobalAveragePooling2DQ.from_config(conf))

        elif isinstance(layer, Reshape):
            new_layers.append(Reshape.from_config(layer.get_config()))

        elif isinstance(layer, Dropout):
            pass

        elif isinstance(layer, ResNetBlock):
            #assert not slalom

            path1 = []
            path2 = []
            for l in layer.path1:
                lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
                path1.extend(lq)

            for l in layer.path2:
                lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
                path2.extend(lq)

            [actq], queue_ctr, flattened, skip_next = transform_layer(layer.merge_act, next_layer, queue_ctr, flattened)
            new_layer = ResNetBlock(layer.kernel_size, layer.filters, layer.stage, layer.block, layer.identity,
                                    layer.strides, path1=path1, path2=path2, merge_act=actq, 
                                    quantize=quantize, bits_w=bits_w, bits_x=bits_x,
                                    slalom=slalom, slalom_integrity=slalom_integrity, slalom_privacy=slalom_privacy)

            new_layers.append(new_layer)
        else:
            raise AttributeError("Don't know how to handle layer {}".format(layer))

        return new_layers, queue_ctr, flattened, skip_next

    new_model = Sequential()
    skip_next = False
    l_count = 0
    while layers:
        layer = layers.pop(0)
        next_layer = layers[0] if len(layers) else None
        # Only the first seven layers are transformed; later ones are copied as-is.
        if l_count <= 6:
            if not skip_next:
                new_layers, queue_ctr, flattened, skip_next = transform_layer(layer, next_layer, queue_ctr, flattened)
                for new_layer in new_layers:
                    new_model.add(new_layer)
            else:
                skip_next = False
        else:
            skip_next = False
            if isinstance(layer, MaxPooling2D):
                new_layer = MaxPooling2D.from_config(layer.get_config())
            elif isinstance(layer, Flatten):
                new_layer = Flatten.from_config(layer.get_config())
            elif isinstance(layer, (Conv2D, Dense)):
                # Past the cutoff, Conv2D and Dense layers are copied over unchanged.
                new_layer = layer
            else:
                raise AttributeError("Don't know how to handle layer {}".format(layer))
            new_model.add(new_layer)
        l_count += 1

    print("transformed summary")
    new_model.summary()

    # copy over (and potentially quantize) all the weights
    new_layers = get_all_layers(new_model)

    for layer in new_layers:
        if layer in layer_map:
            src_layer = layer_map[layer]

            weights = src_layer.get_weights()
            kernel = weights[0]
            bias = weights[1]
            print(layer)
            if quantize and isinstance(layer, Conv2DQ):
                range_w = 2**bits_w
                range_x = 2**bits_x
                kernel_q = np.round(range_w * kernel)
                bias_q = np.round(range_w * range_x * bias)
                if slalom_privacy and isinstance(layer, DepthwiseConv2DQ):
                    bias_q = bias_q.astype(np.float64)
                    kernel_q = kernel_q.astype(np.float64)

                # Remove kernel and bias from the trainable variables: after
                # quantization they are frozen and only set via set_weights.
                layer._trainable_weights = layer._trainable_weights[2:]

                if isinstance(src_layer, Dense) and verif_preproc:
                    kernel_q = np.reshape(kernel_q, (1, 1, kernel_q.shape[0], kernel_q.shape[1]))

                layer.set_weights((kernel_q, bias_q))
            else:
                layer._trainable_weights = layer._trainable_weights[2:]
                layer.set_weights((kernel, bias))

    # find all the TensorFlow ops that correspond to inputs/outputs of linear operators
    new_ops = [op for op in K.get_session().graph.get_operations() if op not in old_ops]
    linear_ops_in = [tf.reshape(op.inputs[0], [-1]) for op in new_ops if op.type in ['Conv2D', 'MatMul', 'DepthwiseConv2dNative']]
    linear_ops_out = [tf.reshape(op.outputs[0], [-1]) for op in new_ops if op.type in ['BiasAdd']]

    return new_model, linear_ops_in, linear_ops_out
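
The weight-copy loop above applies plain fixed-point quantization: kernels are scaled by 2**bits_w and biases by 2**(bits_w + bits_x) before rounding. A standalone numpy sketch of that scheme, with illustrative values:

import numpy as np

bits_w, bits_x = 8, 8
range_w, range_x = 2 ** bits_w, 2 ** bits_x

kernel = np.random.uniform(-0.5, 0.5, size=(3, 3, 16, 32))
bias = np.random.uniform(-0.1, 0.1, size=(32,))

# Quantize exactly as in the weight-copy loop.
kernel_q = np.round(range_w * kernel)
bias_q = np.round(range_w * range_x * bias)

# Dequantization recovers the originals up to half a quantization step.
assert np.abs(kernel_q / range_w - kernel).max() <= 0.5 / range_w
assert np.abs(bias_q / (range_w * range_x) - bias).max() <= 0.5 / (range_w * range_x)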
Example #4
# Assumed imports (MaskedConv2D and MaskedDense are the repository's own masked layers):
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Activation, MaxPooling2D, AveragePooling2D,
                          Dropout, Flatten, BatchNormalization, add)
from keras.callbacks import EarlyStopping


def PrunWeight(model, model_name, x_prune, y_prune, x_test, y_test, pruning_rate, compile_info, fine_tune):

    ############ Calculating the weight threshold for pruning #####
    ############ Biases are not considered in the pruning process #####
    parameters = []
    conv_layers_weights = []
    for layer in model.layers:
        if layer.get_config()['name'].find("conv") != -1:
            conv_layers_weights.append(layer.get_weights())

    for layer_weights in conv_layers_weights:
        parameters.append(K.flatten(K.abs(layer_weights[0])))

    dense_layers_weights = []
    for layer in model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            dense_layers_weights.append(layer.get_weights())

    for layer_weights in dense_layers_weights:
        parameters.append(K.flatten(K.abs(layer_weights[0])))
    
    parameters = K.concatenate(parameters)
    parameters = sorted(K.get_value(parameters).tolist())
    weight_limit = parameters[int(pruning_rate * len(parameters))]
    print("Pruning weight threshold:", weight_limit)
    ##################################################################
    dense_layers_weights = []
    conv_filter_weights = []
    batch_norm_params = []
    kernel_masks_for_dense_and_conv_layers = []
    model_tensors_dict = {}
    input_height, input_width, input_channels = model.input.shape[1:]

    pruned_model_input = Input(shape=(int(input_height),int(input_width),int(input_channels)))

    if model.layers[0].name.find('input') == -1:
        model_tensors_dict[str(model.layers[0].input.name)] = pruned_model_input
    else:
        model_tensors_dict[str(model.layers[0].output.name)] = pruned_model_input
        
    Flow = pruned_model_input
    
    for layer in model.layers:
        if layer.get_config()['name'].find("conv2d") != -1:
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]), 'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow = MaskedConv2D(filters=layer.get_config()['filters'],
                                kernel_size=layer.get_config()['kernel_size'],
                                kernel_initializer=layer.get_config()['kernel_initializer'],
                                kernel_regularizer=layer.get_config()['kernel_regularizer'],
                                strides=layer.get_config()['strides'],
                                padding=layer.get_config()['padding'],
                                activation=layer.get_config()['activation'],
                                use_bias=layer.get_config()['use_bias'],
                                Masked=True, kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            conv_filter_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow
            
        elif layer.get_config()['name'].find("dense") != -1:
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]), 'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow = MaskedDense(units=layer.get_config()['units'],
                               activation=layer.get_config()['activation'],
                               use_bias=layer.get_config()['use_bias'],
                               kernel_initializer=layer.get_config()['kernel_initializer'],
                               Masked=True, kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            dense_layers_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("activation") != -1:
            Flow = Activation.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
            
        elif layer.get_config()['name'].find("max_pooling") != -1:
            Flow = MaxPooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("average_pooling") != -1:
            Flow = AveragePooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("dropout") != -1:
            Flow = Dropout.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("flatten") != -1:
            Flow = Flatten.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow

        elif layer.get_config()['name'].find("add") != -1:
            input_tensors_list = []
            for idx in range(len(layer.input)):
                input_tensors_list.append(model_tensors_dict[layer.input[idx].name])
            Flow = add(input_tensors_list)
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("batch_normalization") != -1:
            batch_norm_params.append(layer.get_weights())
            Flow = BatchNormalization.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        
        elif layer.get_config()['name'].find("input") != -1:
            pass
            
    pruned_model  = Model(pruned_model_input, Flow)
    ########################## setting the weights of layers #############################
    for layer in pruned_model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            pruned_weights = [dense_layers_weights[0][0]*K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(dense_layers_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del dense_layers_weights[0]
        
        elif layer.get_config()['name'].find("conv2d") != -1:
            pruned_weights = [conv_filter_weights[0][0]*K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(conv_filter_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del conv_filter_weights[0]
            
        elif layer.get_config()['name'].find("batch") != -1:
            layer.set_weights(batch_norm_params[0])
            del batch_norm_params[0]
    ############################### Fine-tuning ############################################
    pruned_model.compile(loss=compile_info['loss'],
                         optimizer=compile_info['optimizer'],
                         metrics=compile_info['metrics'])
    
    if not fine_tune:
        return pruned_model
    else:
        early_stopping = EarlyStopping(monitor='val_acc', patience=2, verbose=0)
        callbacks = [early_stopping]
        # fine-tuning the network.
        pruned_model.fit(x_prune, y_prune,
                         batch_size=256,
                         epochs=10,
                         validation_data=(x_test, y_test),
                         shuffle=True,
                         callbacks=callbacks,
                         verbose=0)

        return pruned_model
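
The threshold at the top of PrunWeight implements global magnitude pruning: all kernel weights are pooled, their absolute values sorted, and the cut made at the pruning_rate quantile. A standalone numpy sketch of the same rule (shapes are illustrative):

import numpy as np

pruning_rate = 0.5  # drop the 50% smallest-magnitude weights

# Stand-ins for the conv and dense kernels collected above.
kernels = [np.random.randn(3, 3, 8, 16), np.random.randn(128, 10)]

flat = np.concatenate([np.abs(k).ravel() for k in kernels])
weight_limit = np.sort(flat)[int(pruning_rate * flat.size)]

# Keep a weight iff its magnitude reaches the threshold (as in kernel_mask).
masks = [(np.abs(k) >= weight_limit).astype(np.float32) for k in kernels]
pruned = [k * m for k, m in zip(kernels, masks)]
print("kept fraction:", sum(m.sum() for m in masks) / flat.size)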