# assumed imports (Keras 1.x-era API, where Merge and the ConvolutionND
# aliases are available):
from keras.layers import (Convolution1D, Convolution2D, Convolution3D,
                          MaxPooling1D, MaxPooling2D, MaxPooling3D,
                          AveragePooling1D, AveragePooling2D, AveragePooling3D,
                          Flatten, Merge, Activation, Dropout, Dense)


def cloneLayerFromLayer(pLayer):
    # Convolution:
    if isinstance(pLayer, Convolution1D):
        return Convolution1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution2D):
        return Convolution2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution3D):
        return Convolution3D.from_config(pLayer.get_config())
    # Max-Pooling:
    elif isinstance(pLayer, MaxPooling1D):
        return MaxPooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling2D):
        return MaxPooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling3D):
        return MaxPooling3D.from_config(pLayer.get_config())
    # Average-Pooling:
    elif isinstance(pLayer, AveragePooling1D):
        return AveragePooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling2D):
        return AveragePooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling3D):
        return AveragePooling3D.from_config(pLayer.get_config())
    # Shape, merge, activation and regularization layers:
    elif isinstance(pLayer, Flatten):
        return Flatten.from_config(pLayer.get_config())
    elif isinstance(pLayer, Merge):
        return Merge.from_config(pLayer.get_config())
    elif isinstance(pLayer, Activation):
        return Activation.from_config(pLayer.get_config())
    elif isinstance(pLayer, Dropout):
        return Dropout.from_config(pLayer.get_config())
    # Fully-connected:
    elif isinstance(pLayer, Dense):
        return Dense.from_config(pLayer.get_config())
    # unsupported layer type
    return None
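# A minimal usage sketch (cloneModelLayers is a hypothetical helper): cloning
# the architecture of a whole model layer by layer. from_config() copies the
# configuration only, so the clones start with fresh weights; copy parameters
# via get_weights()/set_weights() once the cloned layers are built into a model.
def cloneModelLayers(pModel):
    clones = []
    for layer in pModel.layers:
        clone = cloneLayerFromLayer(layer)
        if clone is not None:
            clones.append(clone)
    return clones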
def transform_layer(layer, next_layer, queue_ctr, flattened):
    # Nested helper: relies on variables bound in the enclosing scope
    # (slalom, quantize, bits_w, bits_x, log, queues, layer_map, verif_preproc,
    # the slalom_* flags and sgxutils).
    print("transform {} (next = {})".format(layer, next_layer))
    new_layers = []
    skip_next = False

    if isinstance(layer, InputLayer):
        new_layers.append(InputLayer.from_config(layer.get_config()))

    elif isinstance(layer, Conv2D) and not isinstance(layer, DepthwiseConv2D):
        conf = layer.get_config()
        act = conf['activation']

        # if the next layer is a pooling layer, create a fused activation
        maxpool_params = None
        if slalom and isinstance(next_layer, MaxPooling2D):
            mp = next_layer
            assert (layer.activation == relu)
            maxpool_params = mp.get_config()
            skip_next = True

        act_layer = None
        if act != "linear":
            conf['activation'] = "linear"
            if slalom and isinstance(next_layer, GlobalAveragePooling2D):
                assert layer.activation in [relu, relu6]
                act = "avgpool" + act
                skip_next = True
            act_layer = ActivationQ(act, bits_w, bits_x, maxpool_params=maxpool_params,
                                    log=log, quantize=quantize, slalom=slalom,
                                    slalom_integrity=slalom_integrity,
                                    slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                    queue=None if queues is None else queues[queue_ctr])
            queue_ctr += 1

        conf['bits_w'] = bits_w
        conf['bits_x'] = bits_x
        conf['log'] = log
        conf['quantize'] = quantize
        conf['slalom'] = slalom
        conf['slalom_integrity'] = slalom_integrity
        conf['slalom_privacy'] = slalom_privacy
        conf['sgxutils'] = sgxutils
        new_layer = Conv2DQ.from_config(conf)
        new_layers.append(new_layer)
        layer_map[new_layer] = layer
        if act_layer is not None:
            new_layers.append(act_layer)

    elif isinstance(layer, DepthwiseConv2D):
        conf = layer.get_config()
        assert conf['activation'] == "linear"
        conf['bits_w'] = bits_w
        conf['bits_x'] = bits_x
        conf['log'] = log
        conf['quantize'] = quantize
        conf['slalom'] = slalom
        conf['slalom_integrity'] = slalom_integrity
        conf['slalom_privacy'] = slalom_privacy
        conf['sgxutils'] = sgxutils
        new_layer = DepthwiseConv2DQ.from_config(conf)
        new_layers.append(new_layer)
        layer_map[new_layer] = layer

    elif isinstance(layer, Dense):
        conf = layer.get_config()
        act = conf['activation']
        act_layer = None
        if act != "linear":
            conf['activation'] = "linear"
            act_layer = ActivationQ(act, bits_w, bits_x, log=log, quantize=quantize,
                                    slalom=slalom, slalom_integrity=slalom_integrity,
                                    slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                    queue=None if queues is None else queues[queue_ctr])
            queue_ctr += 1
        conf['bits_w'] = bits_w
        conf['bits_x'] = bits_x
        conf['log'] = log
        conf['quantize'] = quantize
        conf['slalom'] = slalom
        conf['slalom_integrity'] = slalom_integrity
        conf['slalom_privacy'] = slalom_privacy
        conf['sgxutils'] = sgxutils

        # replace the dense layer by a pointwise convolution
        if verif_preproc:
            del conf['units']
            conf['filters'] = layer.units
            conf['kernel_size'] = 1
            if not flattened:
                h_in = int(layer.input_spec.axes[-1])
                new_layers.append(Reshape((1, 1, h_in)))
                flattened = True
            new_layer = Conv2DQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer
        else:
            new_layer = DenseQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer
        if act_layer is not None:
            new_layers.append(act_layer)

    elif isinstance(layer, BatchNormalization):
        # batch norm layers are dropped from the transformed model
        pass

    elif isinstance(layer, MaxPooling2D):
        assert (not slalom or not slalom_privacy)
        new_layers.append(MaxPooling2D.from_config(layer.get_config()))

    elif isinstance(layer, AveragePooling2D):
        assert (not slalom or not slalom_privacy)
        new_layers.append(AveragePooling2D.from_config(layer.get_config()))
        new_layers.append(Lambda(lambda x: K.round(x)))

    elif isinstance(layer, Activation):
        assert layer.activation in [relu6, relu, softmax]
        queue = None if queues is None else queues[queue_ctr]
        queue_ctr += 1
        act_func = "relu6" if layer.activation == relu6 \
            else "relu" if layer.activation == relu else "softmax"
        if slalom and isinstance(next_layer, GlobalAveragePooling2D):
            #assert layer.activation == relu6
            act_func = "avgpoolrelu6"
            skip_next = True
        maxpool_params = None
        if slalom and (isinstance(next_layer, MaxPooling2D) or
                       isinstance(next_layer, AveragePooling2D)):
            mp = next_layer
            assert (layer.activation == relu)
            maxpool_params = mp.get_config()
            skip_next = True
        new_layers.append(ActivationQ(act_func, bits_w, bits_x, log=log,
                                      maxpool_params=maxpool_params, quantize=quantize,
                                      slalom=slalom, slalom_integrity=slalom_integrity,
                                      slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                      queue=queue))

    elif isinstance(layer, ZeroPadding2D):
        if quantize:
            # merge with next layer
            conv = next_layer
            assert isinstance(conv, Conv2D) or isinstance(conv, DepthwiseConv2D)
            assert conv.padding == 'valid'
            conv.padding = 'same'
        else:
            new_layers.append(ZeroPadding2D.from_config(layer.get_config()))

    elif isinstance(layer, Flatten):
        if not verif_preproc:
            new_layers.append(Flatten.from_config(layer.get_config()))

    elif isinstance(layer, GlobalAveragePooling2D):
        assert not slalom
        conf = layer.get_config()
        conf['bits_w'] = bits_w
        conf['bits_x'] = bits_x
        conf['log'] = log
        conf['quantize'] = quantize
        new_layers.append(GlobalAveragePooling2DQ.from_config(conf))

    elif isinstance(layer, Reshape):
        new_layers.append(Reshape.from_config(layer.get_config()))

    elif isinstance(layer, Dropout):
        # dropout is an identity at inference time; drop it
        pass

    elif isinstance(layer, ResNetBlock):
        #assert not slalom
        # recursively transform both residual paths and the merge activation
        path1 = []
        path2 = []
        for l in layer.path1:
            lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
            path1.extend(lq)
        for l in layer.path2:
            lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
            path2.extend(lq)
        [actq], queue_ctr, flattened, skip_next = \
            transform_layer(layer.merge_act, next_layer, queue_ctr, flattened)
        new_layer = ResNetBlock(layer.kernel_size, layer.filters, layer.stage,
                                layer.block, layer.identity, layer.strides,
                                path1=path1, path2=path2, merge_act=actq,
                                quantize=quantize, bits_w=bits_w, bits_x=bits_x,
                                slalom=slalom, slalom_integrity=slalom_integrity,
                                slalom_privacy=slalom_privacy)
        new_layers.append(new_layer)

    else:
        raise AttributeError("Don't know how to handle layer {}".format(layer))

    return new_layers, queue_ctr, flattened, skip_next
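# A minimal driver sketch (transform_model_sketch is hypothetical):
# transform_layer() is assumed to be nested inside an enclosing
# transform(model, ...) function that binds slalom, queues, bits_w, etc.
# This shows how skip_next fuses a layer into its predecessor.
def transform_model_sketch(model):
    queue_ctr, flattened, skip_next = 0, False, False
    all_new_layers = []
    for i, layer in enumerate(model.layers):
        if skip_next:
            # this layer was fused into the previous one (e.g. a pooling
            # layer merged into an activation), so it produces no new layer
            skip_next = False
            continue
        next_layer = model.layers[i + 1] if i + 1 < len(model.layers) else None
        new_layers, queue_ctr, flattened, skip_next = \
            transform_layer(layer, next_layer, queue_ctr, flattened)
        all_new_layers.extend(new_layers)
    return all_new_layers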
def PrunWeight(model, model_name, x_prune, y_prune, x_test, y_test, pruning_rate,
               compile_info, fine_tune):
    ############ Calculating the weight limit for pruning ############
    ############ We do not consider biases in the pruning process ####
    parameters = []
    conv_layers_weights = []
    for layer in model.layers:
        if layer.get_config()['name'].find("conv") != -1:
            conv_layers_weights.append(layer.get_weights())
    for _, layer_weights in enumerate(conv_layers_weights):
        parameters.append(K.flatten(K.abs(layer_weights[0])))
    dense_layers_weights = []
    for layer in model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            dense_layers_weights.append(layer.get_weights())
    for _, layer_weights in enumerate(dense_layers_weights):
        parameters.append(K.flatten(K.abs(layer_weights[0])))
    # the threshold is the pruning_rate-quantile of all kernel magnitudes
    parameters = K.concatenate(parameters)
    parameters = sorted(K.get_value(parameters).tolist())
    weight_limit = parameters[int(pruning_rate * len(parameters))]
    print("Pruning weight threshold : ", weight_limit)
    ##################################################################
    dense_layers_weights = []
    conv_filter_weights = []
    batch_norm_params = []
    kernel_masks_for_dense_and_conv_layers = []
    model_tensors_dict = {}
    input_height, input_width, input_channels = model.input.shape[1:]
    pruned_model_input = Input(shape=(int(input_height), int(input_width),
                                      int(input_channels)))
    if model.layers[0].name.find('input') == -1:
        model_tensors_dict[str(model.layers[0].input.name)] = pruned_model_input
    else:
        model_tensors_dict[str(model.layers[0].output.name)] = pruned_model_input
    Flow = pruned_model_input
    for _, layer in enumerate(model.layers):
        if layer.get_config()['name'].find("conv2d") != -1:
            # mask out kernel entries whose magnitude is below the threshold
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]), 'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow = MaskedConv2D(filters=layer.get_config()['filters'],
                                kernel_size=layer.get_config()['kernel_size'],
                                kernel_initializer=layer.get_config()['kernel_initializer'],
                                kernel_regularizer=layer.get_config()['kernel_regularizer'],
                                strides=layer.get_config()['strides'],
                                padding=layer.get_config()['padding'],
                                activation=layer.get_config()['activation'],
                                use_bias=layer.get_config()['use_bias'],
                                Masked=True,
                                kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            conv_filter_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("dense") != -1:
            kernel_mask = K.cast(weight_limit <= K.abs(layer.get_weights()[0]), 'float32')
            kernel_masks_for_dense_and_conv_layers.append(kernel_mask)
            Flow = MaskedDense(units=layer.get_config()['units'],
                               activation=layer.get_config()['activation'],
                               use_bias=layer.get_config()['use_bias'],
                               kernel_initializer=layer.get_config()['kernel_initializer'],
                               Masked=True,
                               kernel_mask_val=kernel_mask)(model_tensors_dict[str(layer.input.name)])
            dense_layers_weights.append(layer.get_weights())
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("activation") != -1:
            Flow = Activation.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("max_pooling") != -1:
            Flow = MaxPooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("average_pooling") != -1:
            Flow = AveragePooling2D.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("dropout") != -1:
            Flow = Dropout.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("flatten") != -1:
            Flow = Flatten.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("add") != -1:
            input_tensors_list = []
            for idx in range(len(layer.input)):
                input_tensors_list.append(model_tensors_dict[layer.input[idx].name])
            Flow = add(input_tensors_list)
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("batch_normalization") != -1:
            batch_norm_params.append(layer.get_weights())
            Flow = BatchNormalization.from_config(layer.get_config())(model_tensors_dict[str(layer.input.name)])
            model_tensors_dict[str(layer.output.name)] = Flow
        elif layer.get_config()['name'].find("input") != -1:
            pass
    pruned_model = Model(pruned_model_input, Flow)
    ########################## setting the weights of layers #########################
    for layer in pruned_model.layers:
        if layer.get_config()['name'].find("dense") != -1:
            pruned_weights = [dense_layers_weights[0][0] *
                              K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(dense_layers_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del dense_layers_weights[0]
        elif layer.get_config()['name'].find("conv2d") != -1:
            pruned_weights = [conv_filter_weights[0][0] *
                              K.get_value(kernel_masks_for_dense_and_conv_layers[0])]
            if layer.get_config()['use_bias']:
                pruned_weights.append(conv_filter_weights[0][1])
            layer.set_weights(pruned_weights)
            del kernel_masks_for_dense_and_conv_layers[0]
            del conv_filter_weights[0]
        elif layer.get_config()['name'].find("batch") != -1:
            layer.set_weights(batch_norm_params[0])
            del batch_norm_params[0]
    ############################### Fine-tuning ######################################
    pruned_model.compile(loss=compile_info['loss'],
                         optimizer=compile_info['optimizer'],
                         metrics=compile_info['metrics'])
    if not fine_tune:
        return pruned_model
    else:
        early_stopping = EarlyStopping(monitor='val_acc', patience=2, verbose=0)
        callbacks = [early_stopping]
        # fine-tuning the network.
        pruned_model.fit(x_prune, y_prune,
                         batch_size=256,
                         epochs=10,
                         validation_data=(x_test, y_test),
                         shuffle=True,
                         callbacks=callbacks,
                         verbose=0)
        return pruned_model
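# A minimal usage sketch (hypothetical values): pruning the 50% smallest-
# magnitude conv/dense kernel weights and fine-tuning the masked model.
# compile_info mirrors the settings the original model was compiled with.
compile_info = {'loss': 'categorical_crossentropy',
                'optimizer': 'adam',
                'metrics': ['accuracy']}
pruned_model = PrunWeight(model, 'my_model', x_train, y_train, x_test, y_test,
                          pruning_rate=0.5, compile_info=compile_info,
                          fine_tune=True)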