def test_bias_update_to_dense(self):
    """ Test bias correction on a MatMul (Dense) layer """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3,))
    x = tf.keras.layers.Flatten()(inputs)
    dense = tf.keras.layers.Dense(2, use_bias=False, activation=tf.nn.softmax, name="single_residual")(x)

    # pylint: disable=no-member
    _ = tf.nn.relu(dense)

    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
    sess.run(init)

    # the Dense layer was created without a bias
    dense_op = sess.graph.get_operation_by_name('single_residual/MatMul')
    self.assertTrue(BiasUtils.is_bias_none(dense_op))

    # after initialization, the MatMul op should have a (zero) bias attached
    new_sess = BiasUtils.initialize_model_with_bias(sess, ['input_1'], ['Relu'])
    dense_op = new_sess.graph.get_operation_by_name('single_residual/MatMul')
    self.assertTrue(not BiasUtils.is_bias_none(dense_op))

    new_sess.close()
def bias_correction_single_layer_empirical(dataset: tf.data.Dataset):
    """ Perform empirical bias correction on one layer """

    # load a model
    tf.keras.backend.clear_session()
    _ = ResNet50(weights='imagenet', input_shape=(224, 224, 3))
    sess = tf.compat.v1.keras.backend.get_session()

    # input parameters for bias correction
    # populate required parameters in two data types: QuantParams and BiasCorrectionParams
    quant_params = QuantParams(quant_mode='tf_enhanced',
                               round_mode='nearest',
                               use_cuda=True,
                               ops_to_ignore=None)

    bias_correction_params = BiasCorrectionParams(batch_size=1,
                                                  num_quant_samples=10,
                                                  num_bias_correct_samples=10,
                                                  input_op_names=['input_1'],
                                                  output_op_names=['fc1000/Softmax'])

    with sess.as_default():
        # initialize model with zero bias
        sess = BiasUtils.initialize_model_with_bias(sess,
                                                    bias_correction_params.input_op_names,
                                                    bias_correction_params.output_op_names)

        # pick a layer for bias correction
        example_conv_layer = sess.graph.get_operation_by_name('res2a_branch2a/Conv2D')

        # invoke bias correction of one layer
        BiasCorrection.bias_correction_per_layer(reference_model=sess,
                                                 corrected_model=sess,
                                                 bias_correct_params=bias_correction_params,
                                                 layer_name_to_be_corrected=example_conv_layer.name,
                                                 quant_params=quant_params,
                                                 data_set=dataset)
    sess.close()
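# A minimal sketch (an illustrative assumption, not part of the original example) of
# how the empirical single-layer example above could be driven. Real, pre-processed
# calibration images should replace the random tensors generated here; the sample
# count and batch size are placeholders chosen to match BiasCorrectionParams(batch_size=1).
import numpy as np
import tensorflow as tf

def make_example_dataset(num_samples: int = 10) -> tf.data.Dataset:
    # stand-in for real calibration data, shaped like ResNet50 inputs (224, 224, 3)
    images = np.random.rand(num_samples, 224, 224, 3).astype(np.float32)
    return tf.data.Dataset.from_tensor_slices(images).batch(1)

bias_correction_single_layer_empirical(make_example_dataset())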
def bias_correction_single_layer_analytical():
    """ Perform analytical bias correction on one layer """

    # load a model
    tf.keras.backend.clear_session()
    _ = ResNet50(weights='imagenet', input_shape=(224, 224, 3))
    sess = tf.compat.v1.keras.backend.get_session()

    # input parameters for bias correction
    # populate the required quantization parameters in QuantParams
    quant_params = QuantParams(quant_mode='tf_enhanced',
                               round_mode='nearest',
                               use_cuda=True,
                               ops_to_ignore=None)

    with sess.as_default():
        # initialize model with zero bias
        sess = BiasUtils.initialize_model_with_bias(sess, ['input_1'], ['fc1000/Softmax'])

        # pick a layer for bias correction
        example_conv_layer = sess.graph.get_operation_by_name('res2a_branch2a/Conv2D')

        # get candidate convs with associated BN ops in the model
        convs_bn_activation_info_dict = BiasCorrection.find_all_convs_bn_with_activation(sess,
                                                                                         ['input_1'],
                                                                                         ['fc1000/Softmax'])

        # make sure the chosen example_conv_layer has a BN op associated with it
        if example_conv_layer in convs_bn_activation_info_dict.keys():
            preceding_bn_layer_info = convs_bn_activation_info_dict[example_conv_layer]

            # invoke analytical bias correction on this layer
            BiasCorrection.analytical_bias_correction_per_layer(sess,
                                                                example_conv_layer,
                                                                preceding_bn_layer_info,
                                                                quant_params)
    sess.close()
def test_initialize_with_bias_with_detached_ops(self):
    """ Test that initialize with bias only affects valid ops """
    tf.compat.v1.reset_default_graph()
    sess = tf.compat.v1.Session()
    inputs = tf.keras.Input(shape=(32, 32, 3,))
    conv1 = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
    _ = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh, use_bias=False)(conv1)
    _ = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(conv1)

    # detach conv2d_1 so it is no longer reachable from the model inputs
    graph_editor.detach_inputs(sess.graph.get_operation_by_name('conv2d_1/Conv2D'))

    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)

    # Check that outputs of conv2d and conv2d_1 have no biases
    self.assertTrue(sess.graph.get_operation_by_name('conv2d/Conv2D').outputs[0].consumers()[0].type != 'BiasAdd')
    self.assertTrue(sess.graph.get_operation_by_name('conv2d_1/Conv2D').outputs[0].consumers()[0].type != 'BiasAdd')

    sess = BiasUtils.initialize_model_with_bias(sess, ['input_1'], ['conv2d_2/BiasAdd'])

    # Check that conv2d has a bias inserted but not conv2d_1
    self.assertTrue(sess.graph.get_operation_by_name('conv2d/Conv2D').outputs[0].consumers()[0].type == 'BiasAdd')
    self.assertTrue(sess.graph.get_operation_by_name('conv2d_1/Conv2D').outputs[0].consumers()[0].type != 'BiasAdd')
def correct_bias(reference_model: tf.compat.v1.Session, bias_correct_params: BiasCorrectionParams,
                 quant_params: QuantParams, data_set: tf.data.Dataset,
                 conv_bn_dict: Union[Dict[tf.Operation, ConvBnInfoType], None] = None,
                 perform_only_empirical_bias_corr: bool = True):
    """
    Top level function for bias correction

    :param reference_model: active tf.compat.v1.Session for the model to be corrected
    :param bias_correct_params: input params for bias correction
    :param quant_params: QuantParams type with params for quantization simulation for bias correction
    :param data_set: input data set
    :param conv_bn_dict: Dict of conv and bn with activation info. If None, the function looks for it.
           This can be obtained on a model with convs and bns using the
           BiasCorrection.find_all_convs_bn_with_activation() api.
    :param perform_only_empirical_bias_corr: a flag to indicate that only empirical bias correction
           is to be performed
    :return: updated session with corrected bias for given ops
    """

    # one-time initialization of all layers with a bias param
    reference_model = BiasUtils.initialize_model_with_bias(reference_model,
                                                           bias_correct_params.input_op_names,
                                                           bias_correct_params.output_op_names)

    # Create a copy of the model to hold the corrected weights; the original stays as reference
    corrected_model = save_and_load_graph('./temp_meta_path', reference_model)

    # get all convs/linears in order, skipping gradient ops
    ordered_conv_linears = get_ordered_conv_linears(reference_model,
                                                    bias_correct_params.input_op_names,
                                                    bias_correct_params.output_op_names)

    # Get Conv2D / depthwise ops with preceding BN op info for analytical bias correction
    # if the user has not passed a dictionary
    if conv_bn_dict is None:
        convs_bn_activation_info_dict = BiasCorrection.find_all_convs_bn_with_activation(
            reference_model,
            bias_correct_params.input_op_names,
            bias_correct_params.output_op_names)
    else:
        convs_bn_activation_info_dict = BiasCorrection.refresh_op_ref(reference_model, conv_bn_dict)

    # Perform analytical bias correction for the first conv layer;
    # linear layers always get empirical bias correction
    if ordered_conv_linears:
        if not perform_only_empirical_bias_corr and ordered_conv_linears[0].type not in ['MatMul']:
            first_conv = ordered_conv_linears.pop(0)
            BiasCorrection.analytical_bias_correction_per_layer(corrected_model,
                                                                first_conv,
                                                                None,
                                                                quant_params,
                                                                is_first_conv=True)

    # for each candidate layer in the ordered list of conv/linear ops,
    # find the corresponding bn and activation info
    for layer in ordered_conv_linears:
        # if this layer matches a selected pattern of conv with preceding BN op and
        # empirical-only correction was not requested, perform analytical bias correction
        if layer in convs_bn_activation_info_dict.keys() and not perform_only_empirical_bias_corr:
            preceding_bn_layer_info = convs_bn_activation_info_dict[layer]
            BiasCorrection.analytical_bias_correction_per_layer(corrected_model,
                                                                layer,
                                                                preceding_bn_layer_info,
                                                                quant_params)
        else:
            # stand-alone convs/linears, or perform_only_empirical_bias_corr set to True:
            # perform empirical bias correction
            BiasCorrection.bias_correction_per_layer(reference_model,
                                                     corrected_model,
                                                     bias_correct_params,
                                                     layer.name,
                                                     quant_params,
                                                     data_set)
    logger.info('Completed bias correction')

    return corrected_model
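# Illustrative invocation of the top-level entry point above (a sketch, not part of
# the original listing): 'sess' is assumed to be an active tf.compat.v1.Session holding
# the loaded model and 'dataset' a tf.data.Dataset of pre-processed inputs, e.g. built
# as in the empirical single-layer example. Op names and sample counts mirror the
# ResNet50 examples and should be adjusted for other models. Depending on how the code
# is packaged, this entry point may also be exposed as BiasCorrection.correct_bias.
quant_params = QuantParams(quant_mode='tf_enhanced', round_mode='nearest',
                           use_cuda=True, ops_to_ignore=None)

bias_correction_params = BiasCorrectionParams(batch_size=1,
                                              num_quant_samples=10,
                                              num_bias_correct_samples=10,
                                              input_op_names=['input_1'],
                                              output_op_names=['fc1000/Softmax'])

# conv_bn_dict is left as None so conv/BN pairs are discovered automatically;
# perform_only_empirical_bias_corr=False enables analytical correction for convs
# that have an associated BN op, with empirical correction used everywhere else
corrected_sess = correct_bias(reference_model=sess,
                              bias_correct_params=bias_correction_params,
                              quant_params=quant_params,
                              data_set=dataset,
                              conv_bn_dict=None,
                              perform_only_empirical_bias_corr=False)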