Example #1
0
def bias_correction_after_cle(dataset: tf.data.Dataset):
    """
    Perform bias correction on a given model (mix of empirical and analytical) after
    cross layer equalization.
    :param dataset: Data passed by user as tf.Dataset type.
    :return: None
    """

    # Build a fresh ResNet50 graph and grab the Keras-managed session.
    tf.keras.backend.clear_session()
    _ = ResNet50(weights='imagenet', input_shape=(224, 224, 3))
    sess = tf.compat.v1.keras.backend.get_session()

    # Graph entry / exit op names used by every AIMET call below.
    input_ops = ['input_1']
    output_ops = ['fc1000/Softmax']

    # Quantization settings for the bias-correction pass.
    q_params = QuantParams(quant_mode='tf_enhanced',
                           round_mode='nearest',
                           use_cuda=True,
                           ops_to_ignore=None)

    # Sample counts and graph endpoints for bias correction itself.
    bc_params = BiasCorrectionParams(batch_size=1,
                                     num_quant_samples=10,
                                     num_bias_correct_samples=10,
                                     input_op_names=input_ops,
                                     output_op_names=output_ops)

    with sess.as_default():
        # Record conv/BN pairings before CLE folds the batch norms away,
        # so analytical correction can still use the BN statistics later.
        conv_bn_pairs = BiasCorrection.find_all_convs_bn_with_activation(
            sess,
            start_op_names=input_ops,
            output_op_names=output_ops)

        # Cross layer equalization returns a brand-new session.
        cle_session = equalize_model(sess,
                                     start_op_names=input_ops,
                                     output_op_names=output_ops)

        # Hybrid correction: analytical where BN info exists, empirical
        # elsewhere (perform_only_empirical_bias_corr=False).
        _corrected_session = BiasCorrection.correct_bias(
            cle_session,
            bc_params,
            q_params,
            dataset,
            conv_bn_dict=conv_bn_pairs,
            perform_only_empirical_bias_corr=False)
    sess.close()
Example #2
0
def cross_layer_equalization_auto():
    """ perform auto cross layer equalization """

    # Instantiate a pretrained ResNet50 in a clean graph and fetch the
    # Keras-managed session holding its variables.
    tf.keras.backend.clear_session()
    _ = ResNet50(weights='imagenet', input_shape=(224, 224, 3))
    sess = tf.compat.v1.keras.backend.get_session()

    # Graph endpoints handed to the CLE API.
    start_op = 'input_1'
    end_op = 'fc1000/Softmax'

    # A single call performs BatchNorm fold, Relu6 -> Relu replacement,
    # cross layer scaling and high bias fold. The returned session holds
    # the equalized graph and is the one to use for further evaluation.
    with sess.as_default():
        equalized_session = equalize_model(sess, start_op, end_op)
    sess.close()
    def test_equalize_with_custom_model_no_bias(self):
        """
        Test equalize with a custom model with conv without bias param.

        equalize_model is expected to fold the batch norms into the
        bias-less convs, which materializes a bias parameter on them.
        """
        tf.compat.v1.reset_default_graph()

        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())

        with sess.as_default():
            inputs = tf.keras.Input(shape=(
                32,
                32,
                3,
            ))

            # Two conv->bn->relu stages; both convs deliberately built
            # without a bias parameter.
            conv_op = tf.keras.layers.Conv2D(32, (3, 3),
                                             use_bias=False)(inputs)
            bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
            relu_1 = tf.nn.relu(bn_op)

            conv2_op = tf.keras.layers.Conv2D(32, (3, 3),
                                              use_bias=False)(relu_1)
            bn_op_2 = tf.keras.layers.BatchNormalization(fused=True)(
                conv2_op, training=False)
            _ = tf.nn.relu(bn_op_2)

            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)

            # Sanity check: before equalization the conv really has no bias.
            old_conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
            self.assertTrue(BiasUtils.is_bias_none(old_conv_op))

            conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
            new_sess = equalize_model(sess, conv_op.inputs[0].op.name,
                                      'Relu_1')

            # After BN fold the conv must carry a bias with one entry per
            # output channel (the conv has 32 filters).
            new_conv_op = new_sess.graph.get_operation_by_name('conv2d/Conv2D')
            bias = BiasUtils.get_bias_as_numpy_data(new_sess, new_conv_op)
            self.assertFalse(BiasUtils.is_bias_none(new_conv_op))
            self.assertEqual(len(bias), 32)

            # Also close the session returned by equalize_model; the
            # original only closed `sess`, leaking the new graph.
            new_sess.close()
        sess.close()
    def test_equalize_fold_forward(self):
        """
        Test equalize on a model with a forward bn fold.

        The BatchNorm sits after a relu, so it has to be folded forward
        into the following conv; the test asserts the first conv's bias
        does not shrink element-wise after equalization.
        """
        tf.compat.v1.reset_default_graph()
        inputs = tf.keras.Input(shape=(
            32,
            32,
            3,
        ), name="inputs")
        # conv -> relu -> bn -> conv -> conv -> relu; the bn can only be
        # folded forward because the relu blocks a backward fold.
        conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
        r_op = tf.nn.relu(conv_op)
        bn_op = tf.keras.layers.BatchNormalization(fused=True)(r_op)
        conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(bn_op)
        conv3_op = tf.keras.layers.Conv2D(32, (3, 3))(conv2_op)
        _ = tf.nn.relu(conv3_op)

        init = tf.compat.v1.global_variables_initializer()
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        sess.run(init)
        old_conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
        conv_bias_data_before_fold = BiasUtils.get_bias_as_numpy_data(
            sess, old_conv_op)

        conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')

        new_sess = equalize_model(sess, conv_op.inputs[0].op.name, 'Relu_1')
        new_conv_op = new_sess.graph.get_operation_by_name('conv2d/Conv2D')
        self.assertFalse(BiasUtils.is_bias_none(new_conv_op))
        conv_bias_data_after_fold = BiasUtils.get_bias_as_numpy_data(
            new_sess, new_conv_op)

        # Element-wise comparison: bias after the fold must be >= before.
        for before, after in zip(conv_bias_data_before_fold,
                                 conv_bias_data_after_fold):
            self.assertTrue(before <= after)

        # Close both sessions; the original leaked new_sess.
        new_sess.close()
        sess.close()
def main(args):
    """
    Evaluate the SRGAN generator on paired low/high resolution images,
    optionally apply cross layer equalization (CLE) and bias correction (BC),
    then simulate quantization and report PSNR/SSIM before and after.

    :param args: parsed command-line arguments. Attributes read here:
        weights_path, images_path, representative_datapath, output_dir,
        cross_layer_equalization, bias_correction, use_cuda, quant_scheme,
        num_quant_samples, num_bias_correct_samples, default_output_bw,
        default_param_bw.
    :return: None; metrics are printed to stdout.
    """
    # configuration for efficient use of gpu
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    print('Loading srgan generator...')
    # Keep the generator in a dedicated graph/session so the AIMET
    # transformations below operate on a known, isolated graph.
    gen_graph = tf.Graph()
    with gen_graph.as_default():
        gen_sess = tf.Session(config=config, graph=gen_graph)
        with gen_sess.as_default():
            srgan_generator = generator()
            srgan_generator.load_weights(args.weights_path)

    # sort files by filenames, assuming names match in both paths
    lr_images_files = sorted(
        glob.glob(os.path.join(args.images_path, '*LR.png')))
    hr_images_files = sorted(
        glob.glob(os.path.join(args.images_path, '*HR.png')))

    # check if number of images align
    if len(lr_images_files) != len(hr_images_files):
        raise RuntimeError('length of image files doesn`t match,'
                           'need same number of images for both'
                           'low resolution and high resolution!')

    image_files = (lr_images_files, hr_images_files)

    # two list of metrics on all images
    # Baseline (pre-quantization) evaluation on the FP32 model.
    psnr_vals, ssim_vals = evaluate_session(gen_sess, image_files,
                                            srgan_generator.input.name,
                                            srgan_generator.output.name)
    psnr_val = np.mean(psnr_vals)
    ssim_val = np.mean(ssim_vals)
    print(
        f'Mean PSNR and SSIM for given images on original model are: [{psnr_val}, {ssim_val}]'
    )

    # TODO: use a better default dataset for compute encodings when not given by users
    # use low resolution images if no representative lr data are provided

    # use low and high resolution images if no representative lr and hr data are provided
    if args.representative_datapath:
        bc_lr_data = glob.glob(
            os.path.join(args.representative_datapath, '*LR.png'))
        comp_encodings_lr_data = glob.glob(
            os.path.join(args.representative_datapath, '*LR.png'))
        comp_encodings_hr_data = glob.glob(
            os.path.join(args.representative_datapath, '*HR.png'))
    else:
        warnings.warn(
            'No representative input data are given,'
            'bias correction and computation of encodings will be done'
            'on part of all of the low resolution images!')
        bc_lr_data = lr_images_files

        warnings.warn('No representative reference data are given,'
                      'computation of encodings will be done'
                      'on part of all of the high resolution images!')
        comp_encodings_lr_data = lr_images_files
        comp_encodings_hr_data = hr_images_files

    comp_encodings_data = (comp_encodings_lr_data, comp_encodings_hr_data)

    if args.cross_layer_equalization:
        print('Applying cross layer equalization (CLE) to session...')
        # NOTE(review): plain strings are passed here while other call
        # sites in this file pass lists for start/output op names --
        # confirm equalize_model accepts both forms.
        gen_sess = equalize_model(
            gen_sess,
            start_op_names=srgan_generator.input.op.name,
            output_op_names=srgan_generator.output.op.name)

    if args.bias_correction:
        print('Applying Bias Correction (BC) to session...')
        # the dataset being evaluated might have varying image sizes
        # so right now only use batch size 1
        batch_size = 1
        num_imgs = len(bc_lr_data)

        quant_params = QuantParams(use_cuda=args.use_cuda,
                                   quant_mode=args.quant_scheme)
        # Clamp the sample counts so they never exceed the number of
        # available representative images.
        bias_correction_params = BiasCorrectionParams(
            batch_size=batch_size,
            num_quant_samples=min(num_imgs, args.num_quant_samples),
            num_bias_correct_samples=min(num_imgs,
                                         args.num_bias_correct_samples),
            input_op_names=[srgan_generator.input.op.name],
            output_op_names=[srgan_generator.output.op.name])

        ds = make_dataset(bc_lr_data)
        ds = ds.batch(batch_size)

        # correct_bias returns a new session holding the corrected graph.
        gen_sess = BiasCorrection.correct_bias(gen_sess,
                                               bias_correction_params,
                                               quant_params, ds)

    # creating quantsim object which inserts quantizer ops
    sim = quantsim.QuantizationSimModel(
        gen_sess,
        starting_op_names=[srgan_generator.input.op.name],
        output_op_names=[srgan_generator.output.op.name],
        quant_scheme=args.quant_scheme,
        default_output_bw=args.default_output_bw,
        default_param_bw=args.default_param_bw)

    # compute activation encodings
    # usually achieves good results when data being used for computing
    # encodings are representative of its task
    # NOTE(review): 'lambda_3/mul_quantized:0' is presumably the quantized
    # output tensor name inserted by QuantSim -- verify against the
    # quantized graph if the model definition changes.
    partial_eval = partial(evaluate_session,
                           input_name=srgan_generator.input.name,
                           output_name='lambda_3/mul_quantized:0')
    sim.compute_encodings(partial_eval, comp_encodings_data)

    # Final evaluation on the quantization-simulated session.
    psnr_vals, ssim_vals = evaluate_session(sim.session,
                                            image_files,
                                            srgan_generator.input.name,
                                            'lambda_3/mul_quantized:0',
                                            output_dir=args.output_dir)
    psnr_val = np.mean(psnr_vals)
    ssim_val = np.mean(ssim_vals)

    print(
        f'Mean PSNR and SSIM for given images on quantized model are: [{psnr_val}, {ssim_val}]'
    )
Example #6
0
def run_evaluation(args):
    """
    Evaluate a slim image-classification model on ImageNet-style validation
    TFRecords with AIMET post-training quantization applied.

    Pipeline: build the eval graph, load weights, fold batch norms, then
    either restore a BN-folded checkpoint or run CLE + bias correction,
    create a QuantizationSimModel, compute encodings and report the top-1
    accuracy of the quantized model.

    :param args: parsed command-line arguments (dataset_dir, model_name,
        image_size, labels_offset, batch_size, checkpoint_path,
        ckpt_bn_folded, quant_scheme, round_mode, default_output_bw,
        default_param_bw, quantsim_config_file, encodings_iterations).
    :return: None; prints the final top-1 accuracy.
    """
    # Build graph definition
    with tf.Graph().as_default():
        # Create iterator
        tf_records = glob(args.dataset_dir + '/validation*')
        preprocessing_fn = preprocessing_factory.get_preprocessing(
            args.model_name, is_training=False)
        parse_function = wrap_preprocessing(preprocessing_fn,
                                            height=args.image_size,
                                            width=args.image_size,
                                            num_classes=(1001 -
                                                         args.labels_offset),
                                            labels_offset=args.labels_offset)

        dataset = tf.data.TFRecordDataset(tf_records).repeat(1)
        dataset = dataset.map(parse_function, num_parallel_calls=1).apply(
            tf.contrib.data.batch_and_drop_remainder(args.batch_size))
        iterator = dataset.make_initializable_iterator()
        images, labels = iterator.get_next()

        network_fn = nets_factory.get_network_fn(
            args.model_name,
            num_classes=(1001 - args.labels_offset),
            is_training=False)
        # Placeholders-with-default let AIMET feed its own data through
        # 'input' while normal evaluation reads from the iterator.
        with tf.device('/cpu:0'):
            images = tf.placeholder_with_default(images,
                                                 shape=(None, args.image_size,
                                                        args.image_size, 3),
                                                 name='input')
            labels = tf.placeholder_with_default(labels,
                                                 shape=(None, 1001 -
                                                        args.labels_offset),
                                                 name='labels')
        logits, end_points = network_fn(images)
        confidences = tf.nn.softmax(logits, axis=1, name='confidences')
        categorical_preds = tf.argmax(confidences,
                                      axis=1,
                                      name='categorical_preds')
        categorical_labels = tf.argmax(labels,
                                       axis=1,
                                       name='categorical_labels')
        correct_predictions = tf.equal(categorical_labels, categorical_preds)
        top1_acc = tf.reduce_mean(tf.cast(correct_predictions, tf.float32),
                                  name='top1-acc')
        top5_acc = tf.reduce_mean(tf.cast(
            tf.nn.in_top_k(predictions=confidences,
                           targets=tf.cast(categorical_labels, tf.int32),
                           k=5), tf.float32),
                                  name='top5-acc')

        saver = tf.train.Saver()
        sess = tf.Session()

        # Load model from checkpoint
        if not args.ckpt_bn_folded:
            saver.restore(sess, args.checkpoint_path)
        else:
            # weights come from the BN-folded checkpoint restored below,
            # so only initialize the variables here
            sess.run(tf.global_variables_initializer())

    # Fold all BatchNorms before QuantSim
    sess, folded_pairs = fold_all_batch_norms(sess, ['IteratorGetNext'],
                                              [logits.name[:-2]])

    if args.ckpt_bn_folded:
        with sess.graph.as_default():
            saver = tf.train.Saver()
            saver.restore(sess, args.checkpoint_path)
    else:
        # Do Cross Layer Equalization and Bias Correction if not loading from a batchnorm folded checkpoint
        sess = equalize_model(sess, ['input'], [logits.op.name])
        conv_bn_dict = BiasCorrection.find_all_convs_bn_with_activation(
            sess, ['input'], [logits.op.name])
        quant_params = QuantParams(quant_mode=args.quant_scheme)
        bias_correction_dataset = tf.data.TFRecordDataset(tf_records).repeat(1)
        bias_correction_dataset = bias_correction_dataset.map(
            lambda x: parse_function(x)[0], num_parallel_calls=1).apply(
                tf.contrib.data.batch_and_drop_remainder(args.batch_size))
        bias_correction_params = BiasCorrectionParams(
            batch_size=args.batch_size,
            num_quant_samples=10,
            num_bias_correct_samples=512,
            input_op_names=['input'],
            output_op_names=[logits.op.name])

        sess = BiasCorrection.correct_bias(
            reference_model=sess,
            bias_correct_params=bias_correction_params,
            quant_params=quant_params,
            data_set=bias_correction_dataset,
            conv_bn_dict=conv_bn_dict,
            perform_only_empirical_bias_corr=True)

    # Define eval_func to use for compute encodings in QuantSim
    def eval_func(session, iterations):
        """Run up to `iterations` batches (-1 means the whole epoch) and
        return the mean top-1 accuracy over the batches actually run."""
        cnt = 0
        avg_acc_top1 = 0
        session.run('MakeIterator')
        while cnt < iterations or iterations == -1:
            try:
                avg_acc_top1 += session.run('top1-acc:0')
                cnt += 1
            except tf.errors.OutOfRangeError:
                # Dataset exhausted: stop accumulating. (Previously a bare
                # `except:` swallowed *every* error -- including real
                # failures and KeyboardInterrupt -- and divided by zero if
                # the very first batch raised.)
                break

        # Guard against an empty/immediately-exhausted dataset.
        return avg_acc_top1 / cnt if cnt else 0.0

    # Select the right quant_scheme
    if args.quant_scheme == 'range_learning_tf':
        quant_scheme = aimet_common.defs.QuantScheme.training_range_learning_with_tf_init
    elif args.quant_scheme == 'range_learning_tf_enhanced':
        quant_scheme = aimet_common.defs.QuantScheme.training_range_learning_with_tf_enhanced_init
    elif args.quant_scheme == 'tf':
        quant_scheme = aimet_common.defs.QuantScheme.post_training_tf
    elif args.quant_scheme == 'tf_enhanced':
        quant_scheme = aimet_common.defs.QuantScheme.post_training_tf_enhanced
    else:
        raise ValueError("Got unrecognized quant_scheme: " + args.quant_scheme)

    # Create QuantizationSimModel
    sim = QuantizationSimModel(
        session=sess,
        starting_op_names=['IteratorGetNext'],
        output_op_names=[logits.name[:-2]],
        quant_scheme=quant_scheme,
        rounding_mode=args.round_mode,
        default_output_bw=args.default_output_bw,
        default_param_bw=args.default_param_bw,
        config_file=args.quantsim_config_file,
    )

    # Run compute_encodings
    sim.compute_encodings(eval_func,
                          forward_pass_callback_args=args.encodings_iterations)

    # Run final evaluation
    sess = sim.session

    top1_acc = eval_func(sess, -1)
    print('Avg accuracy  Top 1: {}'.format(top1_acc))
    def test_equalize_model_multi_input(self):
        """
        Test bn fold with multiple input nodes.

        Builds two conv branches from two separate inputs, merges them with
        an add, and asserts equalization does not increase the element-wise
        biases of the intermediate convs on either branch.
        """

        tf.compat.v1.reset_default_graph()
        tf.set_random_seed(0)
        input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))
        input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))
        x1 = tf.keras.layers.Conv2D(
            8, (1, 1),
            name='conv1a',
            kernel_initializer=tf.random_uniform_initializer(-1, 1),
            bias_initializer='random_uniform')(input1)
        x2 = tf.keras.layers.Conv2D(
            8, (3, 3),
            name='conv1b',
            kernel_initializer=tf.random_uniform_initializer(-1, 1),
            bias_initializer='random_uniform')(x1)
        x3 = tf.keras.layers.Conv2D(
            8, (3, 3),
            name='conv1c',
            kernel_initializer=tf.random_uniform_initializer(-1, 1),
            bias_initializer='random_uniform')(input2)
        x4 = tf.keras.layers.Conv2D(
            8, (3, 3),
            name='conv1d',
            kernel_initializer=tf.random_uniform_initializer(-1, 1),
            bias_initializer='random_uniform')(x3)
        x = tf.keras.layers.add([x2, x4])
        conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(x)
        bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv2_op)
        _ = tf.nn.relu(bn_op)

        init = tf.compat.v1.global_variables_initializer()
        sess = tf.compat.v1.Session()
        sess.run(init)

        # Capture the biases of both branch-tail convs before equalization.
        conv_1b_before_equalize = sess.graph.get_operation_by_name(
            'conv1b/Conv2D')
        conv_1b_bias_data_before_fold = BiasUtils.get_bias_as_numpy_data(
            sess, conv_1b_before_equalize)
        conv_1d_before_equalize = sess.graph.get_operation_by_name(
            'conv1d/Conv2D')
        conv_1d_bias_data_before_fold = BiasUtils.get_bias_as_numpy_data(
            sess, conv_1d_before_equalize)

        new_sess = equalize_model(sess, ["input1", "input2"], 'Relu')

        conv_1b_after_equalize = new_sess.graph.get_operation_by_name(
            'conv1b/Conv2D')
        conv_1b_bias_data_after_fold = BiasUtils.get_bias_as_numpy_data(
            new_sess, conv_1b_after_equalize)
        conv_1d_after_equalize = new_sess.graph.get_operation_by_name(
            'conv1d/Conv2D')
        conv_1d_bias_data_after_fold = BiasUtils.get_bias_as_numpy_data(
            new_sess, conv_1d_after_equalize)

        # Element-wise: bias after equalization must be <= bias before.
        for after, before in zip(conv_1b_bias_data_after_fold,
                                 conv_1b_bias_data_before_fold):
            self.assertTrue(after <= before)

        for after, before in zip(conv_1d_bias_data_after_fold,
                                 conv_1d_bias_data_before_fold):
            self.assertTrue(after <= before)

        # Close the equalized session too; the original leaked new_sess.
        new_sess.close()
        sess.close()
    def test_analytical_empirical_bias_correction(self):
        """
        Test bn based bias correction hybrid with a user passed in dictionary
        of conv and bn after cle.

        With perform_only_empirical_bias_corr=False and the conv/bn
        dictionary captured before CLE, the analytical per-layer correction
        is expected to be invoked three times on this model.
        """

        # create a custom model
        tf.compat.v1.reset_default_graph()
        inputs = tf.keras.Input(shape=(
            32,
            32,
            3,
        ), name="inputs")

        conv_op = tf.keras.layers.Conv2D(
            32, (3, 3),
            kernel_initializer=tf.random_uniform_initializer(-1, 1),
            bias_initializer='random_uniform')(inputs)

        conv1_op = tf.keras.layers.Conv2D(
            32, (3, 3),
            kernel_initializer=tf.random_uniform_initializer(-1, 1),
            bias_initializer='random_uniform')(conv_op)

        bn_op = tf.keras.layers.BatchNormalization(
            fused=True,
            beta_initializer='random_uniform',
            gamma_initializer='random_uniform',
            moving_mean_initializer='random_uniform',
            moving_variance_initializer='random_uniform')(conv1_op,
                                                          training=False)

        conv2_op = tf.keras.layers.Conv2D(
            32, (3, 3),
            kernel_initializer=tf.random_uniform_initializer(-1, 1),
            bias_initializer='random_uniform')(bn_op)

        bn_op2 = tf.keras.layers.BatchNormalization(
            fused=True,
            beta_initializer='random_uniform',
            gamma_initializer='random_uniform',
            moving_mean_initializer='random_uniform',
            moving_variance_initializer='random_uniform')(conv2_op,
                                                          training=False)
        relu_1 = tf.nn.relu(bn_op2)
        conv6_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_1)

        _ = tf.nn.relu(conv6_op)

        init = tf.compat.v1.global_variables_initializer()
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        sess.run(init)

        output_op = sess.graph.get_operation_by_name('Relu_1')
        input_op_name = "inputs"

        bias_corr_input = BiasCorrectionParams(
            batch_size=1,
            num_quant_samples=10,
            num_bias_correct_samples=10,
            input_op_names=[input_op_name],
            output_op_names=[output_op.name])
        quant_params = QuantParams(use_cuda=False)

        np.random.seed(0)

        # One random sample matching the model's input shape.
        input_tensor = sess.graph.get_tensor_by_name('inputs:0')
        shape = input_tensor.shape
        dataset = np.random.rand(1, shape[1], shape[2], shape[3])

        # store conv bns info before CLE folds the batch norms away
        conv_bn_dict = BiasCorrection.find_all_convs_bn_with_activation(
            sess, [input_op_name], [output_op.name])

        # perform CLE
        new_sess = equalize_model(sess, input_op_name, output_op.name)
        conv_with_bn_op = new_sess.graph.get_operation_by_name(
            'conv2d_1/Conv2D')
        # Exercise bias retrieval on the equalized graph (result unused).
        _ = BiasUtils.get_bias_as_numpy_data(new_sess, conv_with_bn_op)

        # perform bias correction and check analytical is performed.
        # iter_first_x is mocked so no real data pipeline is needed, and the
        # analytical per-layer correction is mocked to count its calls.
        with unittest.mock.patch(
                'aimet_tensorflow.bias_correction.iter_first_x'
        ) as iter_first_x:
            iter_first_x.return_value = [dataset]
            with unittest.mock.patch(
                    'aimet_tensorflow.bias_correction.BiasCorrection.analytical_bias_correction_per_layer',
                    return_value=sess
            ) as mocked_analytical_bias_correction_per_layer:
                updated_sess = BiasCorrection.correct_bias(
                    new_sess,
                    bias_corr_input,
                    quant_params,
                    dataset,
                    conv_bn_dict=conv_bn_dict,
                    perform_only_empirical_bias_corr=False)

        self.assertEqual(
            mocked_analytical_bias_correction_per_layer.call_count, 3)

        # Close every session created in this test; the original leaked
        # the session returned by correct_bias.
        updated_sess.close()
        sess.close()
        new_sess.close()