def model(is_training):
    """Build a ResNet-50 classification graph for 32x32 RGB (CIFAR-10-sized) images.

    Reconstructs the ResNet-50 backbone from a JSON IR graph plus
    MXNet-converted weights, then attaches a 10-way dense classifier head.

    NOTE(review): relies on `graph_file`, `param_file`, `json`, `np`, `tf`,
    `transform_weight_from_mxnet_to_tensorflow` and `TFReconstructorTrain`
    being defined at module level — confirm against the full file.

    Args:
        is_training: forwarded to the reconstructor (e.g. batch-norm mode).

    Returns:
        Tuple (x, y, softmax, y_pred_cls, global_step, learning_rate).
    """
    image_size = 32
    image_channels = 3
    num_classes = 10

    with open(graph_file, 'r') as fh:
        ir_graph = json.load(fh)
    weights = np.load(param_file, allow_pickle=True)['arr_0'][()]
    weights = transform_weight_from_mxnet_to_tensorflow(weights)

    with tf.name_scope('main_params'):
        # Input arrives flattened; reshape to NHWC for the conv backbone.
        x = tf.placeholder(
            tf.float32,
            shape=[None, image_size * image_size * image_channels],
            name='Input')
        y = tf.placeholder(tf.float32, shape=[None, num_classes], name='Output')
        x_image = tf.reshape(
            x, [-1, image_size, image_size, image_channels], name='images')

        global_step = tf.Variable(
            initial_value=0, trainable=False, name='global_step')
        learning_rate = tf.placeholder(
            tf.float32, shape=[], name='learning_rate')

    with tf.variable_scope('resnet50'):
        reconstructor = TFReconstructorTrain(
            ir_graph, weights, output_node_ids=['502'])
        feature_maps = reconstructor.model(
            is_training=is_training, input_dict={'0': x_image})

    with tf.variable_scope('fully_connected') as fc_scope:
        # Node '502' is the backbone output; 2048 is the ResNet-50 feature width.
        flattened = tf.reshape(feature_maps['502'], [-1, 2048])
        softmax = tf.layers.dense(
            inputs=flattened, units=num_classes, name=fc_scope.name)

    y_pred_cls = tf.argmax(softmax, axis=1)

    return x, y, softmax, y_pred_cls, global_step, learning_rate
def create_model():
    """Rebuild the reconstructed ResNet graph with Conv2D kernels as variables.

    Two passes:
      1. Reconstruct the frozen graph and collect every Conv2D kernel constant.
      2. Serialize the graph to ``temp.pb``, reset the default graph, and
         re-import the GraphDef with each kernel constant remapped (via
         ``input_map``) to a ``tf.Variable`` holding the same value, making
         the kernels assignable/trainable.

    Returns:
        Tuple (x, y, logits, op_names, kernel_list, bias_list,
        in_tensors, out_tensors) — tensors from the re-imported graph;
        ``kernel_list`` holds the replacement variables.
    """
    with open('resnet50_v1b/IR_fused_for_CModel_graph.json', 'r') as f:
        jgraph = json.load(f)
    params = np.load('resnet50_v1b/IR_fused_for_CModel_params.npz',
                     allow_pickle=True)['arr_0'][()]
    params = src.transform_weight_from_mxnet_to_tensorflow(params)
    trc = src.TFReconstructor(jgraph, params)
    trc._execute()

    # These ops must exist before serialization below (they end up in
    # temp.pb); the tensors themselves are re-fetched after the re-import.
    x = tf.get_default_graph().get_tensor_by_name("0:0")
    y = tf.placeholder(tf.int64, (None,))
    logits = tf.layers.flatten(
        tf.get_default_graph().get_tensor_by_name("506:0"))

    # Pass 1: only the kernel constants matter here; the other Conv2D
    # metadata is recollected from the re-imported graph in pass 2.
    _, _, _, kernel_list, _ = _collect_conv2d(tf.get_default_graph())

    # Snapshot each kernel constant's value, keyed by its tensor name.
    kernel_name_val_mapping = {
        kernel.name: tf.make_ndarray(kernel.op.get_attr('value'))
        for kernel in kernel_list
    }
    # Round-trip through a serialized GraphDef so the constants can be
    # remapped to variables on import.
    tf.io.write_graph(tf.get_default_graph(), './', 'temp.pb', as_text=False)
    tf.reset_default_graph()
    graphdef = tf.GraphDef()
    with open('./temp.pb', 'rb') as f:
        graphdef.ParseFromString(f.read())

    kernel_var_mapping = {name: tf.Variable(val)
                          for name, val in kernel_name_val_mapping.items()}
    tf.graph_util.import_graph_def(graphdef, input_map=kernel_var_mapping)
    x = tf.get_default_graph().get_tensor_by_name("import/0:0")
    y = tf.placeholder(tf.int64, (None, ))
    logits = tf.layers.flatten(
        tf.get_default_graph().get_tensor_by_name("import/506:0"))
    # Translate the pre-reset kernel tensors into their replacement variables.
    kernel_list = [kernel_var_mapping[kernel.name] for kernel in kernel_list]

    # Pass 2: collect Conv2D metadata from the re-imported graph.
    op_names, in_tensors, out_tensors, _, bias_list = _collect_conv2d(
        tf.get_default_graph())
    return x, y, logits, op_names, kernel_list, bias_list, in_tensors, out_tensors


def _collect_conv2d(graph):
    """Return (op_names, in_tensors, out_tensors, kernels, biases) over every
    Conv2D op in *graph*; biases are all None (no fused bias in this graph)."""
    op_names, in_tensors, out_tensors, kernels, biases = [], [], [], [], []
    for operation in graph.get_operations():
        if operation.type == 'Conv2D':
            # Public, indexable `Operation.inputs` instead of the private
            # `inputs._inputs` the original reached into.
            in_tensors.append(operation.inputs[0])
            out_tensors.append(operation.outputs[0])
            kernels.append(operation.inputs[1])
            biases.append(None)
            op_names.append(operation.inputs[1].name)
    return op_names, in_tensors, out_tensors, kernels, biases
# Example #3 (scraped site marker; stray "0" vote-count artifact removed)
def get_graph(graph, params, config):
    """Build a quantized TF graph and return it with its I/O tensors.

    Args:
        graph: JSON IR graph description.
        params: MXNet-layout parameter dict; converted to TF layout here.
        config: object exposing ``QUAN.strategy``/``QUAN.qconfig`` and
            ``EVALUATION.input_node``/``EVALUATION.output_node``.

    Returns:
        Tuple (tf_graph, input_tensor, output_tensor).
    """
    tf_params = src.transform_weight_from_mxnet_to_tensorflow(params)
    tf_graph = tf.Graph()
    with tf_graph.as_default():
        quantizer = src.get_quan(config.QUAN.strategy, config.QUAN.qconfig,
                                 graph, tf_params)
        quantizer.execute()
    nodes = quantizer.node_dict
    return (tf_graph,
            nodes[config.EVALUATION.input_node],
            nodes[config.EVALUATION.output_node])
def run(graph, params, config, evaluation, save_path):
    """Quantize the graph, run one forward pass, optionally save the GraphDef.

    Args:
        graph: JSON IR graph description.
        params: MXNet-layout parameter dict; converted to TF layout here.
        config: object exposing ``strategy`` and ``qconfig``.
        evaluation: object exposing ``input_images``, ``input_node`` and
            ``output_node``.
        save_path: if truthy, the quantized graph is written there as a
            binary GraphDef.

    Returns:
        Tuple (output_arr, act_list) — the network output for the
        evaluation images and the quantizer's recorded activation list.
    """
    tf_params = src.transform_weight_from_mxnet_to_tensorflow(params)

    tf_graph = tf.Graph()
    with tf_graph.as_default():
        quantizer = src.get_quan(config.strategy, config.qconfig, graph,
                                 tf_params)
        quantizer.execute()

    with tf.Session(graph=tf_graph) as sess:
        input_arr = src.load_images(evaluation.input_images)
        # Alternative loader kept from the original for reference:
        # input_arr = src.load_cifar10(evaluation.input_images)
        output_arr = sess.run(
            quantizer.node_dict[evaluation.output_node],
            feed_dict={quantizer.node_dict[evaluation.input_node]: input_arr})

    if save_path:
        tf.io.write_graph(tf_graph,
                          os.path.dirname(save_path),
                          os.path.basename(save_path),
                          as_text=False)
    return output_arr, quantizer.act_list
# Example #5 (scraped site marker; stray "0" vote-count artifact removed)
                        '--preprocess-config',
                        default='configs/mxnet_imagenet_trans.json',
                        help='json config of preprocess')
    parser.add_argument('-o',
                        '--output',
                        default='calibrations/resnet50_v1b.json')
    parser.add_argument('--use-kl',
                        action='store_true',
                        default=False,
                        help='flag of use kl')
    args = parser.parse_args()

    with open(args.graph_json, 'r') as f:
        json_graph = json.load(f)
    params = np.load(args.params_file, allow_pickle=True)['arr_0'][()]
    params = src.transform_weight_from_mxnet_to_tensorflow(params)
    image_list = src.utils.get_image_list(args.image_dir)

    preprocessor = src.transform.JsonTrans(args.preprocess_config)
    calibrate_dataset = src.CalibDataset(args.image_dir,
                                         25,
                                         transformer=preprocessor)
    calibrator = src.quantization.Calibration(json_graph, params,
                                              calibrate_dataset)

    # table = calibrator.run()
    if args.use_kl:
        table = calibrator.run_kl()
    else:
        table = calibrator.run()
    with open(args.output, 'w') as f: