def __init__(self, tarball_path):
        """Creates and loads a pretrained DeepLab model.

        Args:
            tarball_path: Path to a tar archive containing a frozen
                inference graph — the member whose basename contains
                ``self.FROZEN_GRAPH_NAME`` is loaded.

        Raises:
            RuntimeError: If no frozen graph file is found in the archive.
        """
        self.graph = tf.Graph()
        graph_def = None
        # Extract the frozen graph from the tar archive.  The context
        # manager guarantees the archive is closed even if parsing raises
        # (the original code leaked the handle on that path).
        with tarfile.open(tarball_path) as tar_file:
            for tar_info in tar_file.getmembers():
                if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
                    file_handle = tar_file.extractfile(tar_info)
                    graph_def = GraphDef.FromString(file_handle.read())
                    # Release the member file handle explicitly.
                    file_handle.close()
                    break

        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in tar archive.')

        # Import the graph definition into this instance's private graph.
        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')

        self.sess = Session(graph=self.graph)
    # NOTE(review): this fragment is detached from its enclosing function —
    # its `def` line (where `executor`, `exec_num`, and `opt_sch_file` are
    # bound) is not visible in this chunk; confirm the surrounding context.
    # It presumably drives a TVM graph tuner: benchmark candidate layout
    # transforms, run the search, then write the best schedules to file.
    executor.benchmark_layout_transform(min_exec_num=exec_num)
    executor.run()
    executor.write_opt_sch2record_file(opt_sch_file)


if __name__ == '__main__':
    # logging.getLogger('autotvm').setLevel(logging.DEBUG)
    # 1. Load the input image and resize it to the network's input size.
    img_path = '/opt/dataset/tr2_cropped/data/1.png'
    image = Image.open(img_path).resize((1024, 1024))
    x = np.array(image)
    # 2. Load the frozen TensorFlow graph from disk.
    GRAPH_PB_PATH = './frozen'
    # graph_def = tf.compat.v1.get_default_graph().as_graph_def(add_shapes=True)
    with tf.io.gfile.GFile('./frozen/frozen_model_fixed.pb', 'rb') as f:
        graph_def = GraphDef.FromString(f.read())
        # Call the TVM utility to normalize the graph definition and
        # import it into the default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

    # 3. Convert the TensorFlow graph to a TVM Relay module.
    shape_dict = {"input_1": (1, 1024, 1024, 3)}  # NHWC; key must match the graph's input tensor name
    mod, params = relay.frontend.from_tensorflow(graph_def, shape_dict)
    # Metadata / tuning configuration section.
    data_shape = (1, 1024, 1024, 3)  # maybe — TODO confirm against the model
    output_shape = (1, 1024, 1024, 4)
    batch_size = 1
    dtype = "float32"
    model_name = "unet_cpu_12_thread"
    log_file = "%s.log" % model_name
    graph_opt_sch_file = "%s_graph_opt_1000.log" % model_name
    input_name = "x"  # must match the input name used later in the script
    # NOTE(review): input_name ("x") differs from the shape_dict key
    # ("input_1") — confirm which name the runtime module actually expects.