Code example #1
def test():
    # create an empty task that has the correct key we want
    task = Task("yolo1", None, (1, 3, 448, 448, 64, 7, 2, 3, 1, 1), "llvm", 0)
    # auto-scheduling path, disabled here in favor of the hand-written config below
    # s, bufs, configs = schedule(task.key)
    # print(tvm.lower(s, bufs, simple_mode=True))
    # print("######################################")
    # print("op schedules:")
    # for config in configs.op_config_lst:
    #     print("----------------------------------")
    #     for name, value in config.items():
    #         if value:
    #             print(name, value)
    # print("graph schedules:")
    # for name, value in configs.graph_config.items():
    #     if value:
    #         print(name, value)
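    # hand-written schedule configuration: per-op spatial/reduce split factors
    # and unroll settings, plus a graph-level inline decision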
    op_configs = [{
        "spatial": [[1, 1, 1, 1], [1, 1, 1, 3], [454, 1, 1, 1], [1, 227, 2,
                                                                 1]],
        "unroll": [[1500, 1]]
    }, {
        "spatial": [[1, 1, 1, 1], [2, 4, 2, 4], [8, 1, 4, 7], [7, 1, 16, 2]],
        "reduce": [[1, 3, 1], [7, 1, 1], [7, 1, 1]],
        "unroll": [[1500, 1]]
    }]
    graph_config = {"inline": [[0, 0]]}
    configs = Config(op_configs, graph_config)

    beg = time.time()
    s, bufs = schedule_with_config(task.key, configs)
    end = time.time()
    time_cost = _evaluate(s, bufs, "llvm", 0, 10)
    print("Use", time_cost, "ms")
    print("Cost", end - beg, "s")
Code example #2
            trials=args.trials,
            parallel=args.parallel,
            method=args.method,
            use_model=args.use_model,
            rpc_info=rpc_info,
            force_inline=args.force_inline,
            logfile=flog,
        )
else:
    ret = optimize(
        args.shapes,
        args.from_,
        shapes[args.from_:end],
        target=args.target,
        dev_id=args.device,
        timeout=args.timeout,
        trials=args.trials,
        parallel=args.parallel,
        method=args.method,
        use_model=args.use_model,
        rpc_info=rpc_info,
        force_inline=args.force_inline,
        logfile=sys.stdout,
    )
if args.test != "":
    with open(args.test, "r") as fin:
        for line in fin:
            name, string = line.split(":", 1)
            obj = json.loads(string)
            configs = Config(obj[0], obj[1])
            test(name, configs, dev_id=args.device, rpc_info=rpc_info)
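The args.test branch above expects a file in which every line has the form name:json, where the JSON payload is a two-element list holding the per-op config list and the graph config. A minimal sketch of parsing one such line, with a made-up task name and made-up factor values (Config is the same project class used above):

import json

example_line = 'gemm_example:[[{"spatial": [[8, 8, 4, 2]], "unroll": [[1500, 1]]}], {"inline": [[0]]}]'
name, string = example_line.split(":", 1)  # split only at the first colon
obj = json.loads(string)                   # obj[0]: per-op configs, obj[1]: graph config
configs = Config(obj[0], obj[1])           # Config comes from the surrounding project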
Code example #3
def gemm_config(M, N, K, logits_dict):
    # enumerate candidate split factorizations for the spatial (M, N) and
    # reduce (K) loop extents
    spatial_split_parts = 4
    reduce_split_parts = 4
    unroll_max_factor = 10

    sy = any_factor_split(M, spatial_split_parts)
    sx = any_factor_split(N, spatial_split_parts)
    sk = any_factor_split(K, reduce_split_parts)
    # unroll candidates: [i, 2**j] pairs covering steps 1 .. 2**unroll_max_factor
    unroll = []
    for i in range(1):
        for j in range(unroll_max_factor + 1):
            unroll.append([i, 2**j])

    def _rational(lst, max_val):
        # normalize each split tuple to [0, 1] by its full loop extent
        return torch.FloatTensor([[y / float(max_val) for y in x]
                                  for x in lst])

    nsy = _rational(sy, M)
    nsx = _rational(sx, N)
    nsk = _rational(sk, K)

    # encode unroll candidates as [flag, log2(step)] features
    n_unroll = torch.FloatTensor([[x[0] / float(2) + 0.5,
                                   math.log2(x[1])] for x in unroll])

    # get logits
    spatial_logits = logits_dict["spatial"]
    reduce_logits = logits_dict["reduce"]
    unroll_logits = logits_dict["unroll"]

    # make choice: pair each candidate's features with the corresponding logits
    # (broadcast across all candidates) and keep the argmax of the classifier scores
    feature_size = len(logits_dict["spatial"][0])
    split_classifier = model.MLP(feature_size + spatial_split_parts)
    unroll_classifier = model.MLP(feature_size + 2)
    cy = torch.argmax(
        split_classifier(
            torch.cat([
                nsy,
                torch.zeros([nsy.shape[0], feature_size]) + spatial_logits[0]
            ],
                      dim=1)))
    cx = torch.argmax(
        split_classifier(
            torch.cat([
                nsx,
                torch.zeros([nsx.shape[0], feature_size]) + spatial_logits[1]
            ],
                      dim=1)))
    ck = torch.argmax(
        split_classifier(
            torch.cat([
                nsk,
                torch.zeros([nsk.shape[0], feature_size]) + reduce_logits[0]
            ],
                      dim=1)))
    cu = torch.argmax(
        unroll_classifier(
            torch.cat([
                n_unroll,
                torch.zeros([n_unroll.shape[0], feature_size]) + unroll_logits
            ],
                      dim=1)))
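
    # torch.argmax returns 0-dim integer tensors; they index the Python candidate
    # lists below just like plain ints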

    print(cy, cx, ck, cu)

    # print choice
    print("Chosen configuration:")
    print("split y =", sy[cy])
    print("split x =", sx[cx])
    print("split k =", sk[ck])
    print("unroll", unroll[cu])

    # make config
    op_config = [{
        "spatial": [sy[cy], sx[cx]],
        "reduce": [sk[ck]],
        "inline": [],
        "unroll": [unroll[cu]]
    }]
    graph_config = {"spatial": [], "reduce": [], "inline": [[0]], "unroll": []}
    return Config(op_config, graph_config)
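
As a rough usage sketch (not from the original source), gemm_config can be driven with randomly initialized logits. The feature size and the random tensors below are assumptions chosen only to match the shapes the function indexes: two spatial logit vectors (for the M and N splits), one reduce logit vector (for K), and one unroll logit vector, all of length feature_size; any_factor_split, model.MLP, and Config are still expected to come from the surrounding project.

import torch

feature_size = 8  # assumed; every logit vector below must have this length
logits_dict = {
    "spatial": [torch.randn(feature_size), torch.randn(feature_size)],  # M and N splits
    "reduce": [torch.randn(feature_size)],                              # K split
    "unroll": torch.randn(feature_size),
}
config = gemm_config(1024, 1024, 512, logits_dict)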