Code example #1
File: metrics.py  Project: peternara/ShapeAssembly-3d
# Relies on os, sys, torch, tqdm, and project helpers (utils, hier_execute,
# check_rooted, check_stability, eval_get_var) defined or imported elsewhere in metrics.py.
def calc_tab3():
    ddir = sys.argv[1]
    inds = os.listdir(ddir)
    outs = []

    if len(sys.argv) > 2:
        inds = inds[:int(sys.argv[2])]

    # Each entry is either a program (.txt) executed into a mesh, or a raw .obj mesh.
    for ind in tqdm(inds):
        if '.txt' in ind:
            hp = utils.loadHPFromFile(f'{ddir}/{ind}')
            verts, faces = hier_execute(hp)
        else:
            verts, faces = utils.loadObj(f'{ddir}/{ind}')
            verts = torch.tensor(verts)
            faces = torch.tensor(faces)
        outs.append((verts, faces))

    misses = 0.
    results = {
        'num_parts': [],
        'rootedness': [],
        'stability': [],
    }

    samples = []

    for (verts, faces) in tqdm(outs):

        # Each cuboid contributes 8 vertices, so this recovers the part count.
        results['num_parts'].append(verts.shape[0] / 8.0)
        samples.append((verts, faces))

        # Binary indicators for whether the shape is rooted and physically stable.
        if check_rooted(verts, faces):
            results['rootedness'].append(1.)
        else:
            results['rootedness'].append(0.)

        if check_stability(verts, faces):
            results['stability'].append(1.)
        else:
            results['stability'].append(0.)

    # Average each metric over all shapes (defaulting to 0. when no values were collected).
    for key in results:
        if len(results[key]) > 0:
            res = torch.tensor(results[key]).mean().item()
        else:
            res = 0.

        results[key] = res

    results['variance'] = eval_get_var(samples)

    for key in results:
        print(f"Result {key} : {results[key]}")
Code example #2
def load_progs(dataset_path, max_shapes):
    # Load up to max_shapes hierarchical programs from dataset_path, keeping
    # only the indices whose files parse to non-empty programs.
    inds = os.listdir(dataset_path)
    inds = [i.split('.')[0] for i in inds[:max_shapes]]
    good_inds = []
    progs = []
    for ind in tqdm(inds):
        hp = utils.loadHPFromFile(f'{dataset_path}/{ind}.txt')
        if hp is not None and len(hp) > 0:
            progs.append(hp)
            good_inds.append(ind)
    return good_inds, progs
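A short usage sketch (assumed, not code from the project): the returned programs can be turned into meshes with the same hier_execute and utils.writeObj helpers that appear elsewhere in this listing. The paths are placeholders.

good_inds, progs = load_progs('data/chair', max_shapes=50)   # placeholder dataset path
for ind, hp in zip(good_inds, progs):
    verts, faces = hier_execute(hp)                 # execute the hierarchical program into a mesh
    utils.writeObj(verts, faces, f'out/{ind}.obj')  # mirrors the writeObj usage in example #3
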
Code example #3
def run_generate(args):
    # Create the output directory if it does not already exist.
    os.makedirs(f'{outpath}/gen_{args.exp_name}', exist_ok=True)

    decoder = torch.load(
        f"{outpath}/{args.model_name}/models/decoder_{args.load_epoch}.pt").to(
            device)
    encoder = torch.load(
        f"{outpath}/{args.model_name}/models/encoder_{args.load_epoch}.pt").to(
            device)

    # Seed all RNGs so generation is reproducible.
    random.seed(args.rd_seed)
    np.random.seed(args.rd_seed)
    torch.manual_seed(args.rd_seed)

    with torch.no_grad():
        if args.mode == "eval_gen":
            i = 0
            miss = 0
            while (i < args.num_gen):
                print(f"Gen {i}")
                try:
                    h0 = torch.randn(1, 1, args.hidden_dim).to(device)
                    prog, _ = run_eval_decoder(h0, decoder, True)
                    verts, faces = hier_execute(prog)

                    utils.writeObj(
                        verts, faces,
                        f"{outpath}/gen_{args.exp_name}/gen_{i}.obj")
                    utils.writeHierProg(
                        prog,
                        f"{outpath}/gen_{args.exp_name}/gen_prog_{i}.txt")
                    i += 1

                except Exception as e:
                    print(f"Failed to generate prog with {e}")
                    miss += 1

            print(f"Gen reject %: {miss / (args.num_gen + miss)}")

        if args.mode == "eval_recon":
            ind_file = f'data_splits/{args.category}/val.txt'
            inds = getInds(ind_file)
            for ind in tqdm(inds):
                gtprog = utils.loadHPFromFile(f'{args.dataset_path}/{ind}.txt')
                gtverts, gtfaces = hier_execute(gtprog)
                shape = progToData(gtprog)
                enc, _ = get_encoding(shape, encoder, mle=True)
                prog, _ = run_eval_decoder(enc, decoder, False)
                verts, faces = hier_execute(prog)
                utils.writeObj(
                    verts, faces,
                    f"{outpath}/gen_{args.exp_name}/{ind}_recon.obj")
                utils.writeObj(gtverts, gtfaces,
                               f"{outpath}/gen_{args.exp_name}/{ind}_gt.obj")
Code example #4
def do_sa():
    # Encode every training chair program with the trained encoder and cache
    # the resulting latent code to disk (one .enc file per shape).
    train_ind_file = 'data_splits/chair/train.txt'
    train_inds = list(getInds(train_ind_file))

    encoder = torch.load(sa_enc).to(device)

    for ind in tqdm(train_inds):

        rprog = utils.loadHPFromFile(f'data/squeeze_chair/{ind}.txt')
        shape = progToData(rprog)
        enc, _ = get_encoding(shape, encoder, mle=True)

        torch.save(enc, f'sa_encs/{ind}.enc')
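A follow-up sketch (assumed, not from the project) for reading the cached encodings back: each file under sa_encs/ holds one tensor written with torch.save, so torch.load recovers it.

import os
import torch

encs = {}
for fn in os.listdir('sa_encs'):
    if fn.endswith('.enc'):
        ind = fn[:-len('.enc')]
        encs[ind] = torch.load(f'sa_encs/{fn}', map_location='cpu')  # one latent code per shape
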
Code example #5
# Variant of load_progs that additionally normalizes the cube count of each
# program (via the project's fix_cube_count helper) before keeping it.
def load_progs2(dataset_path, max_shapes):
    inds = os.listdir(dataset_path)
    inds = [i.split('.')[0] for i in inds[:max_shapes]]
    good_inds = []
    progs = []
    for ind in tqdm(inds):
        hp = utils.loadHPFromFile(f'{dataset_path}/{ind}.txt')
        if hp is not None and len(hp) > 0:
            new_lines = fix_cube_count(hp['prog'])
            if new_lines is not None:
                hp['prog'] = new_lines
                progs.append(hp)
                good_inds.append(ind)
    return good_inds, progs
Code example #6
#         elif "attach" in l:
#             parse = P.parseAttach(l)
#             new_num = [round(x.item(), 3) for x in parse[2:]]
#             new_lines.append(f" attach({parse[0]}, {parse[1]}, {new_num[0]}," +
#                                       f" {new_num[1]}, {new_num[2]}, {new_num[3]}, {new_num[4]}, {new_num[5]})\n")
#         else:
#             new_lines.append(l)
#     with open(f"random_data_fixed/{f}", "w") as file:
#         for l in new_lines:
#             file.write(l)

files = os.listdir("random_hier_data")
P = Program()

for f in files:
    prog = loadHPFromFile(f"random_hier_data/{f}")

    def fix_lines(prog):
        # Sort the program lines so the bbox cuboid comes first, then the numbered
        # cuboids in order, then all other commands, with symmetry ops
        # (reflect/translate) last.
        def order(l):
            if "Cuboid(" in l:
                name = P.parseCuboid(l)[0]
                if name == "bbox":
                    return 0
                else:
                    return int(name[4:]) + 1
            elif ("reflect" in l) or ("translate" in l):
                return 1000
            else:
                return 100

        prog['prog'].sort(key=order)