Example #1
import numpy as np
import torch
from tqdm import tqdm

# Project-local imports -- module paths are assumed here; adjust to the repo layout.
from network import Network
from config import ConfigTest
from helper_tool import DataProcessing as DP


def main():
    cfg = ConfigTest
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = Network(cfg).to(device)
    print("model parameters:", sum(param.numel() for param in net.parameters()))

    for _ in tqdm(range(10)):
        npts = cfg.num_points
        pcld = np.random.rand(1, npts, 3)  # (B, N, 3) random point cloud
        feat = np.random.rand(1, 6, npts)  # (B, C, N) per-point features
        n_layers = 4
        sub_s_r = [16, 1, 4, 1]  # per-layer subsampling ratios
        inputs = {}
        for layer in range(n_layers):  # renamed from `i`: it shadowed the outer loop variable
            nei_idx = DP.knn_search(pcld, pcld, 16)  # 16-NN indices within the current cloud
            sub_pts = pcld[:, :pcld.shape[1] // sub_s_r[layer], :]  # keep the first N // ratio points
            pool_i = nei_idx[:, :pcld.shape[1] // sub_s_r[layer], :]  # pooling indices for kept points
            up_i = DP.knn_search(sub_pts, pcld, 1)  # nearest kept point per original point, for upsampling
            inputs['xyz'] = inputs.get('xyz', []) + [torch.from_numpy(pcld).float().to(device)]
            inputs['neigh_idx'] = inputs.get('neigh_idx', []) + [torch.LongTensor(nei_idx).to(device)]
            inputs['sub_idx'] = inputs.get('sub_idx', []) + [torch.LongTensor(pool_i).to(device)]
            inputs['interp_idx'] = inputs.get('interp_idx', []) + [torch.LongTensor(up_i).to(device)]
            pcld = sub_pts
        inputs['features'] = torch.from_numpy(feat).float().to(device)

        end_points = net(inputs)

    # Inspect the outputs of the last forward pass.
    for k, v in end_points.items():
        if isinstance(v, list):
            for ii, item in enumerate(v):
                print(k + '%d' % ii, item.size())
        else:
            print(k, v.size())


if __name__ == '__main__':
    main()
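
# `DP.knn_search` is a project-local helper not shown above. To run this test
# stand-alone, a minimal stand-in built on SciPy's cKDTree could look like the
# sketch below -- assuming the semantics "for each query point, return the
# indices of its k nearest support points", with output shape (B, M, k).
import numpy as np
from scipy.spatial import cKDTree

def knn_search(support_pts, query_pts, k):
    """Stand-in for DP.knn_search: (B, N, 3), (B, M, 3) -> (B, M, k) int indices."""
    batch_idx = []
    for b in range(support_pts.shape[0]):
        tree = cKDTree(support_pts[b])
        _, idx = tree.query(query_pts[b], k=k)  # idx has shape (M,) when k == 1, else (M, k)
        batch_idx.append(idx.reshape(query_pts.shape[1], k))
    return np.stack(batch_idx).astype(np.int64)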
Example #2
# The snippet begins mid-call; the opening line below is reconstructed, and the
# batch-size argument is assumed (its name and value are not in the snippet).
TEST_DATALOADER = DataLoader(TEST_DATASET,
                             batch_size=BATCH_SIZE,  # assumed
                             shuffle=True,
                             num_workers=20,
                             worker_init_fn=my_worker_init_fn,
                             collate_fn=TEST_DATASET.collate_fn)

print(len(TRAIN_DATALOADER), len(TEST_DATALOADER))
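
# `my_worker_init_fn` is defined earlier in this script (not shown). In this
# style of training code its usual job is to give each DataLoader worker a
# distinct numpy seed; a minimal sketch of such a function (the `_sketch`
# name marks it as hypothetical):
def my_worker_init_fn_sketch(worker_id):
    # Re-seed numpy per worker so random augmentations differ across workers.
    np.random.seed(np.random.get_state()[1][0] + worker_id)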

#################################################   network   #################################################

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net = Network(cfg)
net.to(device)

# Load the Adam optimizer
optimizer = optim.Adam(net.parameters(), lr=cfg.learning_rate)
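
# `it` (initialized to -1 just below) seeds the `last_epoch` argument of the
# LR scheduler referenced in the comments; `BNMomentumScheduler` is
# project-local code not shown here. A minimal sketch of the LR side, with a
# hypothetical exponential decay (the real schedule is not in this snippet):
#
# lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
#     optimizer,
#     lr_lambda=lambda step: 0.95 ** step,  # assumed decay factor
#     last_epoch=it,
# )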

# Load checkpoint if there is any
it = -1  # for the initialize value of `LambdaLR` and `BNMomentumScheduler`
start_epoch = 0
CHECKPOINT_PATH = FLAGS.checkpoint_path
if CHECKPOINT_PATH is not None and os.path.isfile(CHECKPOINT_PATH):
    checkpoint = torch.load(CHECKPOINT_PATH)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch']
    log_string("-> loaded checkpoint %s (epoch: %d)" %
               (CHECKPOINT_PATH, start_epoch))
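
# The loading code above fixes the checkpoint layout: a dict with the keys
# 'model_state_dict', 'optimizer_state_dict', and 'epoch'. The matching save
# at the end of each epoch would look like the sketch below (the save path
# and `epoch` variable are illustrative):
#
# save_dict = {'epoch': epoch + 1,  # resume from epoch + 1 on reload
#              'model_state_dict': net.state_dict(),
#              'optimizer_state_dict': optimizer.state_dict()}
# torch.save(save_dict, os.path.join(LOG_DIR, 'checkpoint.tar'))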

if torch.cuda.device_count() > 1:
    log_string("Let's use %d GPUs!" % (torch.cuda.device_count()))