Code example #1
0
import pickle

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

batch_size = 32
root_dir = "/home/songanz/Documents/Git_repo/fusion/"  # change this for your own usage

# Build the Frustum-PointNet and restore pretrained weights.
# NOTE(review): this relies on `import torch` at the top of the file —
# `torch.load` is not reachable through `torch.nn` alone.
network = FrustumPointNet("Frustum-PointNet_eval_test_seq",
                          project_dir=root_dir)
network.load_state_dict(
    torch.load(root_dir + "pretrained_models/model_37_2_epoch_400.pth"))
network = network.cuda()  # move all parameters to the GPU

network.eval()  # (set in evaluation mode, this affects BatchNorm and dropout)

# Number of heading bins used by the bounding-box sub-network.
# NOTE(review): "NH" semantics inferred from the name — confirm in BboxNet.
NH = network.BboxNet_network.NH
# Evaluate each KITTI tracking test sequence in turn.
for sequence in [
        "0000", "0001", "0002", "0003", "0004", "0005", "0006", "0007", "0008",
        "0009", "0010", "0011", "0012", "0013", "0014", "0015", "0016", "0017",
        "0018", "0027"
]:
    print(sequence)

    # NOTE(review): this constructor call continues beyond the visible excerpt.
    test_dataset = DatasetKittiTestSequence(
        kitti_data_path=root_dir + "data/kitti",
        kitti_meta_path=root_dir + "data/kitti/meta",
Code example #2
0
# Read the KITTI calibration file and pull out the raw matrices.
calib = calibread(calib_path)
P2 = calib["P2"]  # 3x4 camera projection matrix
# Rigid transformation from Velodyne to (non-rectified) camera coordinates.
Tr_velo_to_cam_orig = calib["Tr_velo_to_cam"]
R0_rect_orig = calib["R0_rect"]

# Embed both transforms into 4x4 homogeneous matrices.
R0_rect = np.eye(4)
R0_rect[:3, :3] = R0_rect_orig

Tr_velo_to_cam = np.eye(4)
Tr_velo_to_cam[:3, :] = Tr_velo_to_cam_orig
# ==================== Load Networks ====================
# Frustum-PointNet: restore pretrained weights and move to the GPU.
F_network = FrustumPointNet("Frustum-PointNet_eval_val_seq",
                            project_dir=root_dir)
F_network.load_state_dict(torch.load(F_weights))
F_network = F_network.cuda()
F_network.eval()  # evaluation mode: affects BatchNorm, dropout etc.

# Number of heading bins exposed by the bounding-box sub-network.
NH = F_network.BboxNet_network.NH

# YOLO detector: build from cfg, load Darknet-format weights, GPU, eval mode.
Y_network = Darknet(Y_cfgfile)
Y_network.load_weights(Y_weights)
Y_network.cuda()
Y_network.eval()
# ==================== Useful functions ====================


def resize_img(img, img_size=416):
    """Resize *img* toward a square of side ``img_size``.

    NOTE(review): the function body continues beyond this excerpt; the
    visible lines only measure the height/width difference, presumably
    to compute square padding — confirm against the full definition.
    """
    h, w, _ = img.shape  # assumes an HxWxC array (e.g. OpenCV image) — TODO confirm
    dim_diff = np.abs(h - w)  # how far the image is from being square