# Example #1
# 0
# Restore pretrained generator weights from a prior GAN training run.
# NOTE(review): checkpoint path is hard-coded — confirm the file exists
# relative to the working directory before running.
generator.load_state_dict(
    torch.load(
        'result/saved_models/train-gan-costmap-vector16-01/generator_6000.pth')
)

# MSE loss for trajectory regression, placed on the active device.
trajectory_criterion = torch.nn.MSELoss().to(device)

# Adam optimizer over the encoder's parameters only; learning rate and
# weight decay come from the parsed command-line options (`opt`).
e_optimizer = torch.optim.Adam(encoder.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay)

# param = parse_yaml_file_unsafe('./param_oxford.yaml')
# train_loader = DataLoader(OursDataset(param, mode='train', opt=opt), batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
# train_samples = iter(train_loader)

# Evaluation data: KITTI dataset, one sample per batch, deterministic order
# (shuffle=False). NOTE(review): the "_unsafe" suffix suggests a plain
# yaml.load — keep the param file trusted.
param = parse_yaml_file_unsafe('./param_kitti.yaml')
eval_loader = DataLoader(KittiDataset(param, mode='eval', opt=opt),
                         batch_size=1,
                         shuffle=False,
                         num_workers=1)
# Manual iterator lets the training loop pull eval batches on demand.
eval_samples = iter(eval_loader)


def show_traj(fake_traj, real_traj, t, step):
    """Visualize a generated trajectory against the ground truth.

    NOTE(review): this definition is truncated in this excerpt — only the
    coordinate un-normalization is visible; the plotting code that uses
    x/y/real_x/real_y lies outside this view.

    Args:
        fake_traj: array-like of normalized (x, y) points — assumed shape
            (N, 2); only columns 0 and 1 are read here. TODO confirm.
        real_traj: ground-truth points in the same normalized format.
        t: not used in the visible portion.
        step: not used in the visible portion (presumably the training
            step used to label/save the figure — verify against caller).
    """
    fake_xy = fake_traj
    # Rescale normalized coordinates back to metric units using the
    # configured maximum distance.
    x = fake_xy[:, 0] * opt.max_dist
    y = fake_xy[:, 1] * opt.max_dist
    real_xy = real_traj
    real_x = real_xy[:, 0] * opt.max_dist
    real_y = real_xy[:, 1] * opt.max_dist
# Example #2
# 0
# Losses: MSE for the start point and the trajectory points, BCE for the
# discriminator's real/fake decision (device transfer left disabled).
start_point_criterion = torch.nn.MSELoss()
criterion = torch.nn.BCELoss()  #.to(device)
trajectory_criterion = torch.nn.MSELoss()
# RMSprop for both GAN players; the Adam alternatives tried earlier are
# kept commented out for reference.
g_optimizer = torch.optim.RMSprop(generator.parameters(),
                                  lr=opt.lr,
                                  weight_decay=opt.weight_decay)
#g_optimizer = torch.optim.Adam(generator.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
d_optimizer = torch.optim.RMSprop(discriminator.parameters(),
                                  lr=opt.lr,
                                  weight_decay=opt.weight_decay)
#d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)

# train_loader = DataLoader(CostMapDataset(data_index=[item for item in range(1,10)], opt=opt, dataset_path='/media/wang/DATASET/CARLA_HUMAN/town01/'), batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
# test_loader = DataLoader(CostMapDataset(data_index=[10], opt=opt, dataset_path='/media/wang/DATASET/CARLA_HUMAN/town01/'), batch_size=1, shuffle=False, num_workers=1)
# test_samples = iter(test_loader)
# NOTE(review): "_unsafe" suggests plain yaml.load — keep param file trusted.
param = parse_yaml_file_unsafe('./param_oxford.yaml')
# Training data: deterministic order (shuffle=False), multi-worker loading.
train_loader = DataLoader(GANDataset(param, mode='train', opt=opt),
                          batch_size=opt.batch_size,
                          shuffle=False,
                          num_workers=opt.n_cpu)
train_samples = iter(train_loader)

# Evaluation data: single-sample batches, single worker.
test_loader = DataLoader(GANDataset(param, mode='eval', opt=opt),
                         batch_size=1,
                         shuffle=False,
                         num_workers=1)
# Manual iterators let the training loop pull batches on demand.
test_samples = iter(test_loader)


def test_traj(xs, ys, step):
    """Plot candidate trajectories for inspection.

    NOTE(review): truncated in this excerpt — only the figure creation is
    visible; the actual plotting of `xs`/`ys` and any use of `step`
    (presumably for labeling/saving) lie outside this view.
    """
    fig = plt.figure(figsize=(7, 7))
# Example #3
# 0
# Free-text description of this run; recorded with the hyper-parameters.
description = 'change costmap'
# TensorBoard log directory keyed by the dataset/run name.
log_path = 'result/log/' + opt.dataset_name + '/'
# Ensure output directories exist (no-op when already present).
os.makedirs('result/saved_models/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output/%s' % opt.dataset_name, exist_ok=True)
# Only create the logger and persist the run parameters when training.
if not opt.test_mode:
    logger = SummaryWriter(log_dir=log_path)
    write_params(log_path, parser, description)

# GRU-based trajectory model; 256 presumably sets the hidden/feature
# width — confirm against ModelGRU's constructor.
model = ModelGRU(256).to(device)
#model.load_state_dict(torch.load('result/saved_models/kitti-train-ours-01/model_396000.pth'))
criterion = torch.nn.MSELoss().to(device)
optimizer = torch.optim.Adam(model.parameters(),
                             lr=opt.lr,
                             weight_decay=opt.weight_decay)

# NOTE(review): "_unsafe" suggests plain yaml.load — keep this file trusted.
param = cu.parse_yaml_file_unsafe(
    '../../learning/robo_dataset_utils/params/param_kitti.yaml')
trajectory_dataset = TrajectoryDataset(param, 'train')  #7
# Training loader: deterministic order (shuffle=False), multi-worker.
dataloader = DataLoader(trajectory_dataset,
                        batch_size=opt.batch_size,
                        shuffle=False,
                        num_workers=opt.n_cpu)

eval_trajectory_dataset = TrajectoryDataset(param, 'eval')  #2
# Evaluation loader: single-sample batches.
dataloader_eval = DataLoader(eval_trajectory_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=opt.n_cpu)
# Manual iterator so the training loop can pull eval samples on demand.
eval_samples = iter(dataloader_eval)


def xy2uv(x, y):
# Example #4
# 0
# Free-text description of this run; recorded with the hyper-parameters.
description = 'change costmap'
# TensorBoard log directory keyed by the dataset/run name.
log_path = 'result/log/' + opt.dataset_name + '/'
# Ensure output directories exist (no-op when already present).
os.makedirs('result/saved_models/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output/%s' % opt.dataset_name, exist_ok=True)
# Only create the logger and persist the run parameters when training.
if not opt.test_mode:
    logger = SummaryWriter(log_dir=log_path)
    write_params(log_path, parser, description)

# RNN + mixture-density-network model; 256 presumably sets the
# hidden/feature width — confirm against RNN_MDN's constructor.
model = RNN_MDN(256).to(device)
criterion = torch.nn.MSELoss().to(device)
optimizer = torch.optim.Adam(model.parameters(),
                             lr=opt.lr,
                             weight_decay=opt.weight_decay)

# NOTE(review): "_unsafe" suggests plain yaml.load — keep this file trusted.
param = cu.parse_yaml_file_unsafe('robo_dataset_utils/params/param_kitti.yaml')
trajectory_dataset = TrajectoryDataset_CNNFC(param, 'train')  #7
# Training loader: deterministic order (shuffle=False), multi-worker.
dataloader = DataLoader(trajectory_dataset,
                        batch_size=opt.batch_size,
                        shuffle=False,
                        num_workers=opt.n_cpu)

eval_trajectory_dataset = TrajectoryDataset_CNNFC(param, 'eval')  #2
# Evaluation loader: single-sample batches, single worker.
dataloader_eval = DataLoader(eval_trajectory_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=1)
# Manual iterator so the training loop can pull eval samples on demand.
eval_samples = iter(dataloader_eval)


def xy2uv(x, y):
# Example #5
# 0
# TensorBoard logger plus a dump of the run hyper-parameters.
logger = SummaryWriter(log_dir=log_path)
write_params(log_path, parser, description)

# Sensor rig: camera and lidar share the same mounting transform
# (0.5 m forward, 2.5 m up in the vehicle frame); per-sensor callbacks
# are left disabled.
sensor_dict = {
    'camera': {
        'transform': carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
        # 'callback':image_callback,
    },
    'lidar': {
        'transform': carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
        # 'callback':lidar_callback,
    },
}

# param = Param()
# NOTE(review): "_unsafe" suggests plain yaml.load — keep this file trusted.
param = cu.parse_yaml_file_unsafe('./param.yaml')
# Pseudo (offline) sensor standing in for a live CARLA camera.
sensor = cu.PesudoSensor(sensor_dict['camera']['transform'], config['camera'])
sensor_master = CarlaSensorMaster(sensor,
                                  sensor_dict['camera']['transform'],
                                  binded=True)
# collect_perspective = CollectPerspectiveImage(param, sensor_master)
camera_param = cu.CameraParams(sensor)
# import pdb; pdb.set_trace()
# Projects between image pixels and the vehicle frame using the camera
# intrinsics (K_augment) and the image-to-IMU extrinsic transform —
# presumably; verify against PerspectiveMapping.
pm = PerspectiveMapping(param, camera_param.K_augment, camera_param.T_img_imu)

# GAN components. Input widths suggest: generator = latent vector +
# 256-dim image feature + 2 scalars; discriminator = flattened trajectory
# (points_num * 2) + feature + 1 scalar — inferred from the arithmetic;
# confirm against the model definitions.
generator = Generator(opt.vector_dim + 256 + 1 + 1).to(device)
discriminator = Discriminator(opt.points_num * 2 + 256 + 1).to(device)
encoder = CNN(input_dim=3, out_dim=256).to(device)
# discriminator.load_state_dict(torch.load('result/saved_models/train-cgan-12/discriminator_1000.pth'))
# generator.load_state_dict(torch.load('result/saved_models/train-cgan-12/generator_1000.pth'))
# encoder.load_state_dict(torch.load('result/saved_models/train-cgan-12/encoder_1000.pth'))