Example 1
                    type=float,
                    default=0.05,
                    help='discretization minimum time interval')
parser.add_argument('--rnn_steps',
                    type=int,
                    default=10,
                    help='rnn readout steps')
args = parser.parse_args()

data_index = args.data
save_path = '/media/wang/DATASET/CARLA/town01/' + str(data_index) + '/'

log_path = '/home/cz/result/log/' + args.name + '/'
ckpt_path = '/home/cz/result/saved_models/%s' % args.name
logger = SummaryWriter(log_dir=log_path)
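# Trajectory generator and TD3 agent; is_fix_policy_net=True keeps the policy network frozen during training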
generator = Generator(input_dim=1 + 1 + args.vector_dim, output=2).to(device)
model = TD3(args=args,
            buffer_size=1e5,
            noise_decay_steps=3e3,
            batch_size=64,
            logger=logger,
            policy_freq=5,
            is_fix_policy_net=True)  #48 85
# encoder = EncoderWithV(input_dim=6, out_dim=args.vector_dim).to(device)
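# Warm-start the policy and value networks from pretrained checkpoints (paths are machine-specific)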
try:
    model.policy_net.load_state_dict(
        torch.load(
            '/home/cz/Downloads/learning-uncertainty-master/scripts/encoder_e2e.pth'
        ))
    # model.policy_net.load_state_dict(torch.load('/home/cz/result/saved_models/rl-train-img-nav-04-train/115_policy_net.pkl'))
    model.value_net1.load_state_dict(
Example 2
if opt.test_mode: opt.batch_size = 1

description = 'dropout'
log_path = 'result/log/' + opt.dataset_name + '/'
os.makedirs('result/saved_models/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output2/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output3/%s' % opt.dataset_name, exist_ok=True)
if not opt.test_mode:
    logger = SummaryWriter(log_dir=log_path)
    write_params(log_path, parser, description)

# generator = Generator(input_dim=128+32+1+1, output=2).to(device)
# discriminator = Discriminator(opt.points_num*2+32+1).to(device)

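# Generator/discriminator pair plus a pretrained CNN encoder used as a frozen feature extractor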
generator = Generator(input_dim=2 + 2 + 1 + 1, output=2).to(device)
discriminator = Discriminator(opt.points_num * 2 + 2 + 1).to(device)
# encoder = CNN(input_dim=1, out_dim=32).to(device)
encoder = CNNNorm(input_dim=1, out_dim=2).to(device)
encoder.load_state_dict(
    torch.load('result/saved_models/il-uncertainty-02/encoder_119000.pth'))
# DO NOT TRAIN ENCODER
encoder.eval()
# discriminator.load_state_dict(torch.load('result/saved_models/train-gan-costmap-01/discriminator_120000.pth'))
generator.load_state_dict(
    torch.load(
        'result/saved_models/train-gan-costmap-01/generator_120000.pth'))

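# Losses: MSE for start-point and trajectory regression, BCE for the adversarial term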
start_point_criterion = torch.nn.MSELoss()
criterion = torch.nn.BCELoss()  #.to(device)
trajectory_criterion = torch.nn.MSELoss()
Example 3
parser.add_argument('--max_dist', type=float, default=25., help='max distance')
parser.add_argument('--max_speed', type=float, default=10., help='max speed')
parser.add_argument('--max_t', type=float, default=3., help='max time')
opt = parser.parse_args()
if opt.test_mode: opt.batch_size = 1

description = 'wgan-gp mirror v0/opt.max_speed'
log_path = 'result/log/' + opt.dataset_name + '/'
os.makedirs('result/saved_models/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output2/%s' % opt.dataset_name, exist_ok=True)
if not opt.test_mode:
    logger = SummaryWriter(log_dir=log_path)
    write_params(log_path, parser, description)

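# WGAN-GP generator and discriminator; earlier checkpoints can be restored via the commented lines below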
generator = Generator(opt.vector_dim + 2).to(device)
discriminator = Discriminator(opt.points_num * 2 + 1).to(device)
#discriminator.load_state_dict(torch.load('result/saved_models/wgan-gp-10/discriminator_40000.pth'))
#generator.load_state_dict(torch.load('result/saved_models/wgan-gp-10/generator_40000.pth'))

start_point_criterion = torch.nn.MSELoss()
criterion = torch.nn.BCELoss()  #.to(device)
trajectory_criterion = torch.nn.MSELoss()
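# RMSprop optimizers for both networks (common for WGAN training); Adam alternatives are left commented out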
g_optimizer = torch.optim.RMSprop(generator.parameters(),
                                  lr=opt.lr,
                                  weight_decay=opt.weight_decay)
#g_optimizer = torch.optim.Adam(generator.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
d_optimizer = torch.optim.RMSprop(discriminator.parameters(),
                                  lr=opt.lr,
                                  weight_decay=opt.weight_decay)
#d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
Example 4
global_dict['max_steer_angle'] = 0.
global_dict['ipm_image'] = np.zeros((200, 400), dtype=np.uint8)
global_dict['ipm_image'].fill(255)
global_dict['trans_costmap_dict'] = {}
global_dict['state0'] = None
global_dict['start_control'] = False

random.seed(datetime.now().timestamp())  # seed from wall-clock time; random.seed() expects an int/float/str rather than a datetime object
torch.manual_seed(999)
torch.cuda.manual_seed(999)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

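# Load the pretrained U-Net generator and the trajectory generator, and put both in eval mode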
generator = GeneratorUNet()
generator = generator.to(device)
generator.load_state_dict(torch.load('../ckpt/g.pth'))
trajectory_model = Generator(4).to(device)
trajectory_model.load_state_dict(
    torch.load('../result/saved_models/train-gan-02/generator_20000.pth'))
trajectory_model.eval()
generator.eval()

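# Command-line options: run name and whether to save results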
parser = argparse.ArgumentParser(description='Params')
parser.add_argument('--name',
                    type=str,
                    default="rl-train-04",
                    help='name of the script')
parser.add_argument('-s',
                    '--save',
                    action='store_true',  # argparse's type=bool does not parse string values correctly
                    help='save result')
Example 5
        # 'callback':lidar_callback,
    },
}

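# Pseudo camera sensor and perspective-mapping setup built from the YAML parameters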
# param = Param()
param = cu.parse_yaml_file_unsafe('./param.yaml')
sensor = cu.PesudoSensor(sensor_dict['camera']['transform'], config['camera'])
sensor_master = CarlaSensorMaster(sensor,
                                  sensor_dict['camera']['transform'],
                                  binded=True)
# collect_perspective = CollectPerspectiveImage(param, sensor_master)
camera_param = cu.CameraParams(sensor)
# import pdb; pdb.set_trace()
pm = PerspectiveMapping(param, camera_param.K_augment, camera_param.T_img_imu)

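# Conditional GAN: the 256-D CNN image feature appears in both the generator and discriminator input dims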
generator = Generator(opt.vector_dim + 256 + 1 + 1).to(device)
discriminator = Discriminator(opt.points_num * 2 + 256 + 1).to(device)
encoder = CNN(input_dim=3, out_dim=256).to(device)
# discriminator.load_state_dict(torch.load('result/saved_models/train-cgan-12/discriminator_1000.pth'))
# generator.load_state_dict(torch.load('result/saved_models/train-cgan-12/generator_1000.pth'))
# encoder.load_state_dict(torch.load('result/saved_models/train-cgan-12/encoder_1000.pth'))
# discriminator.load_state_dict(torch.load('result/saved_models/train-cgan-01/discriminator_10000.pth'))
# generator.load_state_dict(torch.load('result/saved_models/train-cgan-01/generator_10000.pth'))
# encoder.load_state_dict(torch.load('result/saved_models/train-cgan-01/encoder_10000.pth'))
# discriminator.load_state_dict(torch.load('result/saved_models/train-cgan-09/discriminator_87000.pth'))
# generator.load_state_dict(torch.load('result/saved_models/train-cgan-09/generator_87000.pth'))
# encoder.load_state_dict(torch.load('result/saved_models/train-cgan-09/encoder_87000.pth'))

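# MSE losses for start point and trajectory, followed by the RMSprop optimizer for the generator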
start_point_criterion = torch.nn.MSELoss()
trajectory_criterion = torch.nn.MSELoss()
g_optimizer = torch.optim.RMSprop(generator.parameters(),

parser.add_argument('--max_dist', type=float, default=25., help='max distance')
parser.add_argument('--max_speed', type=float, default=10., help='max speed')
parser.add_argument('--max_t', type=float, default=3., help='max time')
opt = parser.parse_args()
if opt.test_mode: opt.batch_size = 1

description = 'CNN'
log_path = 'result/log/' + opt.dataset_name + '/'
os.makedirs('result/saved_models/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output2/%s' % opt.dataset_name, exist_ok=True)
if not opt.test_mode:
    logger = SummaryWriter(log_dir=log_path)
    write_params(log_path, parser, description)

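# Generator and CNN encoder, both restored from the il-uncertainty-02 checkpoints below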
generator = Generator(opt.vector_dim + 2, output=4).to(device)
# generator.load_state_dict(torch.load('./result/saved_models/il-uncertainty-02/generator_119000.pth'))

encoder = CNN(input_dim=1, out_dim=opt.vector_dim).to(device)

encoder.load_state_dict(
    torch.load('./result/saved_models/il-uncertainty-02/encoder_119000.pth'))
generator.load_state_dict(
    torch.load('./result/saved_models/il-uncertainty-02/generator_119000.pth'))

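# MSE loss and separate Adam optimizers for the encoder and generator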
criterion = torch.nn.MSELoss()
e_optimizer = torch.optim.Adam(encoder.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay)
g_optimizer = torch.optim.Adam(generator.parameters(),
                               lr=opt.lr,