def load_data(self, config_dataprep, logger):
    """Build dataset/loader pairs for the configured phase.

    For phase "train" a validation loader is built as well (with data
    augmentation disabled); for phase "test" the validation slots are 0.
    Note that the data can either be padded or not, depending on the
    boolean self.args.lstm_pool.

    :param config_dataprep: Configurations for data-loading
    :param logger: Logger in order to log during data-loading
    :return: dset, loader, dset_val, val_loader
    """
    print("load data...")

    phase = self.args.phase
    if phase not in ("train", "test"):
        raise ValueError("Please choose either train or test as phase!")

    # The primary loader always matches the requested phase.
    dset, loader = data_loader(self.args,
                               config_dataprep,
                               phase=phase,
                               logger=logger)

    if phase == "train":
        # Validation data is never augmented.
        self.args.data_augmentation = False
        dset_val, val_loader = data_loader(self.args,
                                           config_dataprep,
                                           phase="val",
                                           logger=logger)
    else:
        dset_val, val_loader = 0, 0

    print("data loaded.")

    return dset, loader, dset_val, val_loader
def main(args):
    """Restore a trained generator from a checkpoint and plot its trajectories.

    :param args: parsed namespace providing resume, dataset_name, dset_type
        plus whatever data_loader/plot_trajectory consume
    """
    # Recover the generator weights saved at args.resume.
    checkpoint = torch.load(args.resume)
    generator = get_generator(checkpoint)
    # Resolve the dataset split directory, then build its loader.
    dset_path = get_dset_path(args.dataset_name, args.dset_type)
    _, loader = data_loader(args, dset_path)
    plot_trajectory(args, loader, generator)
def main(args):
    """Load a generator checkpoint, run evaluation, and inspect the result.

    :param args: parsed namespace providing resume, dataset_name, dset_type
    """
    generator = get_generator(torch.load(args.resume))
    dataset_path = get_dset_path(args.dataset_name, args.dset_type)
    _, loader = data_loader(args, dataset_path)
    prediction = evaluate(args, loader, generator)
    # Report how many predictions were produced and peek at the first one.
    print(len(prediction))
    print(prediction[0])
def main(args):
    """Evaluate a saved generator and report ADE/FDE for the dataset.

    :param args: parsed namespace providing resume, dataset_name,
        dset_type and pred_len
    """
    generator = get_generator(torch.load(args.resume))
    dataset_path = get_dset_path(args.dataset_name, args.dset_type)
    _, loader = data_loader(args, dataset_path)
    ade, fde = evaluate(args, loader, generator)
    print("Dataset: {}, Pred Len: {}, ADE: {:.12f}, FDE: {:.12f}".format(
        args.dataset_name, args.pred_len, ade, fde))
# Example #5
# 0
def main(args):
    """Run test-set reconstruction at the final checkpoint, else train.

    :param args: parsed namespace with ckpt_load_iter, max_iter, dataset_dir
    """
    if args.ckpt_load_iter != args.max_iter:
        # Not at the final checkpoint yet: keep training.
        solver = Solver(args)
        solver.train()
        return

    print("Initializing test dataset")
    solver = Solver(args)
    _, test_loader = data_loader(args,
                                 args.dataset_dir,
                                 'test',
                                 shuffle=False)
    solver.recon(test_loader)
# Example #6
# 0
def _print_eval_metrics(metrics, lg_num=None, traj_num=None):
    """Print one block of evaluation metrics.

    :param metrics: 12-tuple as returned by Solver.all_evaluation /
        Solver.evaluate_dist: (ade_min, fde_min, ade_avg, fde_avg,
        ade_std, fde_std, sg_ade_min, sg_ade_avg, sg_ade_std,
        lg_fde_min, lg_fde_avg, lg_fde_std)
    :param lg_num: number of sampled long-term goals; when None the
        "lg_num / traj_num" header line is omitted (evaluate_dist case)
    :param traj_num: number of trajectories sampled per goal
    """
    (ade_min, fde_min, ade_avg, fde_avg, ade_std, fde_std,
     sg_ade_min, sg_ade_avg, sg_ade_std,
     lg_fde_min, lg_fde_avg, lg_fde_std) = metrics
    if lg_num is not None:
        print('lg_num: ', lg_num, ' // traj_num: ', traj_num)
    print('ade min: ', ade_min)
    print('ade avg: ', ade_avg)
    print('ade std: ', ade_std)
    print('fde min: ', fde_min)
    print('fde avg: ', fde_avg)
    print('fde std: ', fde_std)
    print('sg_ade_min: ', sg_ade_min)
    print('sg_ade_avg: ', sg_ade_avg)
    print('sg_ade_std: ', sg_ade_std)
    print('lg_fde_min: ', lg_fde_min)
    print('lg_fde_avg: ', lg_fde_avg)
    print('lg_fde_std: ', lg_fde_std)
    print('------------------------------------------')


def main(args):
    """Evaluate pretrained a2a checkpoints on the test split, or train.

    When ``args.ckpt_load_iter == args.max_iter`` the hard-coded
    trajectory / LG-CVAE / SG-CVAE checkpoints below are loaded and
    ``all_evaluation`` is run for two sampling budgets, followed by
    ``evaluate_dist``.  Otherwise a new training run is started.

    :param args: parsed command-line namespace (mutated: batch_size)
    """
    if args.ckpt_load_iter == args.max_iter:

        print("Initializing test dataset")
        solver = Solver(args)

        print('--------------------', args.dataset_name,
              '----------------------')

        args.batch_size = 3

        # from data.nuscenes.config import Config
        # from data.nuscenes_dataloader import data_generator
        # cfg = Config('nuscenes', False, create_dirs=True)
        # torch.set_default_dtype(torch.float32)
        # log = open('log.txt', 'a+')
        # test_loader = data_generator(cfg, log, split='test', phase='testing',
        #                                  batch_size=args.batch_size, device=args.device, scale=args.scale, shuffle=False)

        _, test_loader = data_loader(args,
                                     args.dataset_dir,
                                     'test',
                                     shuffle=False)

        gh = True  # ask the evaluators to (re)generate heat maps
        print("GEN HEAT MAP: ", gh)
        # NOTE(review): a large dead triple-quoted string holding historical
        # kitti/sdd/path checkpoint configs was removed here; recover it
        # from version control if those configs are needed again.

        # a2a checkpoints to evaluate (directory names encode the config).
        traj_path = 'a2a.traj_zD_20_dr_mlp_0.3_dr_rnn_0.25_enc_hD_64_dec_hD_128_mlpD_256_map_featD_32_map_mlpD_256_lr_0.001_klw_50.0_ll_prior_w_1.0_zfb_2.0_scale_1.0_num_sg_3_coll_th_1.5_w_coll_1.0_beta_1.0_run_0'
        traj_iter = '31140'
        traj_ckpt = {
            'ckpt_dir': os.path.join('ckpts', traj_path),
            'iter': traj_iter
        }
        print('===== TRAJECTORY:', traj_ckpt)

        lg_path = 'a2a.lgcvae_enc_block_1_fcomb_block_2_wD_10_lr_0.0001_lg_klw_1.0_a_0.25_r_2.0_fb_6.0_anneal_e_10_aug_1_llprior_1.0_run_1'
        lg_iter = '20760'
        lg_ckpt = {'ckpt_dir': os.path.join('ckpts', lg_path), 'iter': lg_iter}
        print('===== LG CVAE:', lg_ckpt)

        sg_path = 'a2a.sg_lr_0.001_a_0.25_r_2.0_aug_1_num_sg_3_run_1'
        sg_iter = '20760'
        sg_ckpt = {'ckpt_dir': os.path.join('ckpts', sg_path), 'iter': sg_iter}
        print('===== SG CVAE:', sg_ckpt)

        solver.pretrain_load_checkpoint(traj_ckpt, lg_ckpt, sg_ckpt)
        # solver.make_pred(test_loader, lg_num=20, traj_num=1, generate_heat=True)
        # solver.make_ecfl(test_loader, lg_num=20, traj_num=1, generate_heat=True)

        # solver.make_pred_12sg(test_loader)
        # solver.evaluate_each(test_loader)

        # solver.check_feat(test_loader)

        # solver.plot_traj_var(test_loader)
        # solver.check_feat(test_loader)

        # Evaluate under two sampling budgets, then the distribution metric.
        for lg_num, traj_num in ((4, 1), (20, 1)):
            metrics = solver.all_evaluation(test_loader,
                                            lg_num=lg_num,
                                            traj_num=traj_num,
                                            generate_heat=gh)
            _print_eval_metrics(metrics, lg_num, traj_num)

        _print_eval_metrics(solver.evaluate_dist(test_loader, loss=False))

    else:
        solver = Solver(args)
        solver.train()
# Example #7
# 0
def _print_eval_metrics(metrics, lg_num=None, traj_num=None):
    """Print one block of evaluation metrics.

    :param metrics: 12-tuple as returned by Solver.all_evaluation /
        Solver.evaluate_dist: (ade_min, fde_min, ade_avg, fde_avg,
        ade_std, fde_std, sg_ade_min, sg_ade_avg, sg_ade_std,
        lg_fde_min, lg_fde_avg, lg_fde_std)
    :param lg_num: number of sampled long-term goals; when None the
        "lg_num / traj_num" header line is omitted (evaluate_dist case)
    :param traj_num: number of trajectories sampled per goal
    """
    (ade_min, fde_min, ade_avg, fde_avg, ade_std, fde_std,
     sg_ade_min, sg_ade_avg, sg_ade_std,
     lg_fde_min, lg_fde_avg, lg_fde_std) = metrics
    if lg_num is not None:
        print('lg_num: ', lg_num, ' // traj_num: ', traj_num)
    print('ade min: ', ade_min)
    print('ade avg: ', ade_avg)
    print('ade std: ', ade_std)
    print('fde min: ', fde_min)
    print('fde avg: ', fde_avg)
    print('fde std: ', fde_std)
    print('sg_ade_min: ', sg_ade_min)
    print('sg_ade_avg: ', sg_ade_avg)
    print('sg_ade_std: ', sg_ade_std)
    print('lg_fde_min: ', lg_fde_min)
    print('lg_fde_avg: ', lg_fde_avg)
    print('lg_fde_std: ', lg_fde_std)
    print('------------------------------------------')


def main(args):
    """Evaluate pretrained trajectory + LG-CVAE checkpoints, or train.

    When ``args.ckpt_load_iter == args.max_iter`` the test split is
    loaded, baseline metrics are computed, the hard-coded checkpoints
    are restored and ``all_evaluation`` runs for two sampling budgets
    followed by ``evaluate_dist``.  Otherwise a new training run starts.

    :param args: parsed command-line namespace (mutated: batch_size)
    """
    if args.ckpt_load_iter == args.max_iter:

        print("Initializing test dataset")
        solver = Solver(args)

        print('--------------------', args.dataset_name,
              '----------------------')
        test_path = os.path.join(args.dataset_dir, args.dataset_name, 'test')
        args.batch_size = 5
        _, test_loader = data_loader(args, test_path, shuffle=True)

        # Baseline metrics / feature check before loading pretrained parts.
        solver.evaluate_dist(test_loader, loss=False)
        solver.check_feat(test_loader)

        gh = True  # ask the evaluators to (re)generate heat maps
        print("GEN HEAT MAP: ", gh)

        traj_path = 'traj_zD_20_dr_mlp_0.3_dr_rnn_0.25_enc_hD_64_dec_hD_128_mlpD_256_map_featD_32_map_mlpD_256_lr_0.001_klw_50.0_ll_prior_w_1.0_zfb_0.07_run_4'

        traj_iter = '13000'
        traj_ckpt = {
            'ckpt_dir': os.path.join('ckpts', traj_path),
            'iter': traj_iter
        }
        print('===== TRAJECTORY:', traj_ckpt)

        lg_path = 'lgcvae_enc_block_1_fcomb_block_2_wD_10_lr_0.001_lg_klw_1_a_0.25_r_2.0_fb_2.0_anneal_e_0_load_e_1_run_21'
        lg_iter = '26000'
        lg_ckpt = {'ckpt_dir': os.path.join('ckpts', lg_path), 'iter': lg_iter}
        # Fix: previously only lg_iter was printed here; print the full
        # checkpoint descriptor for consistency with the TRAJECTORY line.
        print('===== LG CVAE:', lg_ckpt)

        solver.pretrain_load_checkpoint(traj_ckpt, lg_ckpt)

        # solver.plot_traj_var(test_loader)
        # solver.evaluate_dist_gt_goal(test_loader)
        # solver.check_feat(test_loader)

        # Evaluate under two sampling budgets, then the distribution metric.
        for lg_num, traj_num in ((5, 4), (20, 1)):
            metrics = solver.all_evaluation(test_loader,
                                            lg_num=lg_num,
                                            traj_num=traj_num,
                                            generate_heat=gh)
            _print_eval_metrics(metrics, lg_num, traj_num)

        _print_eval_metrics(solver.evaluate_dist(test_loader, loss=False))

    else:
        solver = Solver(args)
        solver.train()
# Example #8
# 0
def main(args):
    """Seed everything, build data loaders, model and optimizer, then run
    the staged training loop, validating and checkpointing in stage 3.

    :param args: parsed command-line namespace (mutated: start_epoch on
        resume)
    """
    # Make runs reproducible across python / numpy / torch (cudnn forced
    # into deterministic, non-benchmark mode).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num
    train_path = get_dset_path(args.dataset_name, "train")
    # NOTE(review): validation data is taken from the "test" split —
    # confirm this is intentional.
    val_path = get_dset_path(args.dataset_name, "test")

    logging.info("Initializing train dataset")
    train_dset, train_loader = data_loader(args, train_path)
    logging.info("Initializing val dataset")
    _, val_loader = data_loader(args, val_path)

    writer = SummaryWriter()

    # GAT layer widths: [traj_lstm_hidden, *hidden_units, graph_lstm_hidden];
    # both hidden_units and heads are comma-separated CLI strings.
    n_units = ([args.traj_lstm_hidden_size] +
               [int(x) for x in args.hidden_units.strip().split(",")] +
               [args.graph_lstm_hidden_size])
    n_heads = [int(x) for x in args.heads.strip().split(",")]

    model = TrajectoryGenerator(
        obs_len=args.obs_len,
        pred_len=args.pred_len,
        traj_lstm_input_size=args.traj_lstm_input_size,
        traj_lstm_hidden_size=args.traj_lstm_hidden_size,
        n_units=n_units,
        n_heads=n_heads,
        graph_network_out_dims=args.graph_network_out_dims,
        dropout=args.dropout,
        alpha=args.alpha,
        graph_lstm_hidden_size=args.graph_lstm_hidden_size,
        noise_dim=args.noise_dim,
        noise_type=args.noise_type,
    )
    model.cuda()
    # Per-parameter-group learning rates; groups without an explicit "lr"
    # fall back to args.lr.
    optimizer = optim.Adam(
        [
            {
                "params": model.traj_lstm_model.parameters(),
                "lr": 1e-2
            },
            {
                "params": model.traj_hidden2pos.parameters()
            },
            {
                "params": model.gatencoder.parameters(),
                "lr": 3e-2
            },
            {
                "params": model.graph_lstm_model.parameters(),
                "lr": 1e-2
            },
            {
                "params": model.traj_gat_hidden2pos.parameters()
            },
            {
                "params": model.pred_lstm_model.parameters()
            },
            {
                "params": model.pred_hidden2pos.parameters()
            },
        ],
        lr=args.lr,
    )
    # best_ade is a module-level global tracking the best validation ADE.
    global best_ade
    if args.resume:
        if os.path.isfile(args.resume):
            logging.info("Restoring from checkpoint {}".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            model.load_state_dict(checkpoint["state_dict"])
            # NOTE(review): the optimizer state stored in the checkpoint is
            # not restored here — confirm this is intentional.
            logging.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint["epoch"]))
        else:
            logging.info("=> no checkpoint found at '{}'".format(args.resume))

    training_step = 1
    for epoch in range(args.start_epoch, args.num_epochs + 1):
        # Staged schedule: step 1 (<150 epochs), step 2 (<250), step 3
        # afterwards; the LR is dropped once on entering stage 3.
        # (What each step trains is decided inside train() — not visible here.)
        if epoch < 150:
            training_step = 1
        elif epoch < 250:
            training_step = 2
        else:
            if epoch == 250:
                for param_group in optimizer.param_groups:
                    param_group["lr"] = 5e-3
            training_step = 3
        train(args, model, train_loader, optimizer, epoch, training_step,
              writer)
        # Only validate and checkpoint during the final stage.
        if training_step == 3:
            ade = validate(args, model, val_loader, epoch, writer)
            is_best = ade < best_ade
            best_ade = min(ade, best_ade)

            save_checkpoint(
                {
                    "epoch": epoch + 1,
                    "state_dict": model.state_dict(),
                    "best_ade": best_ade,
                    "optimizer": optimizer.state_dict(),
                },
                is_best,
                f"./checkpoint/checkpoint{epoch}.pth.tar",
            )
    writer.close()
# Example #9
# 0
    def __init__(self, args):
        """Set up the solver: run name, bookkeeping directories, visdom
        visualisation, the model (newly built or loaded from checkpoint),
        the VAE optimizer and LR scheduler, and — unless evaluating at the
        final iteration — the train/val data loaders.

        :param args: parsed command-line namespace (mutated: num_sg,
            dataset_dir)
        """
        self.args = args
        # num_sg is tied to load_e for this experiment configuration.
        args.num_sg = args.load_e
        # The run name encodes the full hyper-parameter configuration.
        # NOTE(review): the two adjacent string literals concatenate with
        # no separator, producing "...num_sg_<v>ctxtD_...". Likely a
        # missing '_', but fixing it would change existing record/ckpt
        # names on disk — confirm before changing.
        self.name = '%s_bs%s_zD_%s_dr_mlp_%s_dr_rnn_%s_enc_hD_%s_dec_hD_%s_mlpD_%s_lr_%s_klw_%s_ll_prior_w_%s_zfb_%s_scale_%s_num_sg_%s' \
                    'ctxtD_%s_coll_th_%s_w_coll_%s_beta_%s_lr_e_%s_k_%s' % \
                    (args.dataset_name, args.batch_size, args.zS_dim, args.dropout_mlp, args.dropout_rnn, args.encoder_h_dim,
                     args.decoder_h_dim, args.mlp_dim, args.lr_VAE, args.kl_weight,
                     args.ll_prior_w, args.fb, args.scale, args.num_sg, args.context_dim, args.coll_th, args.w_coll, args.beta, args.lr_e, args.k_fold)

        # to be appended by run_id

        # self.use_cuda = args.cuda and torch.cuda.is_available()
        self.device = args.device
        # Fixed constants used by the solver's loss/decoding code
        # (presumably temperature, time step and numerical epsilon — the
        # consuming code is outside this view).
        self.temp = 1.99
        self.dt = 0.4
        self.eps = 1e-9
        self.ll_prior_w = args.ll_prior_w
        # Pick args.num_sg sub-goal indices out of the 12 prediction
        # steps: every (12 // num_sg)-th index, mirrored so the sequence
        # is ascending and ends at step 11.
        self.sg_idx = np.array(range(12))
        self.sg_idx = np.flip(11 - self.sg_idx[::(12 // args.num_sg)])

        # Collision / context hyper-parameters.
        self.coll_th = args.coll_th
        self.beta = args.beta
        self.context_dim = args.context_dim
        self.w_coll = args.w_coll

        self.z_fb = args.fb
        self.scale = args.scale

        self.kl_weight = args.kl_weight
        self.lg_kl_weight = args.lg_kl_weight

        self.max_iter = int(args.max_iter)

        # do it every specified iters
        self.print_iter = args.print_iter
        self.ckpt_save_iter = args.ckpt_save_iter
        self.output_save_iter = args.output_save_iter

        # data info
        # The dataset directory is specialised per cross-validation fold.
        args.dataset_dir = os.path.join(args.dataset_dir, str(args.k_fold))

        self.dataset_dir = args.dataset_dir
        self.dataset_name = args.dataset_name

        # self.N = self.latent_values.shape[0]
        # self.eval_metrics_iter = args.eval_metrics_iter

        # networks and optimizers
        self.batch_size = args.batch_size
        self.zS_dim = args.zS_dim
        self.w_dim = args.w_dim
        self.lr_VAE = args.lr_VAE
        self.beta1_VAE = args.beta1_VAE
        self.beta2_VAE = args.beta2_VAE
        print(args.desc)

        # create dirs: "records", "ckpts", "outputs" (if not exist)
        mkdirs("records")
        mkdirs("ckpts")
        mkdirs("outputs")

        # set run id
        if args.run_id < 0:  # create a new id
            # Scan records/ for the first unused "<name>_run_<k>.txt".
            k = 0
            rfname = os.path.join("records", self.name + '_run_0.txt')
            while os.path.exists(rfname):
                k += 1
                rfname = os.path.join("records", self.name + '_run_%d.txt' % k)
            self.run_id = k
        else:  # user-provided id
            self.run_id = args.run_id

        # finalize name
        self.name = self.name + '_run_' + str(self.run_id)

        # checkpoints
        self.ckpt_dir = os.path.join("ckpts", self.name)

        # visdom setup
        self.viz_on = args.viz_on
        if self.viz_on:
            # One visdom window id per tracked curve.
            self.win_id = dict(recon='win_recon',
                               loss_kl='win_loss_kl',
                               loss_recon='win_loss_recon',
                               ade_min='win_ade_min',
                               fde_min='win_fde_min',
                               ade_avg='win_ade_avg',
                               fde_avg='win_fde_avg',
                               ade_std='win_ade_std',
                               fde_std='win_fde_std',
                               test_loss_recon='win_test_loss_recon',
                               test_loss_kl='win_test_loss_kl',
                               loss_recon_prior='win_loss_recon_prior',
                               loss_coll='win_loss_coll',
                               test_loss_coll='win_test_loss_coll',
                               test_total_coll='win_test_total_coll',
                               total_coll='win_total_coll')
            self.line_gather = DataGather(
                'iter', 'loss_recon', 'loss_kl', 'loss_recon_prior', 'ade_min',
                'fde_min', 'ade_avg', 'fde_avg', 'ade_std', 'fde_std',
                'test_loss_recon', 'test_loss_kl', 'test_loss_coll',
                'loss_coll', 'test_total_coll', 'total_coll')

            self.viz_port = args.viz_port  # port number, eg, 8097
            self.viz = visdom.Visdom(port=self.viz_port, env=self.name)
            self.viz_ll_iter = args.viz_ll_iter
            self.viz_la_iter = args.viz_la_iter

            self.viz_init()
        #### create a new model or load a previously saved model

        self.ckpt_load_iter = args.ckpt_load_iter

        # Observation / prediction horizon lengths (in time steps).
        self.obs_len = 8
        self.pred_len = 12
        self.num_layers = args.num_layers
        self.decoder_h_dim = args.decoder_h_dim

        if self.ckpt_load_iter == 0 or args.dataset_name == 'all':  # create a new model
            lg_cvae_path = 'large.lgcvae_enc_block_1_fcomb_block_2_wD_10_lr_0.0001_lg_klw_1.0_a_0.25_r_2.0_fb_5.0_anneal_e_10_load_e_3_run_4'
            lg_cvae_path = os.path.join('ckpts', lg_cvae_path,
                                        'iter_150_lg_cvae.pt')
            # NOTE(review): self.lg_cvae is only assigned on CUDA devices;
            # on CPU a later access would raise AttributeError — confirm.
            if self.device == 'cuda':
                self.lg_cvae = torch.load(lg_cvae_path)

            # Posterior encoder over the observed past (with map features).
            self.encoderMx = EncoderX(args.zS_dim,
                                      enc_h_dim=args.encoder_h_dim,
                                      mlp_dim=args.mlp_dim,
                                      map_mlp_dim=args.map_mlp_dim,
                                      map_feat_dim=args.map_feat_dim,
                                      num_layers=args.num_layers,
                                      dropout_mlp=args.dropout_mlp,
                                      dropout_rnn=args.dropout_rnn,
                                      device=self.device).to(self.device)
            # Posterior encoder over the future trajectory.
            self.encoderMy = EncoderY(args.zS_dim,
                                      enc_h_dim=args.encoder_h_dim,
                                      mlp_dim=args.mlp_dim,
                                      num_layers=args.num_layers,
                                      dropout_mlp=args.dropout_mlp,
                                      dropout_rnn=args.dropout_rnn,
                                      device=self.device).to(self.device)
            # Trajectory decoder conditioned on z.
            self.decoderMy = Decoder(args.pred_len,
                                     dec_h_dim=self.decoder_h_dim,
                                     enc_h_dim=args.encoder_h_dim,
                                     mlp_dim=args.mlp_dim,
                                     z_dim=args.zS_dim,
                                     num_layers=args.num_layers,
                                     device=args.device,
                                     dropout_rnn=args.dropout_rnn,
                                     scale=args.scale,
                                     dt=self.dt,
                                     context_dim=args.context_dim).to(
                                         self.device)

        else:  # load a previously saved model
            print('Loading saved models (iter: %d)...' % self.ckpt_load_iter)
            self.load_checkpoint()
            print('...done')

        # get VAE parameters
        vae_params = \
            list(self.encoderMx.parameters()) + \
            list(self.encoderMy.parameters()) + \
            list(self.decoderMy.parameters())
        # create optimizers
        self.optim_vae = optim.Adam(vae_params,
                                    lr=self.lr_VAE,
                                    betas=[self.beta1_VAE, self.beta2_VAE])

        # Exponential LR decay: multiplier args.lr_e ** epoch.
        self.scheduler = optim.lr_scheduler.LambdaLR(
            optimizer=self.optim_vae, lr_lambda=lambda epoch: args.lr_e**epoch)

        print('Start loading data...')

        # Loaders are only needed when we are not merely evaluating the
        # final checkpoint.
        if self.ckpt_load_iter != self.max_iter:
            print("Initializing train dataset")
            _, self.train_loader = data_loader(self.args,
                                               args.dataset_dir,
                                               'train',
                                               shuffle=True)
            print("Initializing val dataset")
            _, self.val_loader = data_loader(self.args,
                                             args.dataset_dir,
                                             'val',
                                             shuffle=True)

            print('There are {} iterations per epoch'.format(
                len(self.train_loader.dataset) / args.batch_size))
        print('...done')
# Example #10
# 0
def _print_eval_metrics(metrics, lg_num=None, traj_num=None):
    """Print one block of evaluation metrics.

    :param metrics: 12-tuple as returned by Solver.all_evaluation /
        Solver.evaluate_dist: (ade_min, fde_min, ade_avg, fde_avg,
        ade_std, fde_std, sg_ade_min, sg_ade_avg, sg_ade_std,
        lg_fde_min, lg_fde_avg, lg_fde_std)
    :param lg_num: number of sampled long-term goals; when None the
        "lg_num / traj_num" header line is omitted (evaluate_dist case)
    :param traj_num: number of trajectories sampled per goal
    """
    (ade_min, fde_min, ade_avg, fde_avg, ade_std, fde_std,
     sg_ade_min, sg_ade_avg, sg_ade_std,
     lg_fde_min, lg_fde_avg, lg_fde_std) = metrics
    if lg_num is not None:
        print('lg_num: ', lg_num, ' // traj_num: ', traj_num)
    print('ade min: ', ade_min)
    print('ade avg: ', ade_avg)
    print('ade std: ', ade_std)
    print('fde min: ', fde_min)
    print('fde avg: ', fde_avg)
    print('fde std: ', fde_std)
    print('sg_ade_min: ', sg_ade_min)
    print('sg_ade_avg: ', sg_ade_avg)
    print('sg_ade_std: ', sg_ade_std)
    print('lg_fde_min: ', lg_fde_min)
    print('lg_fde_avg: ', lg_fde_avg)
    print('lg_fde_std: ', lg_fde_std)
    print('------------------------------------------')


def main(args):
    """Evaluate pretrained sdd checkpoints on the test split, or train.

    When ``args.ckpt_load_iter == args.max_iter`` the test split is
    loaded, a loss-mode distribution evaluation runs, the hard-coded
    trajectory / LG-CVAE checkpoints are restored and ``all_evaluation``
    runs for two sampling budgets followed by ``evaluate_dist``.
    Otherwise a new training run starts.

    :param args: parsed command-line namespace (mutated: batch_size)
    """
    if args.ckpt_load_iter == args.max_iter:

        print("Initializing test dataset")
        solver = Solver(args)

        print('--------------------', args.dataset_name, '----------------------')
        args.batch_size = 4

        # cfg = Config('nuscenes', False, create_dirs=True)
        # torch.set_default_dtype(torch.float32)
        # log = open('log.txt', 'a+')
        # test_loader = data_generator(cfg, log, split='test', phase='testing',
        #                              batch_size=args.batch_size, device=args.device, scale=args.scale, shuffle=False)

        _, test_loader = data_loader(args, args.dataset_dir, 'test', shuffle=False)

        # solver.load_checkpoint()
        # solver.check_feat(test_loader)

        # solver.evaluate_lg(test_loader, num_gen=3)
        # solver.evaluate_each(test_loader)
        # solver.collision_stat(test_loader)
        # Loss-mode distribution evaluation before loading pretrained parts.
        solver.evaluate_dist(test_loader, loss=True)

        gh = True  # ask the evaluators to (re)generate heat maps
        print("GEN HEAT MAP: ", gh)

        traj_path = 'sdd.traj_zD_20_dr_mlp_0.3_dr_rnn_0.25_enc_hD_64_dec_hD_128_mlpD_256_map_featD_32_map_mlpD_256_lr_0.001_klw_50.0_ll_prior_w_1.0_zfb_0.07_scale_100.0_num_sg_3_run_200'

        traj_iter = '15000'
        traj_ckpt = {'ckpt_dir': os.path.join('ckpts', traj_path), 'iter': traj_iter}
        print('===== TRAJECTORY:', traj_ckpt)

        # lg_path = 'lgcvae_enc_block_1_fcomb_block_2_wD_20_lr_0.001_lg_klw_1_a_0.25_r_2.0_fb_0.5_anneal_e_0_load_e_1_run_24'
        # lg_iter = '57100'

        lg_path = 'sdd.lgcvae_enc_block_1_fcomb_block_2_wD_20_lr_0.0001_lg_klw_1.0_a_0.25_r_2.0_fb_0.5_anneal_e_0_aug_1_run_181'
        lg_iter = '43000'
        lg_ckpt = {'ckpt_dir': os.path.join('ckpts', lg_path), 'iter': lg_iter}
        print('===== LG CVAE:', lg_ckpt)

        solver.pretrain_load_checkpoint(traj_ckpt, lg_ckpt)

        # solver.check_feat(test_loader)

        # solver.plot_traj_var(test_loader)
        # solver.evaluate_dist_gt_goal(test_loader)
        # solver.check_feat(test_loader)

        # Evaluate under two sampling budgets, then the distribution metric.
        for lg_num, traj_num in ((20, 1), (10, 2)):
            metrics = solver.all_evaluation(test_loader,
                                            lg_num=lg_num,
                                            traj_num=traj_num,
                                            generate_heat=gh)
            _print_eval_metrics(metrics, lg_num, traj_num)

        _print_eval_metrics(solver.evaluate_dist(test_loader, loss=False))

    else:
        solver = Solver(args)
        solver.train()
# Example #11
# 0
import seaborn as sns

from maf import MAF
from utils_maf import (
    val_maf,
    test_maf,
    sample_digits_maf,
)
from data.loader import data_loader

# Evaluation script for a trained MAF (Masked Autoregressive Flow) model.
# Loads a saved checkpoint, scores it on the test and validation splits,
# then maps one large test batch into latent space for plotting.
string = "maf_mnist_512"  # checkpoint base name under model_saves/ (shadows the stdlib `string` module)
dataset = "mnist"
batch_size = 128

# NOTE(review): torch.load unpickles arbitrary code — only load trusted checkpoints.
model = torch.load("model_saves/" + string + ".pt")
# data_loader here is the (dataset, batch_size) variant from data.loader,
# returning the raw train split plus three loaders and the input dimension.
train, train_loader, val_loader, test_loader, n_in = data_loader(
    dataset, batch_size)
# `train` is passed alongside each loader — presumably used for normalization
# statistics inside test_maf/val_maf; TODO confirm against utils_maf.
test_maf(model, train, test_loader)
val_maf(model, train, val_loader)
# sample_digits_maf(model, "test")

if dataset == "mnist":
    # Ensure the figure output directory exists before plotting below.
    if not os.path.exists("figs"):
        os.makedirs("figs")
    # Re-create the test loader with a large batch so one batch suffices for the plot.
    _, _, _, test_loader, _ = data_loader(dataset, batch_size=1000)
    model.eval()
    batch = next(iter(test_loader))
    # Forward pass returns (u, ...); keep only the latent codes as a numpy array.
    u = model(batch)[0].detach().numpy()
    fig, axes = plt.subplots(ncols=6,
                             nrows=4,
                             sharex=True,
                             sharey=True,
Exemple #12
0
def detect(opt, save_img=False):
    """Run YOLO detection + DeepSORT tracking over a video/image/stream source.

    Per frame: run the detector, hand detections to DeepSORT for ID tracking,
    append MOT-format rows to the module-level ``rolling_data`` window, and
    feed that window as a string to ``data_loader``/``evaluate`` (defined
    elsewhere in this file) to produce trajectory predictions every frame.

    :param opt: argparse-style namespace (output, source, weights, view_img,
                save_txt, img_size, device, augment, conf_thres, iou_thres,
                classes, agnostic_nms, config_deepsort, fourcc)
    :param save_img: write annotated frames/videos to disk
                     (forced True below for non-webcam sources)
    """
    global rolling_data
    # Unpack CLI options; ``source`` decides webcam/stream vs. file input.
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source == '0' or source.startswith('rtsp') or source.startswith(
        'http') or source.endswith('.txt')

    # initialize deepsort
    cfg = get_config()
    cfg.merge_from_file(opt.config_deepsort)
    deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                        max_dist=cfg.DEEPSORT.MAX_DIST,
                        min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP,
                        max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                        max_age=cfg.DEEPSORT.MAX_AGE,
                        n_init=cfg.DEEPSORT.N_INIT,
                        nn_budget=cfg.DEEPSORT.NN_BUDGET,
                        use_cuda=True)

    # Initialize
    device = select_device(opt.device)
    # NOTE(review): the output folder is wiped on every run — prior results are lost.
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    # NOTE(review): torch.load on a .pt file unpickles code — trusted weights only.
    model = torch.load(weights,
                       map_location=device)['model'].float()  # load to FP32
    model.to(device).eval()
    if half:
        model.half()  # to FP16

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        view_img = True
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    # Handle DataParallel-wrapped models, whose attributes live under .module.
    names = model.module.names if hasattr(model, 'module') else model.names

    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    # run once
    # Warm-up forward pass so the first real frame isn't slowed by CUDA init.
    _ = model(img.half() if half else img) if device.type != 'cpu' else None

    save_path = str(Path(out))
    txt_path = str(Path(out)) + '/results.txt'

    for frame_idx, (path, img, im0s, vid_cap) in enumerate(dataset):
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        # Add the batch dimension for single-image inputs.
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)
        t2 = time_synchronized()

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            s += '%gx%g ' % img.shape[2:]  # print string
            save_path = str(Path(out) / Path(p).name)

            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                bbox_xywh = []
                confs = []

                # Adapt detections to deep sort input format
                for *xyxy, conf, cls in det:
                    # Convert corner coords (xyxy) to center/width/height.
                    x_c, y_c, bbox_w, bbox_h = bbox_rel(*xyxy)
                    obj = [x_c, y_c, bbox_w, bbox_h]
                    bbox_xywh.append(obj)
                    confs.append([conf.item()])

                xywhs = torch.Tensor(bbox_xywh)
                confss = torch.Tensor(confs)

                # Pass detections to deepsort
                outputs = deepsort.update(xywhs, confss, im0)

                # draw boxes for visualization
                if len(outputs) > 0:
                    bbox_xyxy = outputs[:, :4]
                    identities = outputs[:, -1]
                    draw_boxes(im0, bbox_xyxy, identities)

                # Cap the newest-first rolling window at 50 rows.
                # NOTE(review): truncation runs *before* this frame's inserts,
                # so the list can temporarily exceed 50 entries; it is also
                # skipped entirely on frames with no detections.
                if len(rolling_data) > 50:
                    rolling_data = rolling_data[:50]

                print(len(rolling_data))
                # Write MOT compliant results to file
                if save_txt and len(outputs) != 0:
                    for j, output in enumerate(outputs):
                        bbox_left = output[0]
                        bbox_top = output[1]
                        bbox_w = output[2]
                        bbox_h = output[3]
                        identity = output[-1]

                        # Newest rows go to the front of the rolling window.
                        rolling_data.insert(0, [
                            frame_idx, identity, bbox_left, bbox_top, bbox_w,
                            bbox_h
                        ])

                        print('inserted! now', len(rolling_data))
                        # NOTE(review): reopening the file per detection is slow;
                        # consider opening once per frame.
                        with open(txt_path, 'a') as f:
                            f.write(('%g ' * 10 + '\n') %
                                    (frame_idx, identity, bbox_left, bbox_top,
                                     bbox_w, bbox_h, -1, -1, -1,
                                     -1))  # label format

            else:
                # No detections this frame: age existing tracks so stale IDs expire.
                deepsort.increment_ages()

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))
            # create a new dataset and predict every t2-t1 seconds
            # load dataset
            # eval w/ prediction
            # ???
            # profit

            # Serialize the rolling window into whitespace-separated rows,
            # one tracked box per line, for the trajectory data loader.
            data_string = ''
            for entry in rolling_data:
                new_line = ''
                for d in entry:
                    new_line += str(d) + ' '
                data_string += new_line + '\n'
            # print(data_string)

            _, loader = data_loader(
                None,
                data_string)  # this must be done dynamically (every x frames)
            # 'DATA' needs to be a string in file format, ex. 1 2 10 10 20 20\n
            #                                                 2 2 11 10 20 20\n and so on.
            #  also needs to be called every frame (may need to optimize)
            tt1 = time.time()
            # `generator` is presumably a module-level trajectory model loaded
            # elsewhere in this file — TODO confirm; `predictions.txt` is
            # overwritten every frame.
            prediction = evaluate(loader, generator)
            print(prediction)
            np.savetxt('predictions.txt', prediction)
            print('time taken', time.time() - tt1, 'seconds')

            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                print('saving img!')
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    print('saving video!')
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release(
                            )  # release previous video writer

                        # Match the output video's fps/size to the input capture.
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*opt.fourcc),
                            fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin':  # MacOS
            os.system('open ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))