import argparse
import datetime
import logging
import os
import socket
import sys

import numpy as np
import pandas as pd
import torch

data_dir = 'datasets/dataset_aux_sgan/train/train'

# Keep only the .ndjson annotation files from the data directory.
files_path = [f for f in os.listdir(data_dir)
              if os.path.splitext(f)[1] == '.ndjson']

all_files = [os.path.join(data_dir, _path) for _path in files_path]
seq_list = []        # one concatenated trajectory array per input file
seq_start_end = []   # number of scenes contributed by each file
for path in all_files:
    # scene_type=None keeps raw scenes; load_all yields (scene_id, scene) pairs
    train_scenes = list(trajnettools.load_all(path, scene_type=None))
    seq_list_prev = [scene for _, scene in train_scenes]
    # record the scene count BEFORE concatenation
    seq_start_end.append(len(seq_list_prev))
    # concatenate scenes along axis 1 -- presumably the pedestrian axis; TODO confirm
    seq_list.append(np.concatenate(seq_list_prev, axis=1))


def remove_nan(train_scenes):
    """Drop NaN-containing rows from each scene.

    Each scene array is converted to nested lists, its 2-D chunks are
    concatenated along axis 1, and every row that contains at least one
    NaN is removed.

    Parameters
    ----------
    train_scenes : sequence of numpy arrays
        Each array's ``tolist()`` must yield 2-D chunks that can be
        concatenated along axis 1.

    Returns
    -------
    list of pandas.DataFrame
        One NaN-free frame per input scene.  (The original version
        discarded the cleaned frames and returned None.)
    """
    list_final = []
    for scene in train_scenes:
        scene_list = scene.tolist()
        conc_scene = np.concatenate(scene_list, axis=1)
        conc_scene_df = pd.DataFrame(conc_scene)
        # keep only rows where every coordinate is present
        list_final.append(conc_scene_df.dropna(axis=0, how='any'))
    return list_final
def main(epochs=50):
    """Train an SGAN trajectory-prediction model.

    Parses command-line arguments, configures JSON logging, loads
    train/val scenes, builds the LSTM generator (and optionally a
    discriminator), and runs the training loop.

    Parameters
    ----------
    epochs : int
        Default number of training epochs (overridable via ``--epochs``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=epochs, type=int,
                        help='number of epochs')
    parser.add_argument('--obs_length', default=9, type=int,
                        help='observation length')
    parser.add_argument('--pred_length', default=12, type=int,
                        help='prediction length')
    # NOTE: help text fixed -- it previously read 'number of epochs'
    parser.add_argument('--batch_size', default=1, type=int,
                        help='batch size')
    parser.add_argument('--lr', default=1e-3, type=float,
                        help='initial learning rate')
    parser.add_argument('--type', default='vanilla',
                        choices=('vanilla', 'occupancy', 'directional', 'social', 'hiddenstatemlp',
                                 'directionalmlp'),
                        help='type of LSTM to train')
    parser.add_argument('-o', '--output', default=None,
                        help='output file')
    parser.add_argument('--disable-cuda', action='store_true',
                        help='disable CUDA')
    parser.add_argument('--front', action='store_true',
                        help='Front pooling')
    parser.add_argument('--fast', action='store_true',
                        help='Fast pooling (Under devpt)')
    parser.add_argument('--path', default='trajdata',
                        help='glob expression for data files')
    parser.add_argument('--loss', default='L2',
                        help='loss function')

    pretrain = parser.add_argument_group('pretraining')
    pretrain.add_argument('--load-state', default=None,
                          help='load a pickled model state dictionary before training')
    pretrain.add_argument('--load-full-state', default=None,
                          help='load a pickled full state dictionary before training')
    pretrain.add_argument('--nonstrict-load-state', default=None,
                          help='load a pickled state dictionary before training')

    hyperparameters = parser.add_argument_group('hyperparameters')
    hyperparameters.add_argument('--k', type=int, default=3,
                                 help='number of samples for variety loss')
    hyperparameters.add_argument('--hidden-dim', type=int, default=128,
                                 help='RNN hidden dimension')
    hyperparameters.add_argument('--coordinate-embedding-dim', type=int, default=64,
                                 help='coordinate embedding dimension')
    hyperparameters.add_argument('--cell_side', type=float, default=1.0,
                                 help='cell size of real world')
    hyperparameters.add_argument('--n', type=int, default=10,
                                 help='number of cells per side')

    hyperparameters.add_argument('--noise_dim', type=int, default=16,
                                 help='dimension of z')
    hyperparameters.add_argument('--add_noise', action='store_true',
                                 help='To Add Noise')
    hyperparameters.add_argument('--noise_type', default='gaussian',
                                 help='type of noise to be added')
    hyperparameters.add_argument('--discriminator', action='store_true',
                                 help='discriminator to be added')
    args = parser.parse_args()

    # torch.autograd.set_detect_anomaly(True)  # debug toggle, off by default

    # set model output file inside OUTPUT_BLOCK/<path>/
    if not os.path.exists('OUTPUT_BLOCK/{}'.format(args.path)):
        os.makedirs('OUTPUT_BLOCK/{}'.format(args.path))
    if args.output:
        args.output = 'OUTPUT_BLOCK/{}/{}_{}.pkl'.format(args.path, args.type, args.output)
    else:
        args.output = 'OUTPUT_BLOCK/{}/{}.pkl'.format(args.path, args.type)

    # configure logging: append when resuming from a full state, else overwrite
    from pythonjsonlogger import jsonlogger
    if args.load_full_state:
        file_handler = logging.FileHandler(args.output + '.log', mode='a')
    else:
        file_handler = logging.FileHandler(args.output + '.log', mode='w')
    file_handler.setFormatter(jsonlogger.JsonFormatter('(message) (levelname) (name) (asctime)'))
    stdout_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(level=logging.INFO, handlers=[stdout_handler, file_handler])
    logging.info({
        'type': 'process',
        'argv': sys.argv,
        'args': vars(args),
        'version': VERSION,
        'hostname': socket.gethostname(),
    })

    # refactor args for --load-state: the three load flags collapse into
    # args.load_state plus a strictness flag
    args.load_state_strict = True
    if args.nonstrict_load_state:
        args.load_state = args.nonstrict_load_state
        args.load_state_strict = False
    if args.load_full_state:
        args.load_state = args.load_full_state

    # add args.device (CUDA deliberately disabled here)
    args.device = torch.device('cpu')
    # if not args.disable_cuda and torch.cuda.is_available():
    #     args.device = torch.device('cuda')

    # read in datasets
    args.path = 'DATA_BLOCK/' + args.path

    train_scenes = list(trajnettools.load_all(args.path + '/train/**/*.ndjson'))
    val_scenes = list(trajnettools.load_all(args.path + '/val/**/*.ndjson'))

    # create model

    # pooling: hiddenstatemlp gets its own pooling; every other non-vanilla
    # type uses grid pooling (fast or regular)
    pool = None
    if args.type == 'hiddenstatemlp':
        pool = HiddenStateMLPPooling(hidden_dim=args.hidden_dim)
    elif args.type != 'vanilla':
        if args.fast:
            pool = FastPooling(type_=args.type, hidden_dim=args.hidden_dim,
                               cell_side=args.cell_side, n=args.n, front=args.front)
        else:
            pool = Pooling(type_=args.type, hidden_dim=args.hidden_dim,
                           cell_side=args.cell_side, n=args.n, front=args.front)

    # generator
    lstm_generator = LSTMGenerator(embedding_dim=args.coordinate_embedding_dim, hidden_dim=args.hidden_dim,
                                   pool=pool, noise_dim=args.noise_dim, add_noise=args.add_noise,
                                   noise_type=args.noise_type)

    # discriminator (optional)
    print("discriminator: ", args.discriminator)
    lstm_discriminator = None
    if args.discriminator:
        lstm_discriminator = LSTMDiscriminator(embedding_dim=args.coordinate_embedding_dim,
                                               hidden_dim=args.hidden_dim, pool=pool)

    # GAN model
    model = SGAN(generator=lstm_generator, discriminator=lstm_discriminator,
                 add_noise=args.add_noise, k=args.k)

    # Default Load
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    lr_scheduler = None
    start_epoch = 0

    # train
    if args.load_state:
        # load pretrained model.
        # useful for transfer learning
        with open(args.load_state, 'rb') as f:
            checkpoint = torch.load(f)
        pretrained_state_dict = checkpoint['state_dict']
        model.load_state_dict(pretrained_state_dict, strict=args.load_state_strict)

        if args.load_full_state:
            # load optimizer/scheduler from last training
            # useful to continue training (note: this optimizer has no weight decay)
            optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 15)
            lr_scheduler.load_state_dict(checkpoint['scheduler'])
            start_epoch = checkpoint['epoch']

    # trainer
    trainer = Trainer(model, optimizer=optimizer, lr_scheduler=lr_scheduler, device=args.device,
                      criterion=args.loss, batch_size=args.batch_size, obs_length=args.obs_length,
                      pred_length=args.pred_length)
    trainer.loop(train_scenes, val_scenes, args.output, epochs=args.epochs, start_epoch=start_epoch)
# Beispiel #3  (NOTE: extraction artifact separating pasted examples; `main` is redefined below)
# 0
def main(epochs=35):
    """Train a scene-prediction model (RRB, RRB_M, EDN or EDN_M).

    Parses command-line arguments, rotates old output files, configures
    JSON logging, loads train/val scenes, builds the selected model
    (loading and freezing pretrained EDN blocks for RRB_M), and runs
    the training loop.

    Parameters
    ----------
    epochs : int
        Default number of training epochs (overridable via ``--epochs``).

    Raises
    ------
    ValueError
        If ``--scene-mode`` is not one of the four known model types.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=epochs, type=int,
                        help='number of epochs')
    parser.add_argument('--scene-mode',
                        help='Model type to be trained, can be RRB, RRB_M, EDN, EDN_M')
    parser.add_argument('--n_obs', default=9, type=int,
                        help='number of observation frames')
    parser.add_argument('--n_pred', default=12, type=int,
                        help='number of prediction frames')
    parser.add_argument('--train-input-files', type=str,
                        help='glob expression for train input files')
    parser.add_argument('--val-input-files',
                        help='glob expression for validation input files')
    parser.add_argument('--disable-cuda', default=1, type=int,
                        help='disable CUDA')
    parser.add_argument('--lr', default=1e-3, type=float,
                        help='initial learning rate')
    pretrain = parser.add_argument_group('pretraining')
    pretrain.add_argument('--load-state', default=None,
                          help='load a pickled state dictionary before training')

    args = parser.parse_args()
    # set model output file
    timestamp = datetime.datetime.utcnow().strftime('%Y_%m_%d_%H%M%S')
    # rename all previous output files to remove the 'active_' prefix
    baseAdd = 'output/'
    myList = os.listdir(baseAdd)
    outFiles = [i for i in myList if (i[:6] == 'active')]
    for i in outFiles:
        os.rename(baseAdd + i, baseAdd + i[7:])
    output_dir = 'output/{}_{}.pkl'.format(args.scene_mode, timestamp)
    # configure logging
    from pythonjsonlogger import jsonlogger
    import socket
    import sys
    file_handler = logging.FileHandler(output_dir + '.log', mode='w')
    file_handler.setFormatter(jsonlogger.JsonFormatter('(message) (levelname) (name) (asctime)'))
    stdout_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(level=logging.INFO, handlers=[stdout_handler, file_handler])
    logging.info({
        'type': 'process',
        'argv': sys.argv,
        'args': vars(args),
        'version': VERSION,
        'hostname': socket.gethostname(),
    })

    # add args.device
    args.device = torch.device('cpu')
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda:0')
    print(args.device)
    # read in datasets ('syi.ndjson' is excluded via sample rate 0.0)
    train_scenes = list(trajnettools.load_all(args.train_input_files+'*.ndjson',
                                              sample={'syi.ndjson': 0.0}))
    val_scenes = list(trajnettools.load_all(args.val_input_files+'*.ndjson',
                                            sample={'syi.ndjson': 0.0}))

    print('number of train scenes =' + str(len(train_scenes)))
    # sanity check: warn about frames whose positions are all NaN
    # (x == x is False only for NaN)
    for scene_idx, scene in enumerate(train_scenes):
        for frame_idx, frame in enumerate(scene[2]):
            if not any(k[1] == k[1] and k[0] == k[0] for k in frame):
                print("in scene", scene_idx, "frame", frame_idx, "all the pos are nan")
    trainFiles = os.listdir(args.train_input_files)
    valFiles = os.listdir(args.val_input_files)
    # log as plain messages (previously wrapped in an accidental set literal)
    logging.info('train files are : %s', trainFiles)
    logging.info('val files are : %s', valFiles)
    # create model
    if (args.scene_mode == 'EDN'):
        model = EDN(n_obs=args.n_obs, n_pred=args.n_pred, device=args.device)
    elif (args.scene_mode == 'EDN_M'):
        model = EDN_M(n_obs=args.n_obs, n_pred=args.n_pred, device=args.device)
    elif (args.scene_mode == 'RRB'):
        model = RRB(n_obs=args.n_obs, n_pred=args.n_pred, device=args.device)
    elif (args.scene_mode == 'RRB_M'):
        model = RRB_M(n_obs=args.n_obs, n_pred=args.n_pred, device=args.device)
        with open(
                "output/final_models/EDN/EDN_M_sceneGeneralization.pkl.state_dict",
                'rb') as f:
            pretrained_dict = torch.load(f)
        trained_blocks = ['encoder_traj', 'decoder', 'encoder_vehs', 'regressor', 'cnn']
        model.load_state_dict(pretrained_dict, strict=False)
        # freeze the pretrained blocks; only the remaining parts stay trainable
        for param_name, param in model.named_parameters():
            if any(block in param_name for block in trained_blocks):
                param.requires_grad = False
    else:
        # fail fast instead of crashing later with an undefined `model`
        raise ValueError('ambiguous model type: {}'.format(args.scene_mode))

    torch.backends.cudnn.enabled = False  # disabled as the function used to rotate scene didn't work without it
    weight_decay = 1e-2
    num_epochs_reduce_lr = 2
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, num_epochs_reduce_lr, gamma=0.5)

    if args.load_state:
        print("Loading Model Dict")
        with open(args.load_state, 'rb') as f:
            checkpoint = torch.load(f)
        pretrained_state_dict = checkpoint['state_dict']
        model.load_state_dict(pretrained_state_dict, strict=False)
        model = model.to(args.device)

    trainer = Trainer(timestamp, model, optimizer=optimizer, device=args.device, n_obs=args.n_obs, n_pred=args.n_pred,
                      lr_scheduler=lr_scheduler, scene_mode=args.scene_mode)
    trainer.loop(train_scenes, val_scenes, output_dir, epochs=args.epochs)
    trainer.writer.close()
# Beispiel #4  (NOTE: extraction artifact separating pasted examples; `main` is redefined below)
# 0
def main(epochs=50):
    """Train a plain LSTM trajectory-prediction model.

    Parses command-line arguments, configures JSON logging, loads
    train/val scenes, builds the LSTM (with optional pooling), and runs
    the training loop.

    Parameters
    ----------
    epochs : int
        Default number of training epochs (overridable via ``--epochs``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs',
                        default=epochs,
                        type=int,
                        help='number of epochs')
    parser.add_argument('--lr',
                        default=1e-3,
                        type=float,
                        help='initial learning rate')
    parser.add_argument('--type',
                        default='vanilla',
                        choices=('vanilla', 'occupancy', 'directional',
                                 'social', 'hiddenstatemlp'),
                        help='type of LSTM to train')
    parser.add_argument('-o', '--output', default=None, help='output file')
    parser.add_argument('--disable-cuda',
                        action='store_true',
                        help='disable CUDA')
    parser.add_argument('--path',
                        default='trajdata',
                        help='glob expression for data files')
    parser.add_argument('--loss', default='L2', help='loss function')

    pretrain = parser.add_argument_group('pretraining')
    pretrain.add_argument(
        '--load-state',
        default=None,
        help='load a pickled model state dictionary before training')
    pretrain.add_argument(
        '--load-full-state',
        default=None,
        help='load a pickled full state dictionary before training')
    pretrain.add_argument(
        '--nonstrict-load-state',
        default=None,
        help='load a pickled state dictionary before training')

    hyperparameters = parser.add_argument_group('hyperparameters')
    hyperparameters.add_argument('--hidden-dim',
                                 type=int,
                                 default=128,
                                 help='RNN hidden dimension')
    hyperparameters.add_argument('--coordinate-embedding-dim',
                                 type=int,
                                 default=64,
                                 help='coordinate embedding dimension')
    hyperparameters.add_argument('--cell_side',
                                 type=float,
                                 default=2.0,
                                 help='cell size of real world')
    args = parser.parse_args()

    # set model output file
    # NOTE(review): timestamp is only referenced by the commented-out naming
    # scheme below; the active scheme uses OUTPUT_BLOCK/<path>/<type>.pkl
    timestamp = datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S')
    # if args.output is None:
    #     args.output = 'output/{}_lstm_{}.pkl'.format(args.type, timestamp)
    if not os.path.exists('OUTPUT_BLOCK/{}'.format(args.path)):
        os.makedirs('OUTPUT_BLOCK/{}'.format(args.path))
    if args.output:
        args.output = 'OUTPUT_BLOCK/{}/{}_{}.pkl'.format(
            args.path, args.type, args.output)
    else:
        args.output = 'OUTPUT_BLOCK/{}/{}.pkl'.format(args.path, args.type)

    # configure logging: append when resuming from a full state, else overwrite
    from pythonjsonlogger import jsonlogger
    import socket
    import sys
    if args.load_full_state:
        file_handler = logging.FileHandler(args.output + '.log', mode='a')
    else:
        file_handler = logging.FileHandler(args.output + '.log', mode='w')
    file_handler.setFormatter(
        jsonlogger.JsonFormatter('(message) (levelname) (name) (asctime)'))
    stdout_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(level=logging.INFO,
                        handlers=[stdout_handler, file_handler])
    logging.info({
        'type': 'process',
        'argv': sys.argv,
        'args': vars(args),
        'version': VERSION,
        'hostname': socket.gethostname(),
    })

    # refactor args for --load-state: the three load flags collapse into
    # args.load_state plus a strictness flag
    args.load_state_strict = True
    if args.nonstrict_load_state:
        args.load_state = args.nonstrict_load_state
        args.load_state_strict = False
    if args.load_full_state:
        args.load_state = args.load_full_state

    # add args.device (CUDA deliberately disabled here)
    args.device = torch.device('cpu')
    # if not args.disable_cuda and torch.cuda.is_available():
    #     args.device = torch.device('cuda')

    # read in datasets
    args.path = 'DATA_BLOCK/' + args.path

    train_scenes = list(trajnettools.load_all(args.path +
                                              '/train/**/*.ndjson'))
    val_scenes = list(trajnettools.load_all(args.path + '/val/**/*.ndjson'))

    # create model: hiddenstatemlp gets its own pooling, every other
    # non-vanilla type uses grid pooling
    pool = None
    if args.type == 'hiddenstatemlp':
        pool = HiddenStateMLPPooling(hidden_dim=args.hidden_dim)
    elif args.type != 'vanilla':
        pool = Pooling(type_=args.type,
                       hidden_dim=args.hidden_dim,
                       cell_side=args.cell_side)
    model = LSTM(pool=pool,
                 embedding_dim=args.coordinate_embedding_dim,
                 hidden_dim=args.hidden_dim)
    # Default Load
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=1e-4)
    lr_scheduler = None
    start_epoch = 0

    # train
    if args.load_state:
        # load pretrained model.
        # useful for transfer learning
        with open(args.load_state, 'rb') as f:
            checkpoint = torch.load(f)
        pretrained_state_dict = checkpoint['state_dict']
        model.load_state_dict(pretrained_state_dict,
                              strict=args.load_state_strict)

        if args.load_full_state:
            # load optimizer/scheduler from last training
            # useful to continue training
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=args.lr,
                                         weight_decay=1e-4)
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 15)
            lr_scheduler.load_state_dict(checkpoint['scheduler'])
            start_epoch = checkpoint['epoch']

    # trainer
    trainer = Trainer(model,
                      optimizer=optimizer,
                      lr_scheduler=lr_scheduler,
                      device=args.device,
                      loss=args.loss)
    trainer.loop(train_scenes,
                 val_scenes,
                 args.output,
                 epochs=args.epochs,
                 start_epoch=start_epoch)