示例#1
0
def main(args):
    """Run a grid search of Social-LSTM training experiments.

    Loads the dataset named by ``args.dataset``, splits it into
    train/validation batches of fixed trajectory length, then trains one
    ``SocialModel`` per (embedding_size, hidden_size) combination via
    ``experiment``.

    Args:
        args: Parsed CLI namespace. Fields used here: ``dataset`` (path),
            ``truncated`` (bool), ``embedding_size`` (iterable of int),
            ``hidden_size`` (iterable of int). ``experiment`` may read
            further fields (e.g. learning rate) from ``args`` directly.
    """
    # Fixed trajectory length used to batch the dataset for training.
    trajectory_length = 20

    print("************* Loading Dataset ***************")
    dataset = Dataset()
    dataset.load_data(args.dataset)
    training_data, testing_data = dataset.get_train_validation_batch(
        trajectory_length)

    # Optionally shrink both splits to 10 batches for quick smoke runs.
    if args.truncated:
        print("Using truncated data")
        training_data = training_data[:10]
        testing_data = testing_data[:10]

    # Hyperparameter grid comes straight from the CLI (one experiment
    # per (embedding_size, hidden_size) pair).
    experiment_embedding_size = args.embedding_size
    experiment_hidden_size = args.hidden_size

    for embedding_size in experiment_embedding_size:
        for hidden_size in experiment_hidden_size:
            # Fresh model per configuration so runs don't share weights.
            social_model = SocialModel(hidden_size=hidden_size,
                                       embedding_size=embedding_size)

            experiment(social_model,
                       training_data,
                       testing_data,
                       hidden_size,
                       embedding_size,
                       args,
                       model_name='social_lstm')

    print("done!")
示例#2
0
    args = parser.parse_args()

    # BUG FIX: the original compared with `is`, which tests object
    # identity, not string equality — implementation-dependent and a
    # SyntaxWarning since Python 3.8. Use `==`.
    if args.which == 'inference':
        # Run inference on sampled trajectories and write one plot per
        # sample to the current directory.
        trajectory_length = args.trajectory_length
        prediction_length = args.prediction_length

        # Rebuild the model skeleton, then restore trained weights.
        # Keyword args bind sizes unambiguously (elsewhere in this file
        # SocialModel is constructed with these exact keywords).
        model = SocialModel(embedding_size=args.embedding_size,
                            hidden_size=args.hidden_size)
        with open(args.model_path, 'rb') as f:
            state_dict = torch.load(f)
            model.load_state_dict(state_dict)

        if not args.dummy:
            # Real data: draw 20 random samples from the training split.
            dataset = Dataset()
            dataset.load_data('./data_transformed.h5')
            training_data, testing_data = dataset.get_train_validation_batch(
                trajectory_length)

            for i in tqdm(range(20), desc="Inferring..."):
                plot_inference(choice(training_data), model, prediction_length,
                               'sample_data_{}.png'.format(i + 1))
        else:
            # Dummy mode: generate a fresh synthetic batch per plot.
            for i in range(20):
                training_data, testing_data = generate_fake_data(
                    20, 20, 10, 0.05, 0.1)
                plot_inference(training_data, model, args.prediction_length,
                               "fake_{}.png".format(i))
    else: