Example #1
def main():

    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length',
                        type=int,
                        default=8,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length',
                        type=int,
                        default=12,
                        help='Predicted length of the trajectory')

    # Train Dataset
    # Use like:
    # python transpose_inrange.py --train_dataset index_1 index_2 ...
    parser.add_argument(
        '-l',
        '--train_dataset',
        nargs='+',
        help=
        '<Required> training dataset(s) the model is trained on: --train_dataset index_1 index_2 ...',
        default=[0, 1, 2, 4],
        type=int)

    # Test dataset
    parser.add_argument('--test_dataset',
                        type=int,
                        default=3,
                        help='Dataset to be tested on')

    # Model to be loaded
    parser.add_argument('--epoch',
                        type=int,
                        default=26,
                        help='Epoch of model to be loaded')

    # Use GPU or not
    parser.add_argument('--use_cuda',
                        action="store_true",
                        default=False,
                        help="Use GPU or CPU")

    # Parse the parameters
    sample_args = parser.parse_args()

    # Save directory
    load_directory = 'save/'
    load_directory += 'trainedOn_' + str(sample_args.train_dataset)

    # Define the path for the config file for saved args
    ## Arguments of the parser used while training
    with open(os.path.join(load_directory, 'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    # Initialize net
    net = SRNN(saved_args, True)
    if saved_args.use_cuda:
        net = net.cuda()

    checkpoint_path = os.path.join(
        load_directory, 'srnn_model_' + str(sample_args.epoch) + '.tar')

    if os.path.isfile(checkpoint_path):
        print('Loading checkpoint')
        checkpoint = torch.load(checkpoint_path)
        # model_iteration = checkpoint['iteration']
        model_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['state_dict'])
        print('Loaded checkpoint at {}'.format(model_epoch))

    # Dataset to get data from
    dataset = [sample_args.test_dataset]

    dataloader = DataLoader(1,
                            sample_args.pred_length + sample_args.obs_length,
                            dataset,
                            True,
                            infer=True)

    dataloader.reset_batch_pointer()

    # Construct the ST-graph object
    stgraph = ST_GRAPH(1, sample_args.pred_length + sample_args.obs_length)

    results = []

    # Variable to maintain total error
    total_error = 0
    final_error = 0

    for batch in range(dataloader.num_batches):
        start = time.time()

        # Get the next batch
        x, _, frameIDs, d = dataloader.next_batch(randomUpdate=False)

        # Construct ST graph
        stgraph.readGraph(x)

        nodes, edges, nodesPresent, edgesPresent = stgraph.getSequence()

        # Convert to cuda variables
        nodes = Variable(torch.from_numpy(nodes).float(), volatile=True)
        edges = Variable(torch.from_numpy(edges).float(), volatile=True)
        if saved_args.use_cuda:
            nodes = nodes.cuda()
            edges = edges.cuda()

        # Separate out the observed part of the trajectory
        obs_nodes = nodes[:sample_args.obs_length]
        obs_edges = edges[:sample_args.obs_length]
        obs_nodesPresent = nodesPresent[:sample_args.obs_length]
        obs_edgesPresent = edgesPresent[:sample_args.obs_length]

        # Sample function
        ret_nodes, ret_attn, ret_new_attn = sample(obs_nodes, obs_edges,
                                                   obs_nodesPresent,
                                                   obs_edgesPresent,
                                                   sample_args, net, nodes,
                                                   edges, nodesPresent)

        # Compute mean and final displacement error
        total_error += get_mean_error(ret_nodes[sample_args.obs_length:].data,
                                      nodes[sample_args.obs_length:].data,
                                      nodesPresent[sample_args.obs_length - 1],
                                      nodesPresent[sample_args.obs_length:],
                                      saved_args.use_cuda)
        final_error += get_final_error(
            ret_nodes[sample_args.obs_length:].data,
            nodes[sample_args.obs_length:].data,
            nodesPresent[sample_args.obs_length - 1],
            nodesPresent[sample_args.obs_length:])

        end = time.time()

        print('Processed trajectory number : ', batch, 'out of',
              dataloader.num_batches, 'trajectories in time', end - start)

        # Store results
        if saved_args.use_cuda:
            results.append(
                (nodes.data.cpu().numpy(), ret_nodes.data.cpu().numpy(),
                 nodesPresent, sample_args.obs_length, ret_attn, ret_new_attn,
                 frameIDs))
        else:
            results.append(
                (nodes.data.numpy(), ret_nodes.data.numpy(), nodesPresent,
                 sample_args.obs_length, ret_attn, ret_new_attn, frameIDs))

        # Reset the ST graph
        stgraph.reset()

    print('Total mean error of the model is ',
          total_error / dataloader.num_batches)
    print('Total final error of the model is ',
          final_error / dataloader.num_batches)

    print('Saving results')
    save_directory = load_directory + '/testedOn_' + str(
        sample_args.test_dataset)
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)
    with open(os.path.join(save_directory, 'results.pkl'), 'wb') as f:
        pickle.dump(results, f)
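
Example #1 above accumulates the mean displacement error (ADE) with get_mean_error and the final displacement error (FDE) with get_final_error; those functions live elsewhere in the repository and are not shown here. As a rough illustration of what the two metrics compute, here is a minimal NumPy sketch (displacement_errors is a hypothetical name) that assumes dense (pred_length, num_peds, 2) arrays and a boolean presence mask instead of the nodesPresent lists the real code passes around:

import numpy as np

def displacement_errors(pred, truth, present):
    """Sketch of ADE/FDE: pred and truth are (T, N, 2) arrays, present is a (T, N) boolean mask."""
    dist = np.linalg.norm(pred - truth, axis=2)      # Euclidean error per timestep and pedestrian
    ade = dist[present].mean()                       # average over all present (timestep, pedestrian) pairs
    fde = dist[-1][present[-1]].mean()               # error at the final predicted timestep only
    return ade, fde
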
Example #2
def main():

    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length',
                        type=int,
                        default=8,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length',
                        type=int,
                        default=12,
                        help='Predicted length of the trajectory')
    # Test dataset
    parser.add_argument('--test_dataset',
                        type=int,
                        default=4,
                        help='Dataset to be tested on')

    parser.add_argument('--sample_dataset',
                        type=int,
                        default=4,
                        help='Dataset to be sampled on')

    # Model to be loaded
    parser.add_argument('--epoch',
                        type=int,
                        default=133,
                        help='Epoch of model to be loaded')
    #[109,149,124,80,128]
    # Parse the parameters
    sample_args = parser.parse_args()

    # Save directory
    save_directory = '/home/hesl/PycharmProjects/srnn-pytorch/save/FixedPixel_150epochs_batchsize24_Pruned/'
    save_directory += str(sample_args.test_dataset) + '/'
    save_directory += 'save_attention'

    ouput_directory = '/home/hesl/PycharmProjects/srnn-pytorch/save/'
    #ouput_directory+= str(sample_args.test_dataset) + '/'
    ouput_directory = save_directory

    # Define the path for the config file for saved args
    with open(os.path.join(save_directory, 'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    # Initialize net
    net = SRNN(saved_args, True)
    net.cuda()
    #net.forward()

    checkpoint_path = os.path.join(
        save_directory, 'srnn_model_' + str(sample_args.epoch) + '.tar')

    if os.path.isfile(checkpoint_path):
        print('Loading checkpoint')
        checkpoint = torch.load(checkpoint_path)
        # model_iteration = checkpoint['iteration']
        model_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['state_dict'])
        print('Loaded checkpoint at {}'.format(model_epoch))

    # Homography mapping pixel coordinates to world coordinates (see the sketch after this example)
    H = np.loadtxt(H_path[sample_args.sample_dataset])

    # Dataset to get data from
    dataset = [sample_args.test_dataset]
    dataset = [sample_args.sample_dataset]

    dataloader = DataLoader(1,
                            sample_args.pred_length + sample_args.obs_length,
                            dataset,
                            True,
                            infer=True)

    dataloader.reset_batch_pointer()

    # Construct the ST-graph object
    stgraph = ST_GRAPH(1, sample_args.pred_length + sample_args.obs_length)

    NumberofSampling = 10

    for i in range(NumberofSampling):

        results = []

        # Variable to maintain total error
        total_error = 0
        final_error = 0

        for batch in range(dataloader.num_batches):
            start = time.time()

            # Get the next batch
            x, _, frameIDs, d = dataloader.next_batch(randomUpdate=False)

            # Construct ST graph
            stgraph.readGraph(x)

            nodes, edges, nodesPresent, edgesPresent = stgraph.getSequence()

            # Convert to cuda variables
            nodes = Variable(torch.from_numpy(nodes).float(),
                             volatile=True).cuda()
            edges = Variable(torch.from_numpy(edges).float(),
                             volatile=True).cuda()

            # Separate out the observed part of the trajectory
            obs_nodes = nodes[:sample_args.obs_length]
            obs_edges = edges[:sample_args.obs_length]
            obs_nodesPresent = nodesPresent[:sample_args.obs_length]
            obs_edgesPresent = edgesPresent[:sample_args.obs_length]

            # Sample function
            ret_nodes, ret_attn = sample(obs_nodes, obs_edges,
                                         obs_nodesPresent, obs_edgesPresent,
                                         sample_args, net, nodes, edges,
                                         nodesPresent)

            # Compute mean and final displacement error
            total_error += get_mean_error(
                ret_nodes[sample_args.obs_length:].data,
                nodes[sample_args.obs_length:].data,
                nodesPresent[sample_args.obs_length - 1],
                nodesPresent[sample_args.obs_length:], H,
                sample_args.sample_dataset)
            final_error += get_final_error(
                ret_nodes[sample_args.obs_length:].data,
                nodes[sample_args.obs_length:].data,
                nodesPresent[sample_args.obs_length - 1],
                nodesPresent[sample_args.obs_length:], H,
                sample_args.sample_dataset)

            end = time.time()

            print('Processed trajectory number : ', batch, 'out of',
                  dataloader.num_batches, 'trajectories in time', end - start)

            # Store results
            results.append(
                (nodes.data.cpu().numpy(), ret_nodes.data.cpu().numpy(),
                 nodesPresent, sample_args.obs_length, ret_attn, frameIDs,
                 total_error / dataloader.num_batches,
                 final_error / dataloader.num_batches))

            # Reset the ST graph
            stgraph.reset()

        print('Total mean error of the model is ',
              total_error / dataloader.num_batches)
        print('Total final error of the model is ',
              final_error / dataloader.num_batches)

        current_mean_error = total_error / dataloader.num_batches
        current_final_error = final_error / dataloader.num_batches
        if i == 0:
            min_current_mean_error = current_mean_error
            min_current_final_error = current_final_error
            min_index = i
            print('Saving initial results on {}'.format(i))
            with open(os.path.join(ouput_directory, 'results.pkl'), 'wb') as f:
                pickle.dump(results, f)
        else:
            if current_mean_error < min_current_mean_error:
                min_current_mean_error = current_mean_error
                min_current_final_error = current_final_error
                min_index = i
                print('Found Smaller Error on {}, Saving results'.format(i))
                print('Smaller current_mean_error: {} and current_final_error: {}'
                      .format(current_mean_error, current_final_error))
                with open(os.path.join(ouput_directory, 'results.pkl'),
                          'wb') as f:
                    pickle.dump(results, f)

    print(
        'Minimum Total Mean Error is {} and Minimum Final Mean Error is {} on {}th Sampling'
        .format(min_current_mean_error, min_current_final_error, min_index))
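
Example #2 above loads a 3x3 homography matrix (H = np.loadtxt(H_path[...])) and hands it to get_mean_error / get_final_error, presumably so the errors are measured in world coordinates rather than pixels, as is customary for the ETH/UCY datasets. The sketch below (apply_homography is a hypothetical name) only illustrates how such a homography is applied to 2D points via homogeneous coordinates; whether the repository applies H or its inverse is not visible from this snippet.

import numpy as np

def apply_homography(points, H):
    """Map (N, 2) points through a 3x3 homography H using homogeneous coordinates."""
    homogeneous = np.hstack([points, np.ones((points.shape[0], 1))])  # (N, 3)
    mapped = homogeneous @ H.T                                        # project through H
    return mapped[:, :2] / mapped[:, 2:3]                             # dehomogenize
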
Example #3
def main():

    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length', type=int, default=8,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length', type=int, default=12,
                        help='Predicted length of the trajectory')
    # Test dataset
    parser.add_argument('--test_dataset', type=int, default=3,
                        help='Dataset to be tested on')

    # Model to be loaded
    parser.add_argument('--epoch', type=int, default=107,
                        help='Epoch of model to be loaded')

    # Parse the parameters
    sample_args = parser.parse_args()

    # Save directory
    save_directory = '/home/hesl/PycharmProjects/social-lstm-pytorch/save/FixedPixel_Normalized_150epoch/'+ str(sample_args.test_dataset) + '/'

    save_directory='/home/hesl/PycharmProjects/social-lstm-pytorch/save/FixedPixel_Normalized_150epoch/1/'
    ouput_directory='/home/hesl/PycharmProjects/social-lstm-pytorch/save/'


    # Define the path for the config file for saved args
    with open(os.path.join(save_directory, 'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    # Initialize net
    net = SocialLSTM(saved_args, True)
    net.cuda()

    # Get the checkpoint path
    checkpoint_path = os.path.join(save_directory, 'social_lstm_model_'+str(sample_args.epoch)+'.tar')
    # checkpoint_path = os.path.join(save_directory, 'srnn_model.tar')
    if os.path.isfile(checkpoint_path):
        print('Loading checkpoint')
        checkpoint = torch.load(checkpoint_path)
        # model_iteration = checkpoint['iteration']
        model_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['state_dict'])
        print('Loaded checkpoint at epoch', model_epoch)

    #homography
    H = np.loadtxt(H_path[sample_args.test_dataset])

    # Test dataset
    dataset = [sample_args.test_dataset]

    # Create the DataLoader object
    dataloader = DataLoader(1, sample_args.pred_length + sample_args.obs_length, dataset, True, infer=True)

    dataloader.reset_batch_pointer()

    # Construct the ST-graph object
    stgraph = ST_GRAPH(1, sample_args.pred_length + sample_args.obs_length)

    results = []

    # Variable to maintain total error
    total_error = 0
    final_error = 0

    # For each batch
    for batch in range(dataloader.num_batches):
        start = time.time()

        # Get data
        x, _, d = dataloader.next_batch(randomUpdate=False)

        # Get the sequence
        x_seq, d_seq = x[0], d[0]

        # Dimensions of the dataset
        if d_seq == 0 and dataset[0] == 0:
            dimensions = [640, 480]
        else:
            dimensions = [720, 576]

        dimensions=[1224,370]

        # Get the grid masks for the sequence
        grid_seq = getSequenceGridMask(x_seq, dimensions, saved_args.neighborhood_size, saved_args.grid_size)

        # Construct ST graph
        stgraph.readGraph(x)

        # Get nodes and nodesPresent
        nodes, _, nodesPresent, _ = stgraph.getSequence(0)
        nodes = Variable(torch.from_numpy(nodes).float(), volatile=True).cuda()

        # Extract the observed part of the trajectories
        obs_nodes, obs_nodesPresent, obs_grid = nodes[:sample_args.obs_length], nodesPresent[:sample_args.obs_length], grid_seq[:sample_args.obs_length]

        # The sample function
        ret_nodes = sample(obs_nodes, obs_nodesPresent, obs_grid, sample_args, net, nodes, nodesPresent, grid_seq, saved_args, dimensions)
        #print(nodes[sample_args.obs_length:].data)
        # Record the mean and final displacement error
        total_error += get_mean_error(ret_nodes[sample_args.obs_length:].data, nodes[sample_args.obs_length:].data, nodesPresent[sample_args.obs_length-1], nodesPresent[sample_args.obs_length:],H,sample_args.test_dataset)
        final_error += get_final_error(ret_nodes[sample_args.obs_length:].data, nodes[sample_args.obs_length:].data, nodesPresent[sample_args.obs_length-1], nodesPresent[sample_args.obs_length:],H,sample_args.test_dataset)

        end = time.time()

        print('Processed trajectory number : ', batch, 'out of', dataloader.num_batches, 'trajectories in time', end - start)

        results.append((nodes.data.cpu().numpy(), ret_nodes.data.cpu().numpy(), nodesPresent, sample_args.obs_length))

        # Reset the ST graph
        stgraph.reset()

    print('Total mean error of the model is ', total_error / dataloader.num_batches)
    print('Total final error of the model is ', final_error / dataloader.num_batches)

    print('Saving results')
    with open(os.path.join(ouput_directory, 'results.pkl'), 'wb') as f:
        pickle.dump(results, f)
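
Example #3 above builds social grid masks with getSequenceGridMask(x_seq, dimensions, saved_args.neighborhood_size, saved_args.grid_size), whose implementation is not part of this listing. The sketch below (frame_grid_mask is a hypothetical name) only shows the usual Social-LSTM occupancy mask for a single frame, assuming positions and neighborhood_size share the same units; the repository's version additionally takes the frame dimensions and works on whole sequences.

import numpy as np

def frame_grid_mask(positions, neighborhood_size, grid_size):
    """Occupancy-mask sketch: mask[i, j, c] = 1 if pedestrian j falls into cell c
    of pedestrian i's neighborhood grid. positions has shape (N, 2)."""
    num_peds = positions.shape[0]
    mask = np.zeros((num_peds, num_peds, grid_size * grid_size))
    bound = neighborhood_size / 2.0
    cell = neighborhood_size / grid_size
    for i in range(num_peds):
        for j in range(num_peds):
            if i == j:
                continue
            dx, dy = positions[j] - positions[i]
            if abs(dx) >= bound or abs(dy) >= bound:
                continue  # pedestrian j is outside pedestrian i's neighborhood
            cx = int((dx + bound) // cell)
            cy = int((dy + bound) // cell)
            mask[i, j, cy * grid_size + cx] = 1
    return mask
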
Example #4
def main():

    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length',
                        type=int,
                        default=8,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length',
                        type=int,
                        default=12,
                        help='Predicted length of the trajectory')

    # Model to be loaded
    parser.add_argument('--epoch',
                        type=int,
                        default=14,
                        help='Epoch of model to be loaded')
    # cuda support
    parser.add_argument('--use_cuda',
                        action="store_true",
                        default=False,
                        help='Use GPU or not')
    # drive support
    parser.add_argument('--drive',
                        action="store_true",
                        default=False,
                        help='Use Google drive or not')
    # Number of iterations -> we try several times to get the lowest test error derived from the observed part and
    # the prediction of the observed part. Currently this is unused because we directly copy the observed part and
    # make no use of the prediction, so the test error will be 0.
    parser.add_argument(
        '--iteration',
        type=int,
        default=1,
        help=
        'Number of iteration to create test file (smallest test errror will be selected)'
    )
    # gru model
    parser.add_argument('--gru',
                        action="store_true",
                        default=False,
                        help='True : GRU cell, False: LSTM cell')
    # method selection
    parser.add_argument(
        '--method',
        type=int,
        default=1,
        help=
        'Method of lstm will be used (1 = social lstm, 2 = obstacle lstm, 3 = vanilla lstm)'
    )

    # Parse the parameters
    sample_args = parser.parse_args()

    #for drive run
    prefix = ''
    f_prefix = '.'
    if sample_args.drive is True:
        prefix = 'drive/semester_project/social_lstm_final/'
        f_prefix = 'drive/semester_project/social_lstm_final'

    #run sh file for folder creation
    if not os.path.isdir("log/"):
        print("Directory creation script is running...")
        subprocess.call([f_prefix + '/make_directories.sh'])

    method_name = get_method_name(sample_args.method)
    model_name = "LSTM"
    save_tar_name = method_name + "_lstm_model_"
    if sample_args.gru:
        model_name = "GRU"
        save_tar_name = method_name + "_gru_model_"

    print("Selected method name: ", method_name, " model name: ", model_name)

    # Save directory
    save_directory = os.path.join(f_prefix, 'model/', method_name, model_name)
    #plot directory for plotting in the future
    plot_directory = os.path.join(f_prefix, 'plot/', method_name, model_name)

    result_directory = os.path.join(f_prefix, 'result/', method_name)
    plot_test_file_directory = 'test'

    # Define the path for the config file for saved args
    with open(os.path.join(save_directory, 'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    seq_lenght = sample_args.pred_length + sample_args.obs_length

    # Create the DataLoader object
    dataloader = DataLoader(f_prefix,
                            1,
                            seq_lenght,
                            forcePreProcess=True,
                            infer=True)
    create_directories(os.path.join(result_directory, model_name),
                       dataloader.get_all_directory_namelist())
    create_directories(plot_directory, [plot_test_file_directory])
    dataloader.reset_batch_pointer()

    dataset_pointer_ins = dataloader.dataset_pointer

    smallest_err = 100000
    smallest_err_iter_num = -1
    origin = (0, 0)
    reference_point = (0, 1)

    submission_store = []  # store submission data points (txt)
    result_store = []  # store points for plotting

    for iteration in range(sample_args.iteration):
        # Initialize net
        net = get_model(sample_args.method, saved_args, True)

        if sample_args.use_cuda:
            net = net.cuda()

        # Get the checkpoint path
        checkpoint_path = os.path.join(
            save_directory, save_tar_name + str(sample_args.epoch) + '.tar')
        if os.path.isfile(checkpoint_path):
            print('Loading checkpoint')
            checkpoint = torch.load(checkpoint_path)
            model_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['state_dict'])
            print('Loaded checkpoint at epoch', model_epoch)

        # For each batch
        iteration_submission = []
        iteration_result = []
        results = []
        submission = []

        # Variable to maintain total error
        total_error = 0
        final_error = 0

        for batch in range(dataloader.num_batches):
            start = time.time()
            # Get data
            x, y, d, numPedsList, PedsList, target_ids = dataloader.next_batch()

            # Get the sequence
            x_seq, d_seq, numPedsList_seq, PedsList_seq, target_id = \
                x[0], d[0], numPedsList[0], PedsList[0], target_ids[0]
            dataloader.clean_test_data(x_seq, target_id,
                                       sample_args.obs_length,
                                       sample_args.pred_length)
            dataloader.clean_ped_list(x_seq, PedsList_seq, target_id,
                                      sample_args.obs_length,
                                      sample_args.pred_length)

            #get processing file name and then get dimensions of file
            folder_name = dataloader.get_directory_name_with_pointer(d_seq)
            dataset_data = dataloader.get_dataset_dimension(folder_name)

            #dense vector creation
            x_seq, lookup_seq = dataloader.convert_proper_array(
                x_seq, numPedsList_seq, PedsList_seq)

            #will be used for error calculation
            orig_x_seq = x_seq.clone()

            # target_id_values = orig_x_seq[0][lookup_seq[target_id], 0:2]

            #grid mask calculation
            if sample_args.method == 2:  #obstacle lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                               PedsList_seq,
                                               saved_args.neighborhood_size,
                                               saved_args.grid_size,
                                               saved_args.use_cuda, True)
            elif sample_args.method == 1:  #social lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                               PedsList_seq,
                                               saved_args.neighborhood_size,
                                               saved_args.grid_size,
                                               saved_args.use_cuda)

            # Vectorize datapoints into relative coordinates (see the sketch after this example)
            x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq,
                                                     lookup_seq)

            # <------------- Experimental block ---------------->
            # x_seq = translate(x_seq, PedsList_seq, lookup_seq ,target_id_values)
            # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))
            # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)
            # grid_seq = getSequenceGridMask(x_seq[:sample_args.obs_length], dataset_data, PedsList_seq, saved_args.neighborhood_size, saved_args.grid_size, sample_args.use_cuda)
            # x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)

            if sample_args.use_cuda:
                x_seq = x_seq.cuda()

            # The sample function
            if sample_args.method == 3:  #vanilla lstm
                # Extract the observed part of the trajectories
                obs_traj = x_seq[:sample_args.obs_length]
                obs_PedsList_seq = PedsList_seq[:sample_args.obs_length]
                ret_x_seq = sample(obs_traj, obs_PedsList_seq, sample_args,
                                   net, x_seq, PedsList_seq, saved_args,
                                   dataset_data, dataloader, lookup_seq,
                                   numPedsList_seq, sample_args.gru)

            else:
                # Extract the observed part of the trajectories
                obs_traj = x_seq[:sample_args.obs_length]
                obs_PedsList_seq = PedsList_seq[:sample_args.obs_length]
                obs_grid = grid_seq[:sample_args.obs_length]
                ret_x_seq = sample(obs_traj, obs_PedsList_seq, sample_args,
                                   net, x_seq, PedsList_seq, saved_args,
                                   dataset_data, dataloader, lookup_seq,
                                   numPedsList_seq, sample_args.gru, obs_grid)

            #revert the points back to original space
            ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq,
                                   first_values_dict)

            # <--------------------- Experimental inverse block ---------------------->
            # ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, target_id_values, first_values_dict)
            # ret_x_seq = rotate_traj_with_target_ped(ret_x_seq, -angle, PedsList_seq, lookup_seq)
            # ret_x_seq = translate(ret_x_seq, PedsList_seq, lookup_seq ,-target_id_values)

            # Record the mean and final displacement error
            print(PedsList_seq[-sample_args.pred_length:])
            total_error += get_mean_error(
                ret_x_seq[-sample_args.pred_length:].data,
                orig_x_seq[-sample_args.pred_length:].data,
                PedsList_seq[-sample_args.pred_length:],
                PedsList_seq[-sample_args.pred_length:], sample_args.use_cuda,
                lookup_seq)
            final_error += get_final_error(
                ret_x_seq[-sample_args.pred_length:].data,
                orig_x_seq[-sample_args.pred_length:].data,
                PedsList_seq[-sample_args.pred_length:],
                PedsList_seq[-sample_args.pred_length:], lookup_seq)

            end = time.time()

            print('Current file : ', dataloader.get_file_name(0),
                  ' Processed trajectory number : ', batch + 1, 'out of',
                  dataloader.num_batches, 'trajectories in time', end - start)

            # if dataset_pointer_ins is not dataloader.dataset_pointer:
            # if dataloader.dataset_pointer is not 0:
            # iteration_submission.append(submission)
            # iteration_result.append(results)

            # dataset_pointer_ins = dataloader.dataset_pointer
            # submission = []
            # results = []

            # submission.append(submission_preprocess(dataloader, ret_x_seq.data[sample_args.obs_length:, lookup_seq[target_id], :].numpy(), sample_args.pred_length, sample_args.obs_length, target_id))
            # results.append((x_seq.data.cpu().numpy(), ret_x_seq.data.cpu().numpy(), PedsList_seq, lookup_seq , dataloader.get_frame_sequence(seq_lenght), target_id, sample_args.obs_length))

        # iteration_submission.append(submission)
        # iteration_result.append(results)

        # submission_store.append(iteration_submission)
        # result_store.append(iteration_result)

        if total_error < smallest_err:
            print("**********************************************************")
            print('Best iteration has been changed. Previous best iteration: ',
                  smallest_err_iter_num + 1, 'Error: ',
                  smallest_err / dataloader.num_batches)
            print('New best iteration : ', iteration + 1, 'Error: ',
                  total_error / dataloader.num_batches)
            smallest_err_iter_num = iteration
            smallest_err = total_error

        print('Iteration:', iteration + 1,
              ' Total training (observed part) mean error of the model is ',
              total_error / dataloader.num_batches)
        print('Iteration:', iteration + 1,
              'Total training (observed part) final error of the model is ',
              final_error / dataloader.num_batches)
        #print(submission)

    print('Smallest error iteration:', smallest_err_iter_num + 1)
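
Example #4 above converts the trajectories to relative coordinates with vectorize_seq before sampling and maps the predictions back with revert_seq. The repository's functions also take PedsList_seq and lookup_seq so pedestrians that appear mid-sequence are handled; the sketch below ignores that and shows only the core idea on a dense (seq_length, num_peds, 2) tensor (the _dense names are hypothetical).

import torch

def vectorize_seq_dense(x_seq):
    """Shift every trajectory so it starts at the origin; keep the offsets for reverting."""
    first_values = x_seq[0].clone()          # (N, 2) first position of each pedestrian
    return x_seq - first_values, first_values

def revert_seq_dense(x_seq, first_values):
    """Inverse of vectorize_seq_dense: add the stored first positions back."""
    return x_seq + first_values
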
Example #5
def main():
    # os.chdir('/home/serene/Documents/KITTIData/GT/')
    # os.chdir('/home/siri0005/copy_srnn_pytorch/srnn-pytorch-master/')#/srnn-pytorch-master

    os.chdir('/home/serene/Documents/copy_srnn_pytorch/srnn-pytorch-master')
    H_path = ['./pedestrians/ewap_dataset/seq_eth/H.txt',
              './pedestrians/ewap_dataset/seq_hotel/H.txt',
              './pedestrians/ucy_crowd/data_zara01/H.txt',
              './pedestrians/ucy_crowd/data_zara02/H.txt',
              './pedestrians/ucy_crowd/data_students03/H.txt']

    # ,1,2,3,
    base_dir = '../fine_obstacle/prelu/p_02/'
    for i in [4]:#,1,2,3,4
        avg = []
        ade = []
        with open(base_dir+'log/{0}/log_attention/val.txt'.format(i)) as val_f:
            if i == 1:
                e = 99
            else:
                best_val = val_f.readline()
                [e, val] = best_val.split(',')

        parser = argparse.ArgumentParser()
        # Observed length of the trajectory parameter
        parser.add_argument('--obs_length', type=int, default=8,
                            help='Observed length of the trajectory')
        # Predicted length of the trajectory parameter
        parser.add_argument('--pred_length', type=int, default=12,
                            help='Predicted length of the trajectory')
        # Test dataset
        parser.add_argument('--test_dataset', type=int, default=i,
                            help='Dataset to be tested on')

        # Model to be loaded
        parser.add_argument('--epoch', type=int, default=e,
                            help='Epoch of model to be loaded')

        # Parse the parameters
        sample_args = parser.parse_args()

        # Save directory
        save_directory = base_dir+'save/{0}/save_attention'.format(i)
        plot_directory = base_dir +  '/selected_plots/' #'plot_1/'
     

        # Define the path for the config file for saved args
        with open(os.path.join(save_directory, 'config.pkl'), 'rb') as f:
            saved_args = pickle.load(f)

        # Initialize net
        net = SRNN(saved_args, sample_args.test_dataset, True)
        net.cuda()

        ## TODO: visualize trained weights
        # plt.imshow(net.humanNodeRNN.edge_embed.weight)
        # plt.colorbar()
        # plt.show()
        checkpoint_path = os.path.join(save_directory, 'srnn_model_'+str(sample_args.epoch)+'.tar')

        if os.path.isfile(checkpoint_path):
            print('Loading checkpoint')
            checkpoint = torch.load(checkpoint_path)
            # model_iteration = checkpoint['iteration']
            model_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['state_dict'])
            print('Loaded checkpoint at {}'.format(model_epoch))

        H_mat = np.loadtxt(H_path[i])

        avg = []
        ade = []
        # Dataset to get data from
        dataset = [sample_args.test_dataset]
        sample_ade_error_arr = []
        sample_fde_error_arr = []
        num_nodes = 0
        inner_num_nodes_1 = 0
        inner_num_nodes_2 = 0
        ade_sum = 0
        fde_sum = 0

        dataloader = DataLoader(1, sample_args.pred_length + sample_args.obs_length, dataset, True, infer=True)

        dataloader.reset_batch_pointer()

        # Construct the ST-graph object
        stgraph = ST_GRAPH(1, sample_args.pred_length + sample_args.obs_length)

        results = []

        # Variable to maintain total error
        total_error = 0
        final_error = 0
    
        # TRY this code version
        for batch in range(dataloader.num_batches):
            sample_fde_error = []
            sample_ade_error = []
            running_time_sample = []
            c = 0
            x, _, frameIDs, d = dataloader.next_batch(randomUpdate=False)

            # Construct ST graph
            stgraph.readGraph(x, ds_ptr=d, threshold=1.0)

            nodes, edges, nodesPresent, edgesPresent, obsNodes, obsEdges, obsNodesPresent, obsEdgesPresent = stgraph.getSequence()
            nodes = Variable(torch.from_numpy(nodes).float(), volatile=True).cuda()
            edges = Variable(torch.from_numpy(edges).float(), volatile=True).cuda()

            obsNodes = Variable(torch.from_numpy(obsNodes).float()).cuda()
            obsEdges = Variable(torch.from_numpy(obsEdges).float()).cuda()

            # Separate out the observed part of the trajectory
            obs_nodes, obs_edges, obs_nodesPresent, obs_edgesPresent = nodes[:sample_args.obs_length], edges[:sample_args.obs_length], nodesPresent[:sample_args.obs_length], edgesPresent[:sample_args.obs_length]
            # Separate out the observed obstacles in a given sequence
            obsnodes_v, obsEdges_v, obsNodesPresent_v, obsEdgesPresent_v = obsNodes[:sample_args.obs_length], obsEdges[:sample_args.obs_length], obsNodesPresent[:sample_args.obs_length], obsEdgesPresent[:sample_args.obs_length]

            # if c == 0:
            # num_nodes += np.shape(nodes)[1]

            for c in range(10):
                num_nodes += np.shape(nodes)[1]
                start = time.time()
                # Sample function
                ret_nodes, ret_attn = sample(obs_nodes, obs_edges, obs_nodesPresent, obs_edgesPresent, obsnodes_v,
                                             obsEdges_v, obsNodesPresent_v,obsEdgesPresent_v, sample_args, net, nodes, edges, nodesPresent)
                end = time.time()
                running_time_sample.append((end-start))
                # print('One-time Sampling took = ', (end - start), ' seconds.')

                # Compute mean and final displacement error
                total_error , _ = get_mean_error(ret_nodes[sample_args.obs_length:].data, nodes[sample_args.obs_length:].data,
                                              nodesPresent[sample_args.obs_length - 1],
                                              nodesPresent[sample_args.obs_length:], H_mat, i)

                # print("ADE errors:", total_error)
                inner_num_nodes_1 += _
                sample_ade_error.append(total_error)
                

                final_error , _ = get_final_error(ret_nodes[sample_args.obs_length:].data, nodes[sample_args.obs_length:].data,
                                               nodesPresent[sample_args.obs_length - 1],
                                               nodesPresent[sample_args.obs_length:], H_mat, i)
                

                # print("final errors:", final_error)

                sample_fde_error.append(final_error)
               
                results.append((nodes.data.cpu().numpy(), ret_nodes.data.cpu().numpy(), nodesPresent, sample_args.obs_length, ret_attn, frameIDs))
       
                stgraph.reset()
            

            sample_ade_error_arr.append(np.sum(sample_ade_error))
            sample_fde_error_arr.append(np.sum(sample_fde_error))

            sample_ade_error = np.sum(sample_ade_error, 0)
        
            if len(sample_ade_error):
                # sample_ade_error /= 10
                sample_ade_error = torch.min(sample_ade_error)
                ade_sum += sample_ade_error
                ade.append(ade_sum) 
      
            # for non-rectangular tensors
            for (e, idx) in zip(sample_fde_error, range(len(sample_fde_error))):
                if len(e) > 0:
                    sample_fde_error[idx] = np.sum(e)  # / len(e)
                else:
                    del sample_fde_error[idx]

      
            print(sample_fde_error)
            if (np.ndim(sample_fde_error) == 1 and len(sample_fde_error)) or \
                (np.ndim(sample_fde_error) > 1 and all(len(x) > 0 for x in sample_fde_error)):
       
                sample_fde_error = np.min(sample_fde_error)
                fde_sum += sample_fde_error
                avg.append(fde_sum)

            with open(os.path.join(save_directory, 'results.pkl'), 'wb') as f:
                pickle.dump(results, f)

        print('SUMMARY **************************//')

        print('One-time Sampling took = ', np.average(running_time_sample), ' seconds.')
        print(np.sum(ade) , '   ' , np.sum(avg))
        print('average ADE', np.sum(ade) / (sample_args.pred_length * num_nodes))#
        print('average FDE', np.sum(avg) / (num_nodes*10))#
       
        with open(os.path.join(save_directory, 'sampling_results.txt'), 'wb') as o:
            np.savetxt(o, (np.sum(ade) / (sample_args.pred_length * num_nodes),
                           np.sum(avg) / inner_num_nodes_1))
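
Example #5 above samples every test batch 10 times (the for c in range(10) loop) and keeps the smallest ADE and FDE over the samples, i.e. a best-of-N evaluation of a stochastic model. Stripped of the bookkeeping, that protocol looks roughly like the sketch below; best_of_n, sample_once and n_samples are placeholder names, not identifiers from the repository.

def best_of_n(sample_once, n_samples=10):
    """Draw n_samples stochastic predictions and keep the best (ADE, FDE) pair.

    sample_once is assumed to return an (ade, fde) tuple for one sampled prediction."""
    errors = [sample_once() for _ in range(n_samples)]
    best_ade = min(e[0] for e in errors)
    best_fde = min(e[1] for e in errors)
    return best_ade, best_fde
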
Example #6
def main():
    parser = argparse.ArgumentParser()
    # Model to be loaded
    parser.add_argument('--epoch',
                        type=int,
                        default=15,
                        help='Epoch of model to be loaded')

    parser.add_argument('--seq_length',
                        type=int,
                        default=20,
                        help='RNN sequence length')

    parser.add_argument('--use_cuda',
                        action="store_true",
                        default=False,
                        help='Use GPU or not')

    parser.add_argument('--drive',
                        action="store_true",
                        default=False,
                        help='Use Google drive or not')
    # Size of neighborhood to be considered parameter
    parser.add_argument(
        '--neighborhood_size',
        type=int,
        default=32,
        help='Neighborhood size to be considered for social grid')
    # Size of the social grid parameter
    parser.add_argument('--grid_size',
                        type=int,
                        default=4,
                        help='Grid size of the social grid')
    # number of validation will be used
    parser.add_argument(
        '--num_validation',
        type=int,
        default=5,
        help='Total number of validation dataset will be visualized')
    # gru support
    parser.add_argument('--gru',
                        action="store_true",
                        default=False,
                        help='True : GRU cell, False: LSTM cell')
    # method selection
    parser.add_argument(
        '--method',
        type=int,
        default=1,
        help=
        'Method of lstm will be used (1 = social lstm, 2 = obstacle lstm, 3 = vanilla lstm)'
    )

    # Parse the parameters
    sample_args = parser.parse_args()

    # for drive run
    prefix = ''
    f_prefix = '.'
    if sample_args.drive is True:
        prefix = 'drive/semester_project/social_lstm_final/'
        f_prefix = 'drive/semester_project/social_lstm_final'

    method_name = get_method_name(sample_args.method)
    model_name = "LSTM"
    save_tar_name = method_name + "_lstm_model_"
    if sample_args.gru:
        model_name = "GRU"
        save_tar_name = method_name + "_gru_model_"

    # Save directory
    save_directory = os.path.join(f_prefix, 'model/', method_name, model_name)

    # plot directory for plotting in the future
    plot_directory = os.path.join(f_prefix, 'plot/', method_name, model_name)

    plot_validation_file_directory = 'validation'

    # Define the path for the config file for saved args
    with open(os.path.join(save_directory, 'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    origin = (0, 0)
    reference_point = (0, 1)
    net = get_model(sample_args.method, saved_args, True)
    if sample_args.use_cuda:
        net = net.cuda()

    # Get the checkpoint path
    checkpoint_path = os.path.join(
        save_directory, save_tar_name + str(sample_args.epoch) + '.tar')
    if os.path.isfile(checkpoint_path):
        print('Loading checkpoint')
        checkpoint = torch.load(checkpoint_path)
        model_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['state_dict'])
        print('Loaded checkpoint at epoch', model_epoch)

    # Create the DataLoader object
    dataloader = DataLoader(f_prefix,
                            1,
                            sample_args.seq_length,
                            num_of_validation=sample_args.num_validation,
                            forcePreProcess=True,
                            infer=True)
    create_directories(plot_directory, [plot_validation_file_directory])
    dataloader.reset_batch_pointer()

    print(
        '****************Validation dataset batch processing******************'
    )
    dataloader.reset_batch_pointer(valid=False)
    dataset_pointer_ins = dataloader.dataset_pointer

    loss_epoch = 0
    err_epoch = 0
    f_err_epoch = 0
    num_of_batch = 0
    smallest_err = 100000

    # results of one epoch for all validation datasets
    epoch_result = []
    # results of one validation dataset
    results = []

    # For each batch
    for batch in range(dataloader.num_batches):
        start = time.time()
        # Get batch data
        x, y, d, numPedsList, PedsList, target_ids = dataloader.next_batch()

        if dataset_pointer_ins != dataloader.dataset_pointer:
            if dataloader.dataset_pointer != 0:
                print('Finished processing file : ',
                      dataloader.get_file_name(-1), ' Average error : ',
                      err_epoch / num_of_batch)
                num_of_batch = 0
                epoch_result.append(results)

            dataset_pointer_ins = dataloader.dataset_pointer
            results = []

        # Loss for this batch
        loss_batch = 0
        err_batch = 0
        f_err_batch = 0

        # For each sequence
        for sequence in range(dataloader.batch_size):
            # Get data corresponding to the current sequence
            x_seq, _, d_seq, numPedsList_seq, PedsList_seq = \
                x[sequence], y[sequence], d[sequence], numPedsList[sequence], PedsList[sequence]
            target_id = target_ids[sequence]

            folder_name = dataloader.get_directory_name_with_pointer(d_seq)
            dataset_data = dataloader.get_dataset_dimension(folder_name)

            # dense vector creation
            x_seq, lookup_seq = dataloader.convert_proper_array(
                x_seq, numPedsList_seq, PedsList_seq)

            # will be used for error calculation
            orig_x_seq = x_seq.clone()

            target_id_values = x_seq[0][lookup_seq[target_id], 0:2]

            # grid mask calculation
            if sample_args.method == 2:  # obstacle lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                               PedsList_seq,
                                               saved_args.neighborhood_size,
                                               saved_args.grid_size,
                                               saved_args.use_cuda, True)
            elif sample_args.method == 1:  # social lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                               PedsList_seq,
                                               saved_args.neighborhood_size,
                                               saved_args.grid_size,
                                               saved_args.use_cuda)

            # vectorize datapoints
            x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq,
                                                     lookup_seq)

            # <---------------- Experimental block (may need update in methods)----------------------->
            # x_seq = translate(x_seq, PedsList_seq, lookup_seq ,target_id_values)
            # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))
            # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)
            # # Compute grid masks
            # grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, sample_args.neighborhood_size, sample_args.grid_size, sample_args.use_cuda)
            # x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)

            if sample_args.use_cuda:
                x_seq = x_seq.cuda()

            if sample_args.method == 3:  # vanilla lstm
                ret_x_seq, loss = sample_validation_data_vanilla(
                    x_seq, PedsList_seq, sample_args, net, lookup_seq,
                    numPedsList_seq, dataloader)

            else:
                ret_x_seq, loss = sample_validation_data(
                    x_seq, PedsList_seq, grid_seq, sample_args, net,
                    lookup_seq, numPedsList_seq, dataloader)

            # <---------------------Experimental inverse block -------------->
            # ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, target_id_values, first_values_dict)
            # ret_x_seq = rotate_traj_with_target_ped(ret_x_seq, -angle, PedsList_seq, lookup_seq)
            # ret_x_seq = translate(ret_x_seq, PedsList_seq, lookup_seq ,-target_id_values)
            # revert the points back to original space
            ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq,
                                   first_values_dict)

            err = get_mean_error(ret_x_seq.data, orig_x_seq.data, PedsList_seq,
                                 PedsList_seq, sample_args.use_cuda,
                                 lookup_seq)
            f_err = get_final_error(ret_x_seq.data, orig_x_seq.data,
                                    PedsList_seq, PedsList_seq, lookup_seq)

            loss_batch += loss
            err_batch += err
            f_err_batch += f_err
            results.append(
                (orig_x_seq.data.cpu().numpy(), ret_x_seq.data.cpu().numpy(),
                 PedsList_seq, lookup_seq,
                 dataloader.get_frame_sequence(sample_args.seq_length),
                 target_id))

        end = time.time()
        print('Current file : ', dataloader.get_file_name(0), ' Batch : ',
              batch + 1, ' Sequence: ', sequence + 1, ' Sequence mean error: ',
              err, ' Sequence final error: ', f_err, ' time: ', end - start)
        loss_batch = loss_batch / dataloader.batch_size
        err_batch = err_batch / dataloader.batch_size
        f_err_batch = f_err_batch / dataloader.batch_size
        num_of_batch += 1
        loss_epoch += loss_batch.item()
        err_epoch += err_batch
        f_err_epoch += f_err_batch

    epoch_result.append(results)

    if dataloader.num_batches != 0:
        loss_epoch = loss_epoch / dataloader.num_batches
        err_epoch = err_epoch / dataloader.num_batches
        f_err_epoch = f_err_epoch / dataloader.num_batches
        print(
            'valid_loss = {:.3f}, valid_mean_err = {:.3f}, valid_final_err = {:.3f}'
            .format(loss_epoch, err_epoch, f_err_epoch))

    dataloader.write_to_plot_file(
        epoch_result,
        os.path.join(plot_directory, plot_validation_file_directory))
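
All of the test scripts above expect a save directory that contains a pickled argparse namespace (config.pkl) and checkpoints named like srnn_model_<epoch>.tar or <method>_lstm_model_<epoch>.tar with 'epoch' and 'state_dict' entries. The training scripts that produce these files are not part of this listing; the sketch below (save_training_state and tar_prefix are hypothetical names) only shows the save-side counterpart these loaders imply.

import os
import pickle
import torch

def save_training_state(save_directory, args, net, epoch, tar_prefix='srnn_model_'):
    """Write the files the test scripts read back: config.pkl plus an epoch checkpoint."""
    os.makedirs(save_directory, exist_ok=True)
    with open(os.path.join(save_directory, 'config.pkl'), 'wb') as f:
        pickle.dump(args, f)                   # read back later via pickle.load
    torch.save({'epoch': epoch, 'state_dict': net.state_dict()},
               os.path.join(save_directory, tar_prefix + str(epoch) + '.tar'))
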
Example #7
def main():
    # os.chdir('/home/serene/Documents/KITTIData/GT/')
    # os.chdir('/home/siri0005/srnn-pytorch-master/')#/srnn-pytorch-master

    os.chdir('/home/serene/Documents/copy_srnn_pytorch/srnn-pytorch-master')
    H_path = ['././pedestrians/ewap_dataset/seq_eth/H.txt',
              '././pedestrians/ewap_dataset/seq_hotel/H.txt',
              '././pedestrians/ucy_crowd/data_zara01/H.txt',
              '././pedestrians/ucy_crowd/data_zara02/H.txt',
              '././pedestrians/ucy_crowd/data_students03/H.txt']
    avg = []
    ade = []
    # ,1,2,3,
    # base_dir = '/home/serene/Downloads/srnn-pytorch-master/' #'../MultiNodeAttn_HH/' #'../fine_obstacle/prelu/p_02/'
    base_dir = '../MultiNodeAttn_HH/' #'/home/serene/Downloads/ablation/'
    for i in [1]:
        with open(base_dir+'log/{0}/log_attention/val.txt'.format(i)) as val_f:
            best_val = val_f.readline()
            # e = 45
            [e, val] = best_val.split(',')

        parser = argparse.ArgumentParser()
        # Observed length of the trajectory parameter
        parser.add_argument('--obs_length', type=int, default=8,
                            help='Observed length of the trajectory')
        # Predicted length of the trajectory parameter
        parser.add_argument('--pred_length', type=int, default=12,
                            help='Predicted length of the trajectory')
        # Test dataset
        parser.add_argument('--test_dataset', type=int, default=i,
                            help='Dataset to be tested on')

        # Model to be loaded
        parser.add_argument('--epoch', type=int, default=e,
                            help='Epoch of model to be loaded')

        parser.add_argument('--use_cuda', action="store_true", default=True,
                            help="Use GPU or CPU")

        # Parse the parameters
        sample_args = parser.parse_args()

        # Save directory
        save_directory = base_dir+'save/{0}/save_attention'.format(i)
        plot_directory = base_dir +  '/selected_plots/' #'plot_1/'
        # save_directory = './srnn-pytorch-master/fine_obstacle/save/{0}/save_attention'.format(i)
        #'/home/serene/Documents/copy_srnn_pytorch/srnn-pytorch-master/save/pixel_data/100e/'
        #'/home/serene/Documents/InVehicleCamera/save_kitti/'

        # save_directory += str(sample_args.test_dataset)+'/'
        # save_directory += 'save_attention'

        # Define the path for the config file for saved args
        with open(os.path.join(save_directory, 'config.pkl'), 'rb') as f:
            saved_args = pickle.load(f)

        # Initialize net
        net = SRNN(saved_args, True)
        net.cuda()

        ## TODO: visualize trained weights
        # plt.imshow(net.humanNodeRNN.edge_embed.weight)
        # plt.colorbar()
        # plt.show()
        checkpoint_path = os.path.join(save_directory, 'srnn_model_'+str(sample_args.epoch)+'.tar')

        if os.path.isfile(checkpoint_path):
            print('Loading checkpoint')
            checkpoint = torch.load(checkpoint_path)
            # model_iteration = checkpoint['iteration']
            model_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['state_dict'])
            print('Loaded checkpoint at {}'.format(model_epoch))

        H_mat = np.loadtxt(H_path[i])

        avg = []
        ade = []
        # Dataset to get data from
        dataset = [sample_args.test_dataset]
        for c in range(30):
            dataloader = DataLoader(1, sample_args.pred_length + sample_args.obs_length, dataset, True, infer=True)

            dataloader.reset_batch_pointer()

            # Construct the ST-graph object
            stgraph = ST_GRAPH(1, sample_args.pred_length + sample_args.obs_length)

            results = []

            # Variable to maintain total error
            total_error = 0
            final_error = 0
            minimum = 1000
            min_final = 1000
            for batch in range(dataloader.num_batches):
                start = time.time()

                # Get the next batch
                x, _, frameIDs, d = dataloader.next_batch(randomUpdate=False)

                # Construct ST graph
                stgraph.readGraph(x, ds_ptr=d, threshold=1.0)

                nodes, edges, nodesPresent, edgesPresent = stgraph.getSequence()
                #obsNodes, obsEdges, obsNodesPresent, obsEdgesPresent

                # Convert to cuda variables
                nodes = Variable(torch.from_numpy(nodes).float(), volatile=True).cuda()
                edges = Variable(torch.from_numpy(edges).float(), volatile=True).cuda()

                # obsNodes = Variable(torch.from_numpy(obsNodes).float()).cuda()
                # obsEdges = Variable(torch.from_numpy(obsEdges).float()).cuda()
                # NOTE: obs_ : observed
                # Separate out the observed part of the trajectory
                obs_nodes, obs_edges = nodes[:sample_args.obs_length], edges[:sample_args.obs_length]
                obs_nodesPresent, obs_edgesPresent = nodesPresent[:sample_args.obs_length], edgesPresent[:sample_args.obs_length]

                # Separate out the observed obstacles in a given sequence
                # obsnodes_v, obsEdges_v , obsNodesPresent_v , obsEdgesPresent_v = obsNodes[:sample_args.obs_length], obsEdges[:sample_args.obs_length], obsNodesPresent[:sample_args.obs_length], obsEdgesPresent[:sample_args.obs_length]

                # Sample function
                ret_nodes, ret_attn = sample(obs_nodes, obs_edges, obs_nodesPresent, obs_edgesPresent,
                                             sample_args, net, nodes, edges, nodesPresent)
                # (obstacle arguments obsnodes_v, obsEdges_v, obsNodesPresent_v, obsEdgesPresent_v
                #  were previously passed here as well)
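                # ret_nodes contains the model's predicted positions for the full sequence;
                # ret_attn contains the attention weights returned alongside them.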

                # Compute mean and final displacement error
                total_error += get_mean_error(ret_nodes[sample_args.obs_length:].data, nodes[sample_args.obs_length:].data,
                                              nodesPresent[sample_args.obs_length - 1],
                                              nodesPresent[sample_args.obs_length:], H_mat, i)

                final_error += get_final_error(ret_nodes[sample_args.obs_length:].data, nodes[sample_args.obs_length:].data,
                                               nodesPresent[sample_args.obs_length - 1],
                                               nodesPresent[sample_args.obs_length:], H_mat, i)
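                # ADE: displacement averaged over all predicted frames; FDE: displacement at the
                # final predicted frame. Both are accumulated over batches and averaged below.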

                # if total_error < minimum:
                #     minimum = total_error
                # if final_error < min_final:
                #     min_final = final_error

                end = time.time()

                # Store results
                results.append((nodes.data.cpu().numpy(), ret_nodes.data.cpu().numpy(), nodesPresent, sample_args.obs_length, ret_attn, frameIDs))
                # zfill = 3

                # for i in range(len(results)):
                #     skip = str(int(results[i][5][0][8])).zfill(zfill)
                #     # img_file = '/home/serene/Documents/video/hotel/frame-{0}.jpg'.format(skip)
                #     # for j in range(20):
                #     #     if i == 40:
                #
                #     img_file = '/home/serene/Documents/copy_srnn_pytorch/data/ucy/zara/zara.png'
                #     name = plot_directory  + 'sequence_zara' + str(i)  # /pedestrian_1
                #     # for k in range(20):
                #     vis.plot_trajectories(results[i][0], results[i][1], results[i][2], results[i][3], name,
                #                       plot_directory, img_file, 1)
                #     if int(skip) >= 999 and zfill < 4:
                #         zfill = zfill + 1
                #     elif int(skip) >= 9999 and zfill < 5:
                #         zfill = zfill + 1

                # Reset the ST graph
                stgraph.reset()

            print('Total mean error of the model is ', total_error / dataloader.num_batches)
            print('Total final error of the model is ', final_error / dataloader.num_batches)
            ade.append(total_error / dataloader.num_batches)
            avg.append(final_error / dataloader.num_batches)
            print('Saving results')
            with open(os.path.join(save_directory, 'results.pkl'), 'wb') as f:
                pickle.dump(results, f)

        print('average FDE', np.average(avg))
        print('average ADE', np.average(ade))

        with open(os.path.join(save_directory, 'sampling_results.txt'), 'wb') as o:
            np.savetxt(o, (ade, avg), fmt='%.03e')
Example #8
0
def main():

    parser = argparse.ArgumentParser()
    # Input and output dimensionality of the model
    parser.add_argument('--input_size', type=int, default=2)
    parser.add_argument('--output_size', type=int, default=5)
    parser.add_argument('--seq_length',
                        type=int,
                        default=20,
                        help='RNN sequence length')

    # Size of each batch parameter
    parser.add_argument('--batch_size',
                        type=int,
                        default=10,
                        help='minibatch size')

    parser.add_argument('--num_samples',
                        type=int,
                        default=500,
                        help='Number of random configurations to be tested')

    parser.add_argument('--num_epochs',
                        type=int,
                        default=3,
                        help='number of epochs')
    # Maximum number of pedestrians to be considered
    parser.add_argument('--maxNumPeds',
                        type=int,
                        default=27,
                        help='Maximum Number of Pedestrians')
    # cuda support
    parser.add_argument('--use_cuda',
                        action="store_true",
                        default=False,
                        help='Use GPU or not')
    # drive support
    parser.add_argument('--drive',
                        action="store_true",
                        default=False,
                        help='Use Google drive or not')
    # number of validation datasets to be used
    parser.add_argument(
        '--num_validation',
        type=int,
        default=1,
        help='Total number of validation datasets to be visualized')
    # gru model
    parser.add_argument('--gru',
                        action="store_true",
                        default=False,
                        help='True : GRU cell, False: LSTM cell')
    # method selection for the hyperparameter search
    parser.add_argument(
        '--method',
        type=int,
        default=1,
        help=
        'LSTM method to be used (1 = social LSTM, 2 = obstacle LSTM, 3 = vanilla LSTM)'
    )
    # number of parameter sets to be logged
    parser.add_argument('--best_n',
                        type=int,
                        default=100,
                        help='Number of best configurations to be logged')

    # Parse the parameters
    #sample_args = parser.parse_args()
    args = parameters(parser)

    args.best_n = np.clip(args.best_n, 0, args.num_samples)

    #for drive run
    prefix = ''
    f_prefix = '.'
    if args.drive is True:
        prefix = 'drive/semester_project/social_lstm_final/'
        f_prefix = 'drive/semester_project/social_lstm_final'

    method_name = get_method_name(args.method)
    model_name = "LSTM"
    save_tar_name = method_name + "_lstm_model_"
    if args.gru:
        model_name = "GRU"
        save_tar_name = method_name + "_gru_model_"

    # directory and file name for the hyperparameter search log
    param_log = os.path.join(f_prefix)
    param_log_file = "hyperparameter"

    origin = (0, 0)
    reference_point = (0, 1)
    score = []
    param_set = []

    # Create the DataLoader object
    create_directories(param_log, [param_log_file])
    log_file = open(os.path.join(param_log, param_log_file, 'log.txt'), 'w+')

    dataloader_t = DataLoader(f_prefix,
                              args.batch_size,
                              args.seq_length,
                              num_of_validation=args.num_validation,
                              forcePreProcess=True,
                              infer=True)
    dataloader_v = DataLoader(f_prefix,
                              1,
                              args.seq_length,
                              num_of_validation=args.num_validation,
                              forcePreProcess=True,
                              infer=True)

    for hyperparams in itertools.islice(sample_hyperparameters(),
                                        args.num_samples):
        args = parameters(parser)
        # randomly sample a parameter set
        args.rnn_size = hyperparams.pop("rnn_size")
        args.learning_schedule = hyperparams.pop("learning_schedule")
        args.grad_clip = hyperparams.pop("grad_clip")
        args.learning_rate = hyperparams.pop("learning_rate")
        args.lambda_param = hyperparams.pop("lambda_param")
        args.dropout = hyperparams.pop("dropout")
        args.embedding_size = hyperparams.pop("embedding_size")
        args.neighborhood_size = hyperparams.pop("neighborhood_size")
        args.grid_size = hyperparams.pop("grid_size")
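        # Each iteration of the random search pops one sampled value per hyperparameter from
        # sample_hyperparameters() and copies it onto `args`, so the rest of the loop runs a
        # full train/validate cycle with that configuration.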

        log_file.write("##########Parameters########" + '\n')
        print("##########Parameters########")
        write_to_file(log_file, args)
        print_to_screen(args)

        net = get_model(args.method, args)

        if args.use_cuda:
            net = net.cuda()

        if (args.learning_schedule == "RMSprop"):
            optimizer = torch.optim.RMSprop(net.parameters(),
                                            lr=args.learning_rate)
        elif (args.learning_schedule == "adagrad"):
            optimizer = torch.optim.Adagrad(net.parameters(),
                                            weight_decay=args.lambda_param)
        else:
            optimizer = torch.optim.Adam(net.parameters(),
                                         weight_decay=args.lambda_param)
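        # `learning_schedule` selects the optimizer: RMSprop uses the sampled learning rate,
        # while Adagrad and Adam are configured with L2 weight decay (lambda_param) here.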

        learning_rate = args.learning_rate

        total_process_start = time.time()

        # Training
        for epoch in range(args.num_epochs):
            print('****************Training epoch beginning******************')
            dataloader_t.reset_batch_pointer()
            loss_epoch = 0

            # For each batch
            for batch in range(dataloader_t.num_batches):
                start = time.time()

                # Get batch data
                x, y, d, numPedsList, PedsList, target_ids = dataloader_t.next_batch(
                )

                loss_batch = 0

                # For each sequence
                for sequence in range(dataloader_t.batch_size):
                    # Get the data corresponding to the current sequence
                    x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[
                        sequence], y[sequence], d[sequence], numPedsList[
                            sequence], PedsList[sequence]
                    target_id = target_ids[sequence]

                    #get the file currently being processed and its dataset dimensions
                    folder_name = dataloader_t.get_directory_name_with_pointer(
                        d_seq)
                    dataset_data = dataloader_t.get_dataset_dimension(
                        folder_name)

                    #dense vector creation
                    x_seq, lookup_seq = dataloader_t.convert_proper_array(
                        x_seq, numPedsList_seq, PedsList_seq)
                    target_id_values = x_seq[0][lookup_seq[target_id], 0:2]
                    #grid mask calculation
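                    # getSequenceGridMask builds, per frame, an occupancy grid of neighbouring
                    # pedestrians (grid_size x grid_size cells within neighborhood_size); the
                    # obstacle variant passes an extra flag. (Behaviour assumed from the standard
                    # social-LSTM grid mask; the helper is defined elsewhere in the repo.)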
                    if args.method == 2:  #obstacle lstm
                        grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                       PedsList_seq,
                                                       args.neighborhood_size,
                                                       args.grid_size,
                                                       args.use_cuda, True)
                    elif args.method == 1:  #social lstm
                        grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                       PedsList_seq,
                                                       args.neighborhood_size,
                                                       args.grid_size,
                                                       args.use_cuda)
                    # vectorize trajectories in sequence
                    x_seq, _ = vectorize_seq(x_seq, PedsList_seq, lookup_seq)
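                    # vectorize_seq appears to shift each trajectory relative to its first observed
                    # position (the validation loop keeps the offsets and reverts them with
                    # revert_seq); the offsets are discarded here during training.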

                    if args.use_cuda:
                        x_seq = x_seq.cuda()

                    #number of unique pedestrians in this sequence
                    numNodes = len(lookup_seq)

                    hidden_states = Variable(
                        torch.zeros(numNodes, args.rnn_size))
                    if args.use_cuda:
                        hidden_states = hidden_states.cuda()

                    cell_states = Variable(torch.zeros(numNodes,
                                                       args.rnn_size))
                    if args.use_cuda:
                        cell_states = cell_states.cuda()

                    # Zero out gradients
                    net.zero_grad()
                    optimizer.zero_grad()

                    # Forward prop
                    if args.method == 3:  #vanilla lstm
                        outputs, _, _ = net(x_seq, hidden_states, cell_states,
                                            PedsList_seq, numPedsList_seq,
                                            dataloader_t, lookup_seq)

                    else:
                        outputs, _, _ = net(x_seq, grid_seq, hidden_states,
                                            cell_states, PedsList_seq,
                                            numPedsList_seq, dataloader_t,
                                            lookup_seq)

                    # Compute loss
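                    # Gaussian2DLikelihood is the usual bivariate-Gaussian negative log-likelihood
                    # of the ground-truth positions under the predicted parameters (output_size = 5).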
                    loss = Gaussian2DLikelihood(outputs, x_seq, PedsList_seq,
                                                lookup_seq)
                    loss_batch += loss.item()

                    # Compute gradients
                    loss.backward()

                    # Clip gradients
                    torch.nn.utils.clip_grad_norm_(net.parameters(),
                                                   args.grad_clip)

                    # Update parameters
                    optimizer.step()

                end = time.time()
                loss_batch = loss_batch / dataloader_t.batch_size
                loss_epoch += loss_batch

                print(
                    '{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'
                    .format(epoch * dataloader_t.num_batches + batch,
                            args.num_epochs * dataloader_t.num_batches, epoch,
                            loss_batch, end - start))

            loss_epoch /= dataloader_t.num_batches
            # Log loss values
            log_file.write("Training epoch: " + str(epoch) + " loss: " +
                           str(loss_epoch) + '\n')

        net = get_model(args.method, args, True)

        if args.use_cuda:
            net = net.cuda()

        if (args.learning_schedule == "RMSprop"):
            optimizer = torch.optim.RMSprop(net.parameters(),
                                            lr=args.learning_rate)
        elif (args.learning_schedule == "adagrad"):
            optimizer = torch.optim.Adagrad(net.parameters(),
                                            weight_decay=args.lambda_param)
        else:
            optimizer = torch.optim.Adam(net.parameters(),
                                         weight_decay=args.lambda_param)

        print(
            '****************Validation dataset batch processing******************'
        )
        dataloader_v.reset_batch_pointer()
        dataset_pointer_ins = dataloader_v.dataset_pointer

        loss_epoch = 0
        err_epoch = 0
        f_err_epoch = 0
        num_of_batch = 0
        smallest_err = 100000

        # For each batch
        for batch in range(dataloader_v.num_batches):
            start = time.time()
            # Get batch data
            x, y, d, numPedsList, PedsList, target_ids = dataloader_v.next_batch(
            )

            if dataset_pointer_ins != dataloader_v.dataset_pointer:
                if dataloader_v.dataset_pointer != 0:
                    print('Finished processing file : ',
                          dataloader_v.get_file_name(-1), ' Average error : ',
                          err_epoch / num_of_batch)
                    num_of_batch = 0
                dataset_pointer_ins = dataloader_v.dataset_pointer

            # Loss for this batch
            loss_batch = 0
            err_batch = 0
            f_err_batch = 0

            # For each sequence
            for sequence in range(dataloader_v.batch_size):
                # Get data corresponding to the current sequence
                x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[
                    sequence], y[sequence], d[sequence], numPedsList[
                        sequence], PedsList[sequence]
                target_id = target_ids[sequence]

                folder_name = dataloader_v.get_directory_name_with_pointer(
                    d_seq)
                dataset_data = dataloader_v.get_dataset_dimension(folder_name)

                #dense vector creation
                x_seq, lookup_seq = dataloader_v.convert_proper_array(
                    x_seq, numPedsList_seq, PedsList_seq)

                #keep the original (absolute) positions for error calculation
                orig_x_seq = x_seq.clone()
                target_id_values = x_seq[0][lookup_seq[target_id], 0:2]

                #grid mask calculation
                if args.method == 2:  #obstacle lstm
                    grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                   PedsList_seq,
                                                   args.neighborhood_size,
                                                   args.grid_size,
                                                   args.use_cuda, True)
                elif args.method == 1:  #social lstm
                    grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                   PedsList_seq,
                                                   args.neighborhood_size,
                                                   args.grid_size,
                                                   args.use_cuda)
                # vectorize trajectories in sequence
                x_seq, first_values_dict = vectorize_seq(
                    x_seq, PedsList_seq, lookup_seq)

                # <--------------Experimental block --------------->
                # Construct variables
                # x_seq, lookup_seq = dataloader_v.convert_proper_array(x_seq, numPedsList_seq, PedsList_seq)
                # x_seq, target_id_values, first_values_dict = vectorize_seq_with_ped(x_seq, PedsList_seq, lookup_seq ,target_id)
                # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))
                # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)

                if args.use_cuda:
                    x_seq = x_seq.cuda()

                if args.method == 3:  #vanilla lstm
                    ret_x_seq, loss = sample_validation_data_vanilla(
                        x_seq, PedsList_seq, args, net, lookup_seq,
                        numPedsList_seq, dataloader_v)

                else:
                    ret_x_seq, loss = sample_validation_data(
                        x_seq, PedsList_seq, grid_seq, args, net, lookup_seq,
                        numPedsList_seq, dataloader_v)

                #revert the points back to original space
                ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq,
                                       first_values_dict)

                err = get_mean_error(ret_x_seq.data, orig_x_seq.data,
                                     PedsList_seq, PedsList_seq, args.use_cuda,
                                     lookup_seq)
                f_err = get_final_error(ret_x_seq.data, orig_x_seq.data,
                                        PedsList_seq, PedsList_seq, lookup_seq)

                # ret_x_seq = rotate_traj_with_target_ped(ret_x_seq, -angle, PedsList_seq, lookup_seq)
                # ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, target_id_values, first_values_dict)

                loss_batch += loss.item()
                err_batch += err
                f_err_batch += f_err

            end = time.time()
            print('Current file : ', dataloader_v.get_file_name(0),
                  ' Batch : ', batch + 1, ' Sequence: ', sequence + 1,
                  ' Sequence mean error: ', err, ' Sequence final error: ',
                  f_err, ' time: ', end - start)
            loss_batch = loss_batch / dataloader_v.batch_size
            err_batch = err_batch / dataloader_v.batch_size
            f_err_batch = f_err_batch / dataloader_v.batch_size
            num_of_batch += 1
            loss_epoch += loss_batch
            err_epoch += err_batch
            f_err_epoch += f_err_batch

        total_process_end = time.time()
        if dataloader_v.num_batches != 0:
            loss_epoch = loss_epoch / dataloader_v.num_batches
            err_epoch = err_epoch / dataloader_v.num_batches
            f_err_epoch = f_err_epoch / dataloader_v.num_batches
            # calculate average error and elapsed time
            avg_err = (err_epoch + f_err_epoch) / 2
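            # The configuration's score is the mean of its validation ADE and FDE (lower is better).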
            elapsed_time = (total_process_end - total_process_start)
            args.time = elapsed_time
            args.avg_err = avg_err

            score.append(avg_err)
            param_set.append(args)

            print(
                'valid_loss = {:.3f}, valid_mean_err = {:.3f}, valid_final_err = {:.3f}, score = {:.3f}, time = {:.3f}'
                .format(loss_epoch, err_epoch, f_err_epoch, avg_err,
                        elapsed_time))
            log_file.write(
                'valid_loss = {:.3f}, valid_mean_err = {:.3f}, valid_final_err = {:.3f}, score = {:.3f}, time = {:.3f}'
                .format(loss_epoch, err_epoch, f_err_epoch, avg_err,
                        elapsed_time) + '\n')

    print("--------------------------Best ", args.best_n,
          " configuration------------------------")
    log_file.write("-----------------------------Best " + str(args.best_n) +
                   " configuration---------------------" + '\n')
    biggest_indexes = np.array(score).argsort()[-args.best_n:]
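    # Note: argsort()[-best_n:] selects the indices of the best_n *largest* scores; since `score`
    # stores the average error, these are the highest-error configurations.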
    print("biggest_index: ", biggest_indexes)
    for arr_index, index in enumerate(biggest_indexes):
        print("&&&&&&&&&&&&&&&&&&&& ", arr_index, " &&&&&&&&&&&&&&&&&&&&&&")
        log_file.write("&&&&&&&&&&&&&&&&&&&& " + str(arr_index) +
                       " &&&&&&&&&&&&&&&&&&&&&&" + '\n')
        curr_arg = param_set[index]
        write_to_file(log_file, curr_arg)
        print_to_screen(curr_arg)
        print("score: ", score)
        print('error = {:.3f}, time = {:.3f}'.format(curr_arg.avg_err,
                                                     curr_arg.time))
        log_file.write('error = {:.3f}, time = {:.3f}'.format(
            curr_arg.avg_err, curr_arg.time) + '\n')