Code Example #1
def get_model(index, arguments, infer=False):
    # return a model given index and arguments
    if index == 1:
        return SocialModel(arguments, infer)
    elif index == 2:
        return OLSTMModel(arguments, infer)
    elif index == 3:
        return VLSTMModel(arguments, infer)
    else:
        return SocialModel(arguments, infer)
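# Usage sketch (hedged; `args.method` is a hypothetical flag name, the real caller
# may pass the index differently):
#   net = get_model(args.method, args)            # e.g. 1 -> SocialModel
#   net = get_model(3, args, infer=True)          # VLSTMModel in inference mode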
Code Example #2
def train(args):
    origin = (0,0)
    reference_point = (0,1)
    validation_dataset_executed = False
  
    prefix = ''
    f_prefix = args.data_dir
    # if args.drive is True:
    #   prefix='drive/semester_project/social_lstm_final/'
    #   f_prefix = 'drive/semester_project/social_lstm_final'

    print('data_dir:', args.data_dir)
    # if not os.path.isdir("log/"):
    #   print("Directory creation script is running...")
    #   subprocess.call(['make_directories.sh'])

    args.freq_validation = np.clip(args.freq_validation, 0, args.num_epochs)
    validation_epoch_list = list(range(args.freq_validation, args.num_epochs+1, args.freq_validation))
    validation_epoch_list[-1] -= 1
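    # e.g. with num_epochs = 30 and freq_validation = 10 (hypothetical values) this
    # yields [10, 20, 29]; the last entry is pulled back by one so that the final
    # epoch (index num_epochs - 1) itself appears in the list and triggers the
    # dataset validation pass below.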



    # Create the data loader object. This object would preprocess the data in terms of
    # batches each of size args.batch_size, of length args.seq_length
    dataloader = DataLoader(f_prefix, args.batch_size, args.seq_length, args.num_validation, forcePreProcess=True)

    model_name = "LSTM"
    method_name = "SOCIALLSTM"
    save_tar_name = method_name+"_lstm_model_"
    if args.gru:
        model_name = "GRU"
        save_tar_name = method_name+"_gru_model_"


    # Log directory
    log_directory = os.path.join(prefix, 'log/')
    plot_directory = os.path.join(prefix, 'plot/', method_name, model_name)
    plot_train_file_directory = 'validation'



    # Logging files
    log_file_curve = open(os.path.join(log_directory, method_name, model_name,'log_curve.txt'), 'w+')
    log_file = open(os.path.join(log_directory, method_name, model_name, 'val.txt'), 'w+')

    # model directory
    save_directory = os.path.join(prefix, 'model/')
    
    # Save the arguments in the config file
    with open(os.path.join(save_directory, method_name, model_name, 'config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    # Path to store the checkpoint file
    def checkpoint_path(x):
        return os.path.join(save_directory, method_name, model_name, save_tar_name+str(x)+'.tar')
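    # e.g. checkpoint_path(5) -> 'model/SOCIALLSTM/LSTM/SOCIALLSTM_lstm_model_5.tar'
    # (or '..._gru_model_5.tar' when --gru is set)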

    # model creation
    net = SocialModel(args)
    if args.use_cuda:
        net = net.cuda()

    #optimizer = torch.optim.RMSprop(net.parameters(), lr=args.learning_rate)
    optimizer = torch.optim.Adagrad(net.parameters(), weight_decay=args.lambda_param)
    #optimizer = torch.optim.Adam(net.parameters(), weight_decay=args.lambda_param)

    learning_rate = args.learning_rate

    best_val_loss = 100
    best_val_data_loss = 100

    smallest_err_val = 100000
    smallest_err_val_data = 100000


    best_epoch_val = 0
    best_epoch_val_data = 0

    best_err_epoch_val = 0
    best_err_epoch_val_data = 0

    all_epoch_results = []
    grids = []
    num_batch = 0
    dataset_pointer_ins_grid = -1

    # One (initially empty) cache of grid masks per dataset
    grids = [[] for _ in range(dataloader.get_len_of_dataset())]

    # Training
    for epoch in range(args.num_epochs):
        print('****************Training epoch beginning******************')
        if dataloader.additional_validation and (epoch-1) in validation_epoch_list:
            dataloader.switch_to_dataset_type(True)
        dataloader.reset_batch_pointer(valid=False)
        loss_epoch = 0

        # For each batch
        for batch in range(dataloader.num_batches):
            start = time.time()

            # Get batch data
            x, y, d, numPedsList, PedsList, target_ids = dataloader.next_batch()
            loss_batch = 0
            
            # If we have moved on to a new dataset, reset the batch counter
            if dataset_pointer_ins_grid != dataloader.dataset_pointer and epoch != 0:
                num_batch = 0
                dataset_pointer_ins_grid = dataloader.dataset_pointer


            # For each sequence
            for sequence in range(dataloader.batch_size):
                # Get the data corresponding to the current sequence
                x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[sequence], y[sequence], d[sequence], numPedsList[sequence], PedsList[sequence]

                target_id = target_ids[sequence]

                #get processing file name and then get dimensions of file
                folder_name = dataloader.get_directory_name_with_pointer(d_seq)
                dataset_data = dataloader.get_dataset_dimension(folder_name)

                #dense vector creation
                x_seq, lookup_seq = dataloader.convert_proper_array(x_seq, numPedsList_seq, PedsList_seq)

                target_id_values = x_seq[0][lookup_seq[target_id], 0:2]

                # grid mask calculation and storage depending on the grid parameter
                if args.grid:
                    if epoch == 0:
                        grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, args.neighborhood_size, args.grid_size, args.use_cuda)
                        grids[dataloader.dataset_pointer].append(grid_seq)
                    else:
                        grid_seq = grids[dataloader.dataset_pointer][(num_batch*dataloader.batch_size)+sequence]
                else:
                    grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, args.neighborhood_size, args.grid_size, args.use_cuda)
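                # Note (assumption about getSequenceGridMask, not stated here): for each
                # frame it is expected to return an occupancy mask of roughly shape
                # (numPeds, numPeds, grid_size*grid_size) marking which neighbours of each
                # pedestrian fall into which cell of its neighborhood_size pooling window,
                # i.e. the social-pooling mask of Social LSTM.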

                # vectorize trajectories in sequence
                if args.use_cuda:
                    x_seq = x_seq.cuda()
                x_seq, _ = vectorize_seq(x_seq, PedsList_seq, lookup_seq)
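                # vectorize_seq (per the description in the experimental block below) is
                # assumed to subtract each pedestrian's first-frame position from its later
                # positions, e.g. [(2, 3), (2.5, 3.5), (3, 4)] -> [(0, 0), (0.5, 0.5), (1, 1)],
                # returning the offsets plus a dict of first-frame values for revert_seq.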

                
                
                # <---------------------- Experimental block ----------------------->
                # Main approach:
                # 1) Translate all trajectories using the first frame value of the target trajectory so that the target trajectory starts at (0, 0).
                # 2) Get the angle between the first trajectory point of the target ped and (0, 1) for turning.
                # 3) Rotate all trajectories in the sequence using this angle.
                # 4) Calculate the grid mask for hidden layer pooling.
                # 5) Vectorize all trajectories (subtract the first frame values of each trajectory from the subsequent points in the trajectory).
                #
                # Problem:
                #  Low accuracy
                #
                # Possible causes:
                # * Each function has already been checked individually -> low possibility.
                # * Logic or algorithm errors -> high possibility.
                # * Wrong execution order of the steps -> high possibility.
                # <------------------------------------------------------------------------>

                # x_seq = translate(x_seq, PedsList_seq, lookup_seq ,target_id_values)

                # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))

                # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)
                
                # if(args.grid):
                #     if(epoch is 0):
                #         grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq,args.neighborhood_size, args.grid_size, args.use_cuda)
                #         grids[dataloader.dataset_pointer].append(grid_seq)
                #     else:
                #         #grid_seq1 = getSequenceGridMask(x_seq, dataset_data, PedsList_seq,args.neighborhood_size, args.grid_size, args.use_cuda)
                #         grid_seq = grids[dataloader.dataset_pointer][(num_batch*dataloader.batch_size)+sequence]
                #         #print([ torch.equal(x.data, y.data) for (x,y) in zip(grid_seq1, grid_seq)])
                #         #if not (all([ torch.equal(x.data, y.data) for (x,y) in zip(grid_seq1, grid_seq)])):
                #         #    print("not equal")
                #         #    quit()
                # else:
                #     grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq,args.neighborhood_size, args.grid_size, args.use_cuda)
                
                # x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)

                
                #print(grid_seq)

                # Construct variables
                #print("target id : ", target_id)
                #print("look up : ", lookup_seq)
                #print("pedlist_seq: ", PedsList_seq)
                #print("before_xseq: ", x_seq)
                #x_seq, target_id_values, first_values_dict = vectorize_seq_with_ped(x_seq, PedsList_seq, lookup_seq ,target_id)
                #print("after_vectorize_seq: ", x_seq)
                #print("angle: ", np.rad2deg(angle))
                #print("after_xseq: ", x_seq)
                #x_seq = rotate_traj_with_target_ped(x_seq, -angle, PedsList_seq, lookup_seq)
                #x_seq = revert_seq(x_seq, PedsList_seq, lookup_seq, first_values_dict)





                #number of peds in this sequence per frame
                numNodes = len(lookup_seq)


                hidden_states = Variable(torch.zeros(numNodes, args.rnn_size))
                if args.use_cuda:                    
                    hidden_states = hidden_states.cuda()

                cell_states = Variable(torch.zeros(numNodes, args.rnn_size))
                if args.use_cuda:                    
                    cell_states = cell_states.cuda()

                # Zero out gradients
                net.zero_grad()
                optimizer.zero_grad()
                

                # Forward prop
                outputs, _, _ = net(x_seq, grid_seq, hidden_states, cell_states, PedsList_seq,numPedsList_seq ,dataloader, lookup_seq)

                
                # Compute loss
                loss = Gaussian2DLikelihood(outputs, x_seq, PedsList_seq, lookup_seq)
                loss_batch += loss.item()

                # Compute gradients
                loss.backward()
                

                # Clip gradients
                torch.nn.utils.clip_grad_norm_(net.parameters(), args.grad_clip)

                # Update parameters
                optimizer.step()

            end = time.time()
            loss_batch = loss_batch / dataloader.batch_size
            loss_epoch += loss_batch
            num_batch+=1

            print('{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'.format(epoch * dataloader.num_batches + batch,
                                                                                    args.num_epochs * dataloader.num_batches,
                                                                                    epoch,
                                                                                    loss_batch, end - start))

        loss_epoch /= dataloader.num_batches
        # Log loss values
        log_file_curve.write("Training epoch: "+str(epoch)+" loss: "+str(loss_epoch)+'\n')

        if dataloader.valid_num_batches > 0:
            print('****************Validation epoch beginning******************')

            # Validation
            dataloader.reset_batch_pointer(valid=True)
            loss_epoch = 0
            err_epoch = 0

            # For each batch
            for batch in range(dataloader.valid_num_batches):
                # Get batch data
                x, y, d, numPedsList, PedsList, target_ids = dataloader.next_valid_batch()

                # Loss for this batch
                loss_batch = 0
                err_batch = 0


                # For each sequence
                for sequence in range(dataloader.batch_size):
                    # Get data corresponding to the current sequence
                    x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[sequence], y[sequence], d[sequence], numPedsList[sequence], PedsList[sequence]

                    target_id = target_ids[sequence]

                    #get processing file name and then get dimensions of file
                    folder_name = dataloader.get_directory_name_with_pointer(d_seq)
                    dataset_data = dataloader.get_dataset_dimension(folder_name)
                    
                    #dense vector creation
                    x_seq, lookup_seq = dataloader.convert_proper_array(x_seq, numPedsList_seq, PedsList_seq)

                    target_id_values = x_seq[0][lookup_seq[target_id], 0:2]
                    
                    #get grid mask
                    grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, args.neighborhood_size, args.grid_size, args.use_cuda)

                    if args.use_cuda:
                        x_seq = x_seq.cuda()
                    x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)


                    # <---------------------- Experimental block ----------------------->
                    # x_seq = translate(x_seq, PedsList_seq, lookup_seq ,target_id_values)
                    # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))
                    # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)
                    # grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, args.neighborhood_size, args.grid_size, args.use_cuda)
                    # x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)


                    #number of peds in this sequence per frame
                    numNodes = len(lookup_seq)

                    hidden_states = Variable(torch.zeros(numNodes, args.rnn_size))
                    if args.use_cuda:                    
                        hidden_states = hidden_states.cuda()
                    cell_states = Variable(torch.zeros(numNodes, args.rnn_size))
                    if args.use_cuda:                    
                        cell_states = cell_states.cuda()

                    # Forward prop
                    outputs, _, _ = net(x_seq[:-1], grid_seq[:-1], hidden_states, cell_states, PedsList_seq[:-1], numPedsList_seq , dataloader, lookup_seq)

                    # Compute loss
                    loss = Gaussian2DLikelihood(outputs, x_seq[1:], PedsList_seq[1:], lookup_seq)
                    # Extract the mean, std and corr of the bivariate Gaussian
                    mux, muy, sx, sy, corr = getCoef(outputs)
                    # Sample from the bivariate Gaussian
                    next_x, next_y = sample_gaussian_2d(mux.data, muy.data, sx.data, sy.data, corr.data, PedsList_seq[-1], lookup_seq)
                    next_vals = torch.FloatTensor(1,numNodes,2)
                    next_vals[:,:,0] = next_x
                    next_vals[:,:,1] = next_y
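                    # getCoef is assumed to split the 5-channel output into
                    # (mux, muy, sx, sy, corr), constraining sx, sy to be positive and
                    # corr to (-1, 1); sample_gaussian_2d then draws one (x, y) sample
                    # per pedestrian from that bivariate Gaussian for the last frame.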
                    err = get_mean_error(next_vals, x_seq[-1].data[None, : ,:], [PedsList_seq[-1]], [PedsList_seq[-1]], args.use_cuda, lookup_seq)

                    loss_batch += loss.item()
                    err_batch += err

                loss_batch = loss_batch / dataloader.batch_size
                err_batch = err_batch / dataloader.batch_size
                loss_epoch += loss_batch
                err_epoch += err_batch

            if dataloader.valid_num_batches != 0:            
                loss_epoch = loss_epoch / dataloader.valid_num_batches
                err_epoch = err_epoch / dataloader.valid_num_batches


                # Update best validation loss until now
                if loss_epoch < best_val_loss:
                    best_val_loss = loss_epoch
                    best_epoch_val = epoch

                if err_epoch<smallest_err_val:
                    smallest_err_val = err_epoch
                    best_err_epoch_val = epoch

                print('(epoch {}), valid_loss = {:.3f}, valid_err = {:.3f}'.format(epoch, loss_epoch, err_epoch))
                print('Best epoch', best_epoch_val, 'Best validation loss', best_val_loss, 'Best error epoch',best_err_epoch_val, 'Best error', smallest_err_val)
                log_file_curve.write("Validation epoch: "+str(epoch)+" loss: "+str(loss_epoch)+" err: "+str(err_epoch)+'\n')


        # Validation dataset
        if dataloader.additional_validation and (epoch) in validation_epoch_list:
            dataloader.switch_to_dataset_type()
            print('****************Validation with dataset epoch beginning******************')
            dataloader.reset_batch_pointer(valid=False)
            dataset_pointer_ins = dataloader.dataset_pointer
            validation_dataset_executed = True

            loss_epoch = 0
            err_epoch = 0
            f_err_epoch = 0
            num_of_batch = 0
            smallest_err = 100000

            #results of one epoch for all validation datasets
            epoch_result = []
            #results of one validation dataset
            results = []



            # For each batch
            for batch in range(dataloader.num_batches):
                # Get batch data
                x, y, d , numPedsList, PedsList ,target_ids = dataloader.next_batch()

                if dataset_pointer_ins != dataloader.dataset_pointer:
                    if dataloader.dataset_pointer != 0:
                        print('Finished processing file : ', dataloader.get_file_name(-1), ' Average error : ', err_epoch/num_of_batch)
                        num_of_batch = 0
                        epoch_result.append(results)

                    dataset_pointer_ins = dataloader.dataset_pointer
                    results = []



                # Loss for this batch
                loss_batch = 0
                err_batch = 0
                f_err_batch = 0

                # For each sequence
                for sequence in range(dataloader.batch_size):
                    # Get data corresponding to the current sequence
                    x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[sequence], y[sequence], d[sequence], numPedsList[sequence], PedsList[sequence]
                    target_id = target_ids[sequence]

                    #get processing file name and then get dimensions of file
                    folder_name = dataloader.get_directory_name_with_pointer(d_seq)
                    dataset_data = dataloader.get_dataset_dimension(folder_name)
                    
                    #dense vector creation
                    x_seq, lookup_seq = dataloader.convert_proper_array(x_seq, numPedsList_seq, PedsList_seq)
                    
                    #will be used for error calculation
                    orig_x_seq = x_seq.clone() 
                    
                    target_id_values = orig_x_seq[0][lookup_seq[target_id], 0:2]
                    
                    #grid mask calculation
                    grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, args.neighborhood_size, args.grid_size, args.use_cuda)
                    
                    #vectorize datapoints
                    if args.use_cuda:
                        x_seq = x_seq.cuda()
                    x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)

                    # <---------------------- Experimental block ----------------------->
                    # x_seq = translate(x_seq, PedsList_seq, lookup_seq ,target_id_values)
                    # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))
                    # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)
                    # grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, args.neighborhood_size, args.grid_size, args.use_cuda)
                    # x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)


                    if args.use_cuda:                    
                        x_seq = x_seq.cuda()

                    #sample predicted points from model
                    ret_x_seq, loss = sample_validation_data(x_seq, PedsList_seq, grid_seq, args, net, lookup_seq, numPedsList_seq, dataloader)
                    
                    #revert the points back to original space
                    ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, first_values_dict)

                    # <---------------------- Experimental block revert ----------------------->
                    # Revert the calculated coordinates back to the original space:
                    # 1) Convert points from vectors back to absolute coordinates
                    # 2) Rotate all trajectories by the reverse angle
                    # 3) Translate all trajectories back to the original space by adding the first frame value of the target ped trajectory
                    
                    # *It works without problems, which means that it reverts a trajectory back completely
                    
                    # Possible problems:
                    # * Algorithmic errors caused by the first experimental block -> high possibility
                    # <------------------------------------------------------------------------>

                    # ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, first_values_dict)

                    # ret_x_seq = rotate_traj_with_target_ped(ret_x_seq, -angle, PedsList_seq, lookup_seq)

                    # ret_x_seq = translate(ret_x_seq, PedsList_seq, lookup_seq ,-target_id_values)

                    #get mean and final error
                    err = get_mean_error(ret_x_seq.data, orig_x_seq.data, PedsList_seq, PedsList_seq, args.use_cuda, lookup_seq)
                    f_err = get_final_error(ret_x_seq.data, orig_x_seq.data, PedsList_seq, PedsList_seq, args.use_cuda, lookup_seq)
                    
                    loss_batch += loss.item()
                    err_batch += err
                    f_err_batch += f_err
                    print('Current file : ', dataloader.get_file_name(0),' Batch : ', batch+1, ' Sequence: ', sequence+1, ' Sequence mean error: ', err,' Sequence final error: ',f_err,' time: ', end - start)
                    results.append((orig_x_seq.data.cpu().numpy(), ret_x_seq.data.cpu().numpy(), PedsList_seq, lookup_seq, dataloader.get_frame_sequence(args.seq_length), target_id))

                loss_batch = loss_batch / dataloader.batch_size
                err_batch = err_batch / dataloader.batch_size
                f_err_batch = f_err_batch / dataloader.batch_size
                num_of_batch += 1
                loss_epoch += loss_batch
                err_epoch += err_batch
                f_err_epoch += f_err_batch

            epoch_result.append(results)
            all_epoch_results.append(epoch_result)


            if dataloader.num_batches != 0:            
                loss_epoch = loss_epoch / dataloader.num_batches
                err_epoch = err_epoch / dataloader.num_batches
                f_err_epoch = f_err_epoch / dataloader.num_batches
                average_err = (err_epoch + f_err_epoch) / 2

                # Update best validation loss until now
                if loss_epoch < best_val_data_loss:
                    best_val_data_loss = loss_epoch
                    best_epoch_val_data = epoch

                if average_err < smallest_err_val_data:
                    smallest_err_val_data = average_err
                    best_err_epoch_val_data = epoch

                print('(epoch {}), valid_loss = {:.3f}, valid_mean_err = {:.3f}, valid_final_err = {:.3f}'.format(epoch, loss_epoch, err_epoch, f_err_epoch))
                print('Best epoch', best_epoch_val_data, 'Best validation loss', best_val_data_loss, 'Best error epoch',best_err_epoch_val_data, 'Best error', smallest_err_val_data)
                log_file_curve.write("Validation dataset epoch: "+str(epoch)+" loss: "+str(loss_epoch)+" mean_err: "+str(err_epoch)+'final_err: '+str(f_err_epoch)+'\n')

            optimizer = time_lr_scheduler(optimizer, epoch, lr_decay_epoch = args.freq_optimizer)


        # Save the model after each epoch
        print('Saving model')
        torch.save({
            'epoch': epoch,
            'state_dict': net.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()
        }, checkpoint_path(epoch))
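        # Resuming (sketch, not part of the original script): a saved .tar can be
        # restored with ckpt = torch.load(checkpoint_path(k)) followed by
        # net.load_state_dict(ckpt['state_dict']) and
        # optimizer.load_state_dict(ckpt['optimizer_state_dict']).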




    if dataloader.valid_num_batches != 0:        
        print('Best epoch', best_epoch_val, 'Best validation Loss', best_val_loss, 'Best error epoch',best_err_epoch_val, 'Best error', smallest_err_val)
        # Log the best epoch and best validation loss
        log_file.write('Validation Best epoch:'+str(best_epoch_val)+','+' Best validation Loss: '+str(best_val_loss))

    if dataloader.additional_validation:
        print('Best epoch according to validation dataset', best_epoch_val_data, 'Best validation Loss', best_val_data_loss, 'Best error epoch',best_err_epoch_val_data, 'Best error', smallest_err_val_data)
        log_file.write("Validation dataset Best epoch: "+str(best_epoch_val_data)+','+' Best validation Loss: '+str(best_val_data_loss)+'\n')
        #dataloader.write_to_plot_file(all_epoch_results[best_epoch_val_data], plot_directory)

    #elif dataloader.valid_num_batches != 0:
    #    dataloader.write_to_plot_file(all_epoch_results[best_epoch_val], plot_directory)

    #else:
    if validation_dataset_executed:
        dataloader.switch_to_dataset_type(load_data=False)
        create_directories(plot_directory, [plot_train_file_directory])
        dataloader.write_to_plot_file(all_epoch_results[len(all_epoch_results)-1], os.path.join(plot_directory, plot_train_file_directory))

    # Close logging files
    log_file.close()
    log_file_curve.close()
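The training and validation losses above come from Gaussian2DLikelihood. The snippet below is a minimal, hedged sketch of the per-point negative log-likelihood such a function is assumed to aggregate over pedestrians and frames; the repository's actual implementation may mask absent pedestrians and reduce the sum differently.

import math
import torch

def bivariate_gaussian_nll(mux, muy, sx, sy, corr, x, y):
    # Negative log-likelihood of observed positions (x, y) under a bivariate
    # Gaussian with means (mux, muy), std devs (sx, sy) and correlation corr.
    normx = (x - mux) / sx
    normy = (y - muy) / sy
    z = normx ** 2 + normy ** 2 - 2 * corr * normx * normy
    one_minus_rho2 = 1 - corr ** 2
    log_pdf = (-z / (2 * one_minus_rho2)
               - torch.log(2 * math.pi * sx * sy * torch.sqrt(one_minus_rho2)))
    return -log_pdf  # the caller sums/averages this over pedestrians and frames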
Code Example #3
File: train.py Project: paeccher/sns-lstm
def main():
    # Parse the arguments received from command line
    parser = argparse.ArgumentParser(description="Train a social LSTM")
    parser.add_argument(
        "modelParams",
        type=str,
        help=
        "Path to the file or folder with the parameters of the experiments",
    )
    parser.add_argument(
        "-l",
        "--logLevel",
        help="logging level of the logger. Default is INFO",
        metavar="level",
        type=str,
    )
    parser.add_argument(
        "-f",
        "--logFolder",
        help=
        "path to the folder where to save the logs. If None, logs are only printed in stderr",
        type=str,
        metavar="path",
    )
    args = parser.parse_args()

    if os.path.isdir(args.modelParams):
        names_experiments = os.listdir(args.modelParams)
        experiments = [
            os.path.join(args.modelParams, experiment)
            for experiment in names_experiments
        ]
    else:
        experiments = [args.modelParams]

    for experiment in experiments:
        # Load the parameters
        hparams = utils.YParams(experiment)
        # Define the logger
        setLogger(hparams, args, PHASE)

        remainSpaces = 29 - len(hparams.name)
        logging.info(
            "\n" +
            "--------------------------------------------------------------------------------\n"
            + "|                            Training experiment: " +
            hparams.name + " " * remainSpaces + "|\n" +
            "--------------------------------------------------------------------------------\n"
        )

        trajectory_size = hparams.obsLen + hparams.predLen

        logging.info("Loading the training datasets...")
        train_loader = utils.DataLoader(
            hparams.dataPath,
            hparams.trainDatasets,
            hparams.trainMaps,
            hparams.semanticMaps,
            hparams.trainMapping,
            hparams.homography,
            num_labels=hparams.numLabels,
            delimiter=hparams.delimiter,
            skip=hparams.skip,
            max_num_ped=hparams.maxNumPed,
            trajectory_size=trajectory_size,
            neighborood_size=hparams.neighborhoodSize,
        )
        logging.info("Loading the validation datasets...")
        val_loader = utils.DataLoader(
            hparams.dataPath,
            hparams.validationDatasets,
            hparams.validationMaps,
            hparams.semanticMaps,
            hparams.validationMapping,
            hparams.homography,
            num_labels=hparams.numLabels,
            delimiter=hparams.delimiter,
            skip=hparams.skip,
            max_num_ped=hparams.maxNumPed,
            trajectory_size=trajectory_size,
            neighborood_size=hparams.neighborhoodSize,
        )

        logging.info(
            "Creating the training and validation dataset pipeline...")
        dataset = utils.TrajectoriesDataset(
            train_loader,
            val_loader=val_loader,
            batch=False,
            shuffle=hparams.shuffle,
            prefetch_size=hparams.prefetchSize,
        )

        hparams.add_hparam("learningRateSteps", train_loader.num_sequences)

        logging.info("Creating the model...")
        start = time.time()
        model = SocialModel(dataset, hparams, phase=PHASE)
        end = time.time() - start
        logging.debug("Model created in {:.2f}s".format(end))

        # Define the paths where the model and the checkpoints will be saved
        save_model = False
        if hparams.modelFolder:
            save_model = True
            model_folder = os.path.join(hparams.modelFolder, hparams.name)
            if not os.path.exists(model_folder):
                os.makedirs(model_folder)
                os.makedirs(os.path.join(model_folder, "checkpoints"))
            model_path = os.path.join(model_folder, hparams.name)
            checkpoints_path = os.path.join(model_folder, "checkpoints",
                                            hparams.name)
            # Create the saver
            saver = tf.train.Saver()

        # Zero padding
        padding = len(str(train_loader.num_sequences))

        # ============================ START TRAINING ============================

        with tf.Session() as sess:
            logging.info(
                "\n" +
                "--------------------------------------------------------------------------------\n"
                +
                "|                                Start training                                |\n"
                +
                "--------------------------------------------------------------------------------\n"
            )
            # Initialize all the variables in the graph
            sess.run(tf.global_variables_initializer())

            for epoch in range(hparams.epochs):
                logging.info("Starting epoch {}".format(epoch + 1))

                # ==================== TRAINING PHASE ====================

                # Initialize the iterator of the training dataset
                sess.run(dataset.init_train)

                for sequence in range(train_loader.num_sequences):
                    start = time.time()
                    loss, _ = sess.run([model.loss, model.train_optimizer])
                    end = time.time() - start

                    logging.info(
                        "{:{width}d}/{} epoch: {} time/Batch = {:.2f}s. Loss = {:.4f}"
                        .format(
                            sequence + 1,
                            train_loader.num_sequences,
                            epoch + 1,
                            end,
                            loss,
                            width=padding,
                        ))

                # ==================== VALIDATION PHASE ====================

                logging.info(" ========== Validation ==========")
                # Initialize the iterator of the validation dataset
                sess.run(dataset.init_val)
                loss_val = 0

                for _ in range(val_loader.num_sequences):
                    loss = sess.run(model.loss)
                    loss_val += loss

                mean_val = loss_val / val_loader.num_sequences

                logging.info("Epoch: {}. Validation loss = {:.4f}".format(
                    epoch + 1, mean_val))

                # Save the model
                if save_model:
                    logging.info("Saving model...")
                    saver.save(
                        sess,
                        checkpoints_path,
                        global_step=epoch + 1,
                        write_meta_graph=False,
                    )
                    logging.info("Model saved...")
            # Save the final model
            if save_model:
                saver.save(sess, model_path)
        tf.reset_default_graph()
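    # Usage note (assumption based on the argparse setup above; the params file is
    # whatever format utils.YParams expects, e.g. a YAML experiment file):
    #   python train.py experiments/social.yaml --logLevel INFO --logFolder logs/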
Code Example #4
def train(args):
    origin = (0, 0)
    reference_point = (0, 1)
    validation_dataset_executed = False

    # Could this block be removed?
    prefix = ''
    f_prefix = '.'
    if args.drive:
        prefix = 'drive/semester_project/social_lstm_final/'
        f_prefix = 'drive/semester_project/social_lstm_final'
    # Used for reading the data from cloud storage

    if not os.path.isdir("log/"):
        print("Directory creation script is running...")
        subprocess.call([f_prefix + '/make_directories.sh'])

    args.freq_validation = np.clip(args.freq_validation, 0, args.num_epochs)
    validation_epoch_list = list(
        range(args.freq_validation, args.num_epochs + 1, args.freq_validation))
    validation_epoch_list[-1] -= 1

    # Create the data loader object. This object would preprocess the data in terms of
    # batches each of size args.batch_size, of length args.seq_length
    # Load the data
    dataloader = DataLoader(f_prefix,
                            args.batch_size,
                            args.seq_length,
                            args.num_validation,
                            forcePreProcess=True)

    model_name = "LSTM"
    method_name = "SOCIALLSTM"
    save_tar_name = method_name + "_lstm_model_"
    if args.gru:
        model_name = "GRU"
        save_tar_name = method_name + "_gru_model_"

    # Log directory
    log_directory = os.path.join(prefix, 'log/')
    plot_directory = os.path.join(prefix, 'plot/', method_name, model_name)
    # print(plot_directory)
    plot_train_file_directory = 'validation'

    # Logging files
    log_file_curve = open(
        os.path.join(log_directory, method_name, model_name, 'log_curve.txt'),
        'w+')
    log_file = open(
        os.path.join(log_directory, method_name, model_name, 'val.txt'), 'w+')

    # model directory
    save_directory = os.path.join(prefix, 'model/')

    # Save the arguments in the config file
    with open(
            os.path.join(save_directory, method_name, model_name,
                         'config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    # Path to store the checkpoint file
    def checkpoint_path(x):
        return os.path.join(save_directory, method_name, model_name,
                            save_tar_name + str(x) + '.tar')

    # model creation
    net = SocialModel(args)
    if args.use_cuda:
        net = net.cuda()

    optimizer = torch.optim.Adagrad(net.parameters(),
                                    weight_decay=args.lambda_param)

    learning_rate = args.learning_rate

    best_val_loss = 100
    best_val_data_loss = 100

    smallest_err_val = 100000
    smallest_err_val_data = 100000

    best_epoch_val = 0
    best_epoch_val_data = 0

    best_err_epoch_val = 0
    best_err_epoch_val_data = 0

    all_epoch_results = []
    grids = []
    num_batch = 0
    dataset_pointer_ins_grid = -1

    grids = [[] for _ in range(dataloader.get_len_of_dataset())]

    # Training
    for epoch in range(args.num_epochs):
        print('****************Training epoch beginning******************')
        if dataloader.additional_validation and (epoch -
                                                 1) in validation_epoch_list:
            dataloader.switch_to_dataset_type(True)
        dataloader.reset_batch_pointer(valid=False)
        loss_epoch = 0

        # For each batch
        for batch in range(dataloader.num_batches):
            start = time.time()

            # Get batch data
            x, y, d, numPedsList, PedsList, target_ids = dataloader.next_batch(
            )
            loss_batch = 0

            # If we have moved on to a new dataset, reset the batch counter
            if dataset_pointer_ins_grid != dataloader.dataset_pointer and epoch != 0:
                num_batch = 0
                dataset_pointer_ins_grid = dataloader.dataset_pointer

            # For each sequence
            for sequence in range(dataloader.batch_size):
                # Get the data corresponding to the current sequence
                x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[
                    sequence], y[sequence], d[sequence], numPedsList[
                        sequence], PedsList[sequence]
                target_id = target_ids[sequence]

                # Get the processing file name and then the dimensions of the file
                folder_name = dataloader.get_directory_name_with_pointer(d_seq)
                dataset_data = dataloader.get_dataset_dimension(folder_name)

                # dense vector creation
                x_seq, lookup_seq = dataloader.convert_proper_array(
                    x_seq, numPedsList_seq, PedsList_seq)
                target_id_values = x_seq[0][lookup_seq[target_id], 0:2]

                # grid mask calculation and storage depending on grid parameter
                # (presumably used to capture whether there is social interaction)
                if args.grid:
                    if epoch == 0:
                        grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                       PedsList_seq,
                                                       args.neighborhood_size,
                                                       args.grid_size,
                                                       args.use_cuda)
                        grids[dataloader.dataset_pointer].append(grid_seq)
                    else:
                        temp = (num_batch * dataloader.batch_size) + sequence
                        if temp > 128:
                            temp = 128
                        grid_seq = grids[dataloader.dataset_pointer][temp]
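                        # (Assumption) the clamp above caps the cached-grid index at 128,
                        # presumably as a workaround so the lookup cannot run past the
                        # grids cached during epoch 0; the variant in code example #2
                        # indexes the cache without this cap.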
                else:
                    grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                   PedsList_seq,
                                                   args.neighborhood_size,
                                                   args.grid_size,
                                                   args.use_cuda)

                # vectorize trajectories in sequence
                x_seq, _ = vectorize_seq(x_seq, PedsList_seq, lookup_seq)
                if args.use_cuda:
                    x_seq = x_seq.cuda()

                # number of peds in this sequence per frame
                numNodes = len(lookup_seq)

                hidden_states = Variable(torch.zeros(numNodes, args.rnn_size))
                if args.use_cuda:
                    hidden_states = hidden_states.cuda()

                cell_states = Variable(torch.zeros(numNodes, args.rnn_size))
                if args.use_cuda:
                    cell_states = cell_states.cuda()

                # Zero out gradients
                net.zero_grad()
                optimizer.zero_grad()

                # Forward prop
                outputs, _, _ = net(x_seq, grid_seq, hidden_states,
                                    cell_states, PedsList_seq, numPedsList_seq,
                                    dataloader, lookup_seq)

                # Compute the loss
                loss = Gaussian2DLikelihood(outputs, x_seq, PedsList_seq,
                                            lookup_seq)
                loss_batch += loss.item()

                # Compute gradients
                loss.backward()

                # Clip gradients
                torch.nn.utils.clip_grad_norm_(net.parameters(),
                                               args.grad_clip)

                # Update parameters
                optimizer.step()

            end = time.time()
            loss_batch = loss_batch / dataloader.batch_size
            loss_epoch += loss_batch
            num_batch += 1

            print('{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'.
                  format(epoch * dataloader.num_batches + batch,
                         args.num_epochs * dataloader.num_batches, epoch,
                         loss_batch, end - start))

        loss_epoch /= dataloader.num_batches
        # Log the loss
        log_file_curve.write("Training epoch: " + str(epoch) + " loss: " +
                             str(loss_epoch) + '\n')

        if dataloader.valid_num_batches > 0:
            print(
                '****************Validation epoch beginning******************')

            # Validation
            dataloader.reset_batch_pointer(valid=True)
            loss_epoch = 0
            err_epoch = 0

            # For each batch
            for batch in range(dataloader.valid_num_batches):
                # Get batch data
                x, y, d, numPedsList, PedsList, target_ids = dataloader.next_valid_batch(
                )

                # Loss for this batch
                loss_batch = 0
                err_batch = 0

                # For each sequence
                for sequence in range(dataloader.batch_size):
                    # Get the data corresponding to the current sequence
                    x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[
                        sequence], y[sequence], d[sequence], numPedsList[
                            sequence], PedsList[sequence]
                    target_id = target_ids[sequence]

                    # Get the processing file name and then the dimensions of the file
                    folder_name = dataloader.get_directory_name_with_pointer(
                        d_seq)
                    dataset_data = dataloader.get_dataset_dimension(
                        folder_name)

                    # dense vector creation
                    x_seq, lookup_seq = dataloader.convert_proper_array(
                        x_seq, numPedsList_seq, PedsList_seq)

                    target_id_values = x_seq[0][lookup_seq[target_id], 0:2]

                    # get grid mask
                    # (presumably used to capture whether there is social interaction)
                    grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                   PedsList_seq,
                                                   args.neighborhood_size,
                                                   args.grid_size,
                                                   args.use_cuda)

                    x_seq, first_values_dict = vectorize_seq(
                        x_seq, PedsList_seq, lookup_seq)

                    if args.use_cuda:
                        x_seq = x_seq.cuda()

                    # number of peds in this sequence per frame
                    numNodes = len(lookup_seq)

                    hidden_states = Variable(
                        torch.zeros(numNodes, args.rnn_size))
                    if args.use_cuda:
                        hidden_states = hidden_states.cuda()
                    cell_states = Variable(torch.zeros(numNodes,
                                                       args.rnn_size))
                    if args.use_cuda:
                        cell_states = cell_states.cuda()

                    # Forward prop
                    outputs, _, _ = net(x_seq[:-1], grid_seq[:-1],
                                        hidden_states, cell_states,
                                        PedsList_seq[:-1], numPedsList_seq,
                                        dataloader, lookup_seq)

                    # Compute the loss
                    loss = Gaussian2DLikelihood(outputs, x_seq[1:],
                                                PedsList_seq[1:], lookup_seq)
                    # Extract the mean, std and corr of the bivariate Gaussian
                    mux, muy, sx, sy, corr = getCoef(outputs)
                    # Sample from the bivariate Gaussian
                    next_x, next_y = sample_gaussian_2d(
                        mux.data, muy.data, sx.data, sy.data, corr.data,
                        PedsList_seq[-1], lookup_seq)
                    next_vals = torch.FloatTensor(1, numNodes, 2)
                    next_vals[:, :, 0] = next_x
                    next_vals[:, :, 1] = next_y
                    err = get_mean_error(next_vals, x_seq[-1].data[None, :, :],
                                         [PedsList_seq[-1]],
                                         [PedsList_seq[-1]], args.use_cuda,
                                         lookup_seq)

                    loss_batch += loss.item()
                    err_batch += err

                loss_batch = loss_batch / dataloader.batch_size
                err_batch = err_batch / dataloader.batch_size
                loss_epoch += loss_batch
                err_epoch += err_batch

            if dataloader.valid_num_batches != 0:
                loss_epoch = loss_epoch / dataloader.valid_num_batches
                err_epoch = err_epoch / dataloader.valid_num_batches

                # Update the best validation loss so far
                if loss_epoch < best_val_loss:
                    best_val_loss = loss_epoch
                    best_epoch_val = epoch

                if err_epoch < smallest_err_val:
                    smallest_err_val = err_epoch
                    best_err_epoch_val = epoch

                print('(epoch {}), valid_loss = {:.3f}, valid_err = {:.3f}'.
                      format(epoch, loss_epoch, err_epoch))
                print('Best epoch', best_epoch_val, 'Best validation loss',
                      best_val_loss, 'Best error epoch', best_err_epoch_val,
                      'Best error', smallest_err_val)
                log_file_curve.write("Validation epoch: " + str(epoch) +
                                     " loss: " + str(loss_epoch) + " err: " +
                                     str(err_epoch) + '\n')

        # Validation dataset
        if dataloader.additional_validation and (
                epoch) in validation_epoch_list:
            dataloader.switch_to_dataset_type()
            print(
                '****************Validation with dataset epoch beginning******************'
            )
            dataloader.reset_batch_pointer(valid=False)
            dataset_pointer_ins = dataloader.dataset_pointer
            validation_dataset_executed = True

            loss_epoch = 0
            err_epoch = 0
            f_err_epoch = 0
            num_of_batch = 0
            smallest_err = 100000

            # results of one epoch for all validation datasets
            epoch_result = []
            # results of one validation dataset
            results = []

            # For each batch
            for batch in range(dataloader.num_batches):
                # Get batch data
                x, y, d, numPedsList, PedsList, target_ids = dataloader.next_batch(
                )

                if dataset_pointer_ins != dataloader.dataset_pointer:
                    if dataloader.dataset_pointer != 0:
                        print('Finished processing file : ',
                              dataloader.get_file_name(-1),
                              ' Average error : ', err_epoch / num_of_batch)
                        num_of_batch = 0
                        epoch_result.append(results)

                    dataset_pointer_ins = dataloader.dataset_pointer
                    results = []

                # Loss for this batch
                loss_batch = 0
                err_batch = 0
                f_err_batch = 0

                # For each sequence
                for sequence in range(dataloader.batch_size):
                    # Get data corresponding to the current sequence
                    x_seq, _, d_seq, numPedsList_seq, PedsList_seq = x[
                        sequence], y[sequence], d[sequence], numPedsList[
                            sequence], PedsList[sequence]
                    target_id = target_ids[sequence]

                    # get processing file name and then get dimensions of file
                    folder_name = dataloader.get_directory_name_with_pointer(
                        d_seq)
                    dataset_data = dataloader.get_dataset_dimension(
                        folder_name)

                    # dense vector creation
                    x_seq, lookup_seq = dataloader.convert_proper_array(
                        x_seq, numPedsList_seq, PedsList_seq)

                    # will be used for error calculation
                    orig_x_seq = x_seq.clone()

                    target_id_values = orig_x_seq[0][lookup_seq[target_id],
                                                     0:2]

                    # grid mask calculation
                    grid_seq = getSequenceGridMask(x_seq, dataset_data,
                                                   PedsList_seq,
                                                   args.neighborhood_size,
                                                   args.grid_size,
                                                   args.use_cuda)

                    if args.use_cuda:
                        x_seq = x_seq.cuda()
                        orig_x_seq = orig_x_seq.cuda()

                    # vectorize data points
                    x_seq, first_values_dict = vectorize_seq(
                        x_seq, PedsList_seq, lookup_seq)

                    # sample predicted points from the model
                    ret_x_seq, loss = sample_validation_data(
                        x_seq, PedsList_seq, grid_seq, args, net, lookup_seq,
                        numPedsList_seq, dataloader)

                    # revert the points back to the original space
                    ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq,
                                           first_values_dict)

                    # get mean and final error
                    err = get_mean_error(ret_x_seq.data, orig_x_seq.data,
                                         PedsList_seq, PedsList_seq,
                                         args.use_cuda, lookup_seq)
                    f_err = get_final_error(ret_x_seq.data, orig_x_seq.data,
                                            PedsList_seq, PedsList_seq,
                                            lookup_seq)

                    loss_batch += loss.item()
                    err_batch += err
                    f_err_batch += f_err
                    print('Current file : ', dataloader.get_file_name(0),
                          ' Batch : ', batch + 1, ' Sequence: ', sequence + 1,
                          ' Sequence mean error: ', err,
                          ' Sequence final error: ', f_err, ' time: ',
                          end - start)
                    results.append(
                        (orig_x_seq.data.cpu().numpy(),
                         ret_x_seq.data.cpu().numpy(), PedsList_seq,
                         lookup_seq,
                         dataloader.get_frame_sequence(args.seq_length),
                         target_id))

                loss_batch = loss_batch / dataloader.batch_size
                err_batch = err_batch / dataloader.batch_size
                f_err_batch = f_err_batch / dataloader.batch_size
                num_of_batch += 1
                loss_epoch += loss_batch
                err_epoch += err_batch
                f_err_epoch += f_err_batch

            epoch_result.append(results)
            all_epoch_results.append(epoch_result)

            if dataloader.num_batches != 0:
                loss_epoch = loss_epoch / dataloader.num_batches
                err_epoch = err_epoch / dataloader.num_batches
                f_err_epoch = f_err_epoch / dataloader.num_batches
                average_err = (err_epoch + f_err_epoch) / 2

                # Update best validation loss until now
                if loss_epoch < best_val_data_loss:
                    best_val_data_loss = loss_epoch
                    best_epoch_val_data = epoch

                if average_err < smallest_err_val_data:
                    smallest_err_val_data = average_err
                    best_err_epoch_val_data = epoch

                print('(epoch {}), valid_loss = {:.3f}, '
                      'valid_mean_err = {:.3f}, '
                      'valid_final_err = {:.3f}'.format(
                          epoch, loss_epoch, err_epoch, f_err_epoch))
                print('Best epoch', best_epoch_val_data,
                      'Best validation loss', best_val_data_loss,
                      'Best error epoch', best_err_epoch_val_data,
                      'Best error', smallest_err_val_data)
                log_file_curve.write("Validation dataset epoch: " +
                                     str(epoch) + " loss: " + str(loss_epoch) +
                                     " mean_err: " + str(err_epoch) +
                                     'final_err: ' + str(f_err_epoch) + '\n')

            optimizer = time_lr_scheduler(optimizer,
                                          epoch,
                                          lr_decay_epoch=args.freq_optimizer)

        # Save the model after each epoch
        print('Saving model')
        torch.save(
            {
                'epoch': epoch,
                'state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, checkpoint_path(epoch))

    if dataloader.valid_num_batches != 0:
        print('Best epoch', best_epoch_val, 'Best validation Loss',
              best_val_loss, 'Best error epoch', best_err_epoch_val,
              'Best error', smallest_err_val)
        # Log the best epoch and best validation loss
        log_file.write('Validation Best epoch:' + str(best_epoch_val) + ',' +
                       ' Best validation Loss: ' + str(best_val_loss))

    if dataloader.additional_validation:
        print('Best epoch according to validation dataset', best_epoch_val_data,
              'Best validation Loss', best_val_data_loss, 'Best error epoch',
              best_err_epoch_val_data, 'Best error', smallest_err_val_data)
        log_file.write("Validation dataset Best epoch: " +
                       str(best_epoch_val_data) + ',' +
                       ' Best validation Loss: ' + str(best_val_data_loss) +
                       '\n')

    # FileNotFoundError: [Errno 2] No such file or directory: 'plot/SOCIALLSTM\\LSTM\\validation\\biwi\\biwi_hotel_4.pkl'
    # validation_dataset_executed = True
    if validation_dataset_executed:
        # print("用于绘图的文件开始保存了")
        dataloader.switch_to_dataset_type(load_data=False)
        create_directories(plot_directory, [plot_train_file_directory])
        # This file could not be found, so I added it manually
        # print(all_epoch_results)
        # print(len(all_epoch_results) - 1)
        dataloader.write_to_plot_file(
            all_epoch_results[len(all_epoch_results) - 1],
            os.path.join(plot_directory, plot_train_file_directory))

    # Close logging files
    log_file.close()
    log_file_curve.close()
Code Example #5
File: train.py Project: ZhoubinXM/Traj-Prediction
def train(args):
    datasets = list(range(5))
    # Remove leaveDataset from the datasets
    datasets.remove(args.leaveDataset)

    # Create the SocialDataLoader object
    data_loader = SocialDataLoader(args.batch_size,
                                   args.seq_length,
                                   args.maxNumPeds,
                                   datasets,
                                   forcePreProcess=True,
                                   infer=False)

    # Log directory
    log_directory = 'log/'
    log_directory += str(args.leaveDataset) + '/'

    # Logging files
    log_file_curve = open(os.path.join(log_directory, 'log_curve.txt'), 'w')
    log_file = open(os.path.join(log_directory, 'val.txt'), 'w')

    # Save directory
    save_directory = 'save/'
    save_directory += str(args.leaveDataset) + '/'

    with open(os.path.join(save_directory, 'social_config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    # Create a SocialModel object with the arguments
    model = SocialModel(args)

    with tf.Session() as sess:
        # Initialize all variables in the graph
        sess.run(tf.global_variables_initializer())
        # Initialize a saver that saves all the variables in the graph
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

        # summary_writer = tf.train.SummaryWriter('/tmp/lstm/logs', graph_def=sess.graph_def)
        print('Training begin')
        best_val_loss = 100
        best_epoch = 0

        # For each epoch
        for e in range(args.num_epochs):
            # Assign the learning rate value for this epoch
            sess.run(
                tf.assign(model.lr, args.learning_rate * (args.decay_rate**e)))
            # Reset the data pointers in the data_loader
            data_loader.reset_batch_pointer(valid=False)

            loss_epoch = 0

            # For each batch
            for b in range(data_loader.num_batches):

                start = time.time()

                # Get the source, target and dataset data for the next batch
                # x, y are input and target data which are lists containing numpy arrays of size seq_length x maxNumPeds x 3
                # d is the list of dataset indices from which each batch is generated (used to differentiate between datasets)
                x, y, d = data_loader.next_batch()

                # variable to store the loss for this batch
                loss_batch = 0

                # For each sequence in the batch
                for batch in range(data_loader.batch_size):
                    # x_batch, y_batch and d_batch contain the source, target and dataset index data for
                    # seq_length long consecutive frames in the dataset
                    # x_batch, y_batch would be numpy arrays of size seq_length x maxNumPeds x 3
                    # d_batch would be a scalar identifying the dataset from which this sequence is extracted
                    x_batch, y_batch, d_batch = x[batch], y[batch], d[batch]

                    if d_batch == 0 and datasets[0] == 0:
                        dataset_data = [640, 480]
                    else:
                        dataset_data = [720, 576]

                    grid_batch = getSequenceGridMask(x_batch, dataset_data,
                                                     args.neighborhood_size,
                                                     args.grid_size)

                    # Feed the source and target data, plus the social grid mask
                    feed = {
                        model.input_data: x_batch,
                        model.target_data: y_batch,
                        model.grid_data: grid_batch
                    }
                    # Run one training step and compute the loss for this sequence
                    train_loss, _ = sess.run([model.cost, model.train_op],
                                             feed)

                    loss_batch += train_loss

                end = time.time()
                loss_batch = loss_batch / data_loader.batch_size
                loss_epoch += loss_batch
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                    .format(e * data_loader.num_batches + b,
                            args.num_epochs * data_loader.num_batches, e,
                            loss_batch, end - start))

            loss_epoch /= data_loader.num_batches
            log_file_curve.write(str(e) + ',' + str(loss_epoch) + ',')
            print('*****************')

            # Validate the model
            data_loader.reset_batch_pointer(valid=True)
            loss_epoch = 0

            for b in range(data_loader.valid_num_batches):

                # Get the source, target and dataset data for the next batch
                # x, y are input and target data which are lists containing numpy arrays of size seq_length x maxNumPeds x 3
                # d is the list of dataset indices from which each batch is generated (used to differentiate between datasets)
                x, y, d = data_loader.next_valid_batch()

                # variable to store the loss for this batch
                loss_batch = 0

                # For each sequence in the batch
                for batch in range(data_loader.batch_size):
                    # x_batch, y_batch and d_batch contains the source, target and dataset index data for
                    # seq_length long consecutive frames in the dataset
                    # x_batch, y_batch would be numpy arrays of size seq_length x maxNumPeds x 3
                    # d_batch would be a scalar identifying the dataset from which this sequence is extracted
                    x_batch, y_batch, d_batch = x[batch], y[batch], d[batch]

                    if d_batch == 0 and datasets[0] == 0:
                        dataset_data = [640, 480]
                    else:
                        dataset_data = [720, 576]

                    grid_batch = getSequenceGridMask(x_batch, dataset_data,
                                                     args.neighborhood_size,
                                                     args.grid_size)

                    # Feed the source, target data
                    feed = {
                        model.input_data: x_batch,
                        model.target_data: y_batch,
                        model.grid_data: grid_batch
                    }

                    train_loss = sess.run(model.cost, feed)

                    loss_batch += train_loss

                loss_batch = loss_batch / data_loader.batch_size
                loss_epoch += loss_batch

            loss_epoch /= data_loader.valid_num_batches

            # Update best validation loss until now
            if loss_epoch < best_val_loss:
                best_val_loss = loss_epoch
                best_epoch = e

            print('(epoch {}), valid_loss = {:.3f}'.format(e, loss_epoch))
            print('Best epoch', best_epoch, 'Best validation loss',
                  best_val_loss)
            log_file_curve.write(str(loss_epoch) + '\n')
            print('*****************')

            # Save the model after each epoch
            print('Saving model')
            checkpoint_path = os.path.join(save_directory, 'social_model.ckpt')
            saver.save(sess, checkpoint_path, global_step=e)
            print("model saved to {}".format(checkpoint_path))

        print('Best epoch', best_epoch, 'Best validation loss', best_val_loss)
        log_file.write(str(best_epoch) + ',' + str(best_val_loss))

        # Close logging files
        log_file.close()
        log_file_curve.close()
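
The checkpoints written by saver.save above follow the standard TensorFlow 1.x format, so they can later be restored with the same Saver API. A minimal sketch of a restore step, assuming the SocialModel graph is rebuilt from the pickled arguments and that leaveDataset was 0 (the directory name is an assumption based on the save_directory layout above):

import os
import pickle
import tensorflow as tf

save_directory = 'save/0/'  # assumed: leaveDataset == 0
with open(os.path.join(save_directory, 'social_config.pkl'), 'rb') as f:
    saved_args = pickle.load(f)

model = SocialModel(saved_args)  # rebuild the same graph as at training time
with tf.Session() as sess:
    saver = tf.train.Saver(tf.global_variables())
    # get_checkpoint_state picks up the latest 'social_model.ckpt-<epoch>'
    ckpt = tf.train.get_checkpoint_state(save_directory)
    saver.restore(sess, ckpt.model_checkpoint_path)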
コード例 #6
0
def main():
    parser = argparse.ArgumentParser(
        description="Sample new trajectories with a social LSTM")
    parser.add_argument(
        "modelParams",
        type=str,
        help=
        "Path to the file or folder with the parameters of the experiments",
    )
    parser.add_argument(
        "-l",
        "--logLevel",
        help="logging level of the logger. Default is INFO",
        metavar="level",
        type=str,
    )
    parser.add_argument(
        "-f",
        "--logFolder",
        help=
        "path to the folder where to save the logs. If None, logs are only printed in stderr",
        metavar="path",
        type=str,
    )
    parser.add_argument(
        "-ns",
        "--noSaveCoordinates",
        help="Flag to not save the predicted and ground truth coordinates",
        action="store_true",
    )
    args = parser.parse_args()

    if os.path.isdir(args.modelParams):
        names_experiments = os.listdir(args.modelParams)
        experiments = [
            os.path.join(args.modelParams, experiment)
            for experiment in names_experiments
        ]
    else:
        experiments = [args.modelParams]

    # Table will show the metrics of each experiment
    results = BeautifulTable()
    results.column_headers = ["Name experiment", "ADE", "FDE"]

    for experiment in experiments:
        # Load the parameters
        hparams = utils.YParams(experiment)
        # Define the logger
        setLogger(hparams, args, PHASE)

        remainSpaces = 29 - len(hparams.name)
        logging.info(
            "\n" +
            "--------------------------------------------------------------------------------\n"
            + "|                            Sampling experiment: " +
            hparams.name + " " * remainSpaces + "|\n" +
            "--------------------------------------------------------------------------------\n"
        )

        trajectory_size = hparams.obsLen + hparams.predLen

        saveCoordinates = False
        if args.noSaveCoordinates is True:
            saveCoordinates = False
        elif hparams.saveCoordinates:
            saveCoordinates = hparams.saveCoordinates

        if saveCoordinates:
            coordinates_path = os.path.join("coordinates", hparams.name)
            if not os.path.exists("coordinates"):
                os.makedirs("coordinates")

        logging.info("Loading the test datasets...")
        test_loader = utils.DataLoader(
            hparams.dataPath,
            hparams.testDatasets,
            hparams.testMaps,
            hparams.semanticMaps,
            hparams.testMapping,
            hparams.homography,
            num_labels=hparams.numLabels,
            delimiter=hparams.delimiter,
            skip=hparams.skip,
            max_num_ped=hparams.maxNumPed,
            trajectory_size=trajectory_size,
            neighborood_size=hparams.neighborhoodSize,
        )

        logging.info("Creating the test dataset pipeline...")
        dataset = utils.TrajectoriesDataset(
            test_loader,
            val_loader=None,
            batch=False,
            shuffle=hparams.shuffle,
            prefetch_size=hparams.prefetchSize,
        )

        logging.info("Creating the model...")
        start = time.time()
        model = SocialModel(dataset, hparams, phase=PHASE)
        end = time.time() - start
        logging.debug("Model created in {:.2f}s".format(end))

        # Define the path to the file that contains the variables of the model
        model_folder = os.path.join(hparams.modelFolder, hparams.name)
        model_path = os.path.join(model_folder, hparams.name)

        # Create a saver
        saver = tf.train.Saver()

        # Add to the computation graph the evaluation functions
        ade_sequence = utils.average_displacement_error(
            model.new_pedestrians_coordinates[-hparams.predLen:],
            model.pedestrians_coordinates[-hparams.predLen:],
            model.num_peds_frame,
        )

        fde_sequence = utils.final_displacement_error(
            model.new_pedestrians_coordinates[-1],
            model.pedestrians_coordinates[-1],
            model.num_peds_frame,
        )

        ade = 0
        fde = 0
        coordinates_predicted = []
        coordinates_gt = []
        peds_in_sequence = []

        # Zero padding
        padding = len(str(test_loader.num_sequences))

        # ============================ START SAMPLING ============================

        with tf.Session() as sess:
            # Restore the model trained
            saver.restore(sess, model_path)

            # Initialize the iterator of the sample dataset
            sess.run(dataset.init_train)

            logging.info(
                "\n" +
                "--------------------------------------------------------------------------------\n"
                +
                "|                                Start sampling                                |\n"
                +
                "--------------------------------------------------------------------------------\n"
            )

            for seq in range(test_loader.num_sequences):
                logging.info("Sample trajectory number {:{width}d}/{}".format(
                    seq + 1, test_loader.num_sequences, width=padding))

                ade_value, fde_value, coordinates_pred_value, coordinates_gt_value, num_peds = sess.run(
                    [
                        ade_sequence,
                        fde_sequence,
                        model.new_pedestrians_coordinates,
                        model.pedestrians_coordinates,
                        model.num_peds_frame,
                    ])
                ade += ade_value
                fde += fde_value
                coordinates_predicted.append(coordinates_pred_value)
                coordinates_gt.append(coordinates_gt_value)
                peds_in_sequence.append(num_peds)

            ade = ade / test_loader.num_sequences
            fde = fde / test_loader.num_sequences
            logging.info("Sampling finished. ADE: {:.4f} FDE: {:.4f}".format(
                ade, fde))
            results.append_row([hparams.name, ade, fde])

            if saveCoordinates:
                coordinates_predicted = np.array(coordinates_predicted)
                coordinates_gt = np.array(coordinates_gt)
                saveCoords(
                    coordinates_predicted,
                    coordinates_gt,
                    peds_in_sequence,
                    hparams.predLen,
                    coordinates_path,
                )
        tf.reset_default_graph()
    logging.info("\n{}".format(results))