Example #1
    def __init__(self, args, verbose=True):
        self.verbose = verbose

        # define social LSTM model
        print('Building social LSTM model')
        with open(utils.check_path(args.social_conf_path), 'rb') as f:
            self._social_conf = pickle.load(f)
        self._model = SocialModel(self._social_conf, True)

        # define session
        self._sess = tf.InteractiveSession()

        # restore model parameters
        restorer = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(
            os.path.abspath(os.path.expanduser(args.ckpt_dir)))
        print('loading model: {}'.format(ckpt.model_checkpoint_path))
        restorer.restore(self._sess, ckpt.model_checkpoint_path)

        # probability that a new pedestrian pops up while self._cur_num_peds is below self._max_num_peds
        self._new_peds_prob = args.new_peds_prob
        # maximum number of pedestrians in a frame
        self._max_num_peds = self._social_conf.maxNumPeds
        # number of pedestrians in the current frame
        self._cur_num_peds = 0
        # a list indicating which of the max_num_peds pedestrian slots are currently occupied
        self._peds_exist = [False] * self.max_num_peds
        # internal data for social LSTM model
        self._data = np.zeros((1, self._max_num_peds, 3))  # shape=(1,MNP,3)
        self._grid_data = np.zeros(
            (1, self._max_num_peds, self._max_num_peds,
             self._social_conf.grid_size**2))  # shape=(1,MNP,MNP,grid_size**2)
        self._init_data = np.zeros((args.init_num_step, self._max_num_peds,
                                    3))  # shape=(init_num_step,MNP,3)
        self._init_grid_data = np.zeros(
            (args.init_num_step, self._max_num_peds, self._max_num_peds,
             self._social_conf.grid_size**2
             ))  # shape=(init_num_step,MNP,MNP,grid_size**2)
        # shape of background, a 2-element list [width, height]
        self._bg_shape = args.bg_shape
        # number of steps for initialization of a pedestrian
        self._init_num_step = args.init_num_step

        # for interpolation
        self._n_interp = args.n_interp
        self._interp_count = 0
        self._prev_data = np.zeros(self._data.shape)

        self._output_data = np.zeros(self._data.shape)
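
For reference, a minimal sketch of the argparse namespace this constructor expects, inferred only from the attributes it reads (social_conf_path, ckpt_dir, new_peds_prob, bg_shape, init_num_step, n_interp); the default values below are placeholders, not the original project's settings.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--social_conf_path', type=str, default='save/social_config.pkl')  # pickled training config
parser.add_argument('--ckpt_dir', type=str, default='save')               # directory holding the checkpoint
parser.add_argument('--new_peds_prob', type=float, default=0.5)           # spawn probability per step
parser.add_argument('--bg_shape', type=int, nargs=2, default=[640, 480])  # background [width, height]
parser.add_argument('--init_num_step', type=int, default=8)               # steps used to initialize a pedestrian
parser.add_argument('--n_interp', type=int, default=4)                    # interpolation steps between model updates
args = parser.parse_args()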
Example #2
def train(args):
    datasets = list(range(4))  # list() so that .remove() also works under Python 3
    # Remove the leaveDataset from datasets
    datasets.remove(args.leaveDataset)

    # Create the SocialDataLoader object
    data_loader = SocialDataLoader(args.batch_size,
                                   args.seq_length,
                                   args.maxNumPeds,
                                   datasets,
                                   forcePreProcess=True)

    with open(os.path.join('save', 'social_config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    # Create a SocialModel object with the arguments
    model = SocialModel(args)

    # Initialize a TensorFlow session
    with tf.Session() as sess:
        # Initialize all variables in the graph
        sess.run(tf.initialize_all_variables())
        # Initialize a saver that saves all the variables in the graph
        saver = tf.train.Saver(tf.all_variables())

        # summary_writer = tf.train.SummaryWriter('/tmp/lstm/logs', graph_def=sess.graph_def)

        # For each epoch
        for e in range(args.num_epochs):
            # Assign the learning rate value for this epoch
            sess.run(
                tf.assign(model.lr, args.learning_rate * (args.decay_rate**e)))
            # Reset the data pointers in the data_loader
            data_loader.reset_batch_pointer()

            # For each batch
            for b in range(data_loader.num_batches):
                # Tic
                start = time.time()

                # Get the source, target and dataset data for the next batch
                # x, y are input and target data which are lists containing numpy arrays of size seq_length x maxNumPeds x 3
                # d is the list of dataset indices from which each batch is generated (used to differentiate between datasets)
                x, y, d = data_loader.next_batch()

                # variable to store the loss for this batch
                loss_batch = 0

                # For each sequence in the batch
                for batch in range(data_loader.batch_size):
                    # x_batch, y_batch and d_batch contain the source, target and dataset index data for
                    # seq_length long consecutive frames in the dataset
                    # x_batch, y_batch would be numpy arrays of size seq_length x maxNumPeds x 3
                    # d_batch would be a scalar identifying the dataset from which this sequence is extracted
                    x_batch, y_batch, d_batch = x[batch], y[batch], d[batch]

                    if d_batch == 0 and datasets[0] == 0:
                        dataset_data = [640, 480]
                    else:
                        dataset_data = [720, 576]

                    grid_batch = getSequenceGridMask(x_batch, dataset_data,
                                                     args.neighborhood_size,
                                                     args.grid_size)

                    # Feed the source, target data
                    feed = {
                        model.input_data: x_batch,
                        model.target_data: y_batch,
                        model.grid_data: grid_batch
                    }

                    train_loss, _ = sess.run([model.cost, model.train_op],
                                             feed)

                    loss_batch += train_loss

                end = time.time()
                loss_batch = loss_batch / data_loader.batch_size
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                    .format(e * data_loader.num_batches + b,
                            args.num_epochs * data_loader.num_batches, e,
                            loss_batch, end - start))

                # Save the model if the current epoch and batch number match the frequency
                if (e * data_loader.num_batches +
                        b) % args.save_every == 0 and (
                            (e * data_loader.num_batches + b) > 0):
                    checkpoint_path = os.path.join('save', 'social_model.ckpt')
                    saver.save(sess,
                               checkpoint_path,
                               global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
Example #3
def train(args):
    datasets = list(range(5))  # list() so that .remove() also works under Python 3
    # Remove the leaveDataset from datasets
    datasets.remove(args.leaveDataset)

    # Create the SocialDataLoader object
    data_loader = SocialDataLoader(args.batch_size,
                                   args.seq_length,
                                   args.maxNumPeds,
                                   datasets,
                                   forcePreProcess=True,
                                   infer=False)

    # Log directory
    log_directory = 'log/'
    log_directory += str(args.leaveDataset) + '/'

    # Logging files
    log_file_curve = open(os.path.join(log_directory, 'log_curve.txt'), 'w')
    log_file = open(os.path.join(log_directory, 'val.txt'), 'w')

    # Save directory
    save_directory = 'save/'
    save_directory += str(args.leaveDataset) + '/'

    with open(os.path.join(save_directory, 'social_config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    # Create a SocialModel object with the arguments
    model = SocialModel(args)

    config = tf.ConfigProto(
        log_device_placement=True
    )  # Show which device each op is placed on (useful with multiple GPUs)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.8  # Cap each GPU process at 80% of GPU memory
    # Initialize a TensorFlow session
    with tf.Session(config=config) as sess:
        # Initialize all variables in the graph
        sess.run(tf.global_variables_initializer())
        # Initialize a saver that saves all the variables in the graph
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

        # summary_writer = tf.train.SummaryWriter('/tmp/lstm/logs', graph_def=sess.graph_def)
        print('Training begin')
        best_val_loss = 100
        best_epoch = 0

        # For each epoch
        for e in range(args.num_epochs):
            # Assign the learning rate value for this epoch
            sess.run(
                tf.assign(model.lr, args.learning_rate * (args.decay_rate**e)))
            # Reset the data pointers in the data_loader
            data_loader.reset_batch_pointer(valid=False)

            loss_epoch = 0

            # For each batch
            for b in range(data_loader.num_batches):
                # Tic
                start = time.time()

                # Get the source, target and dataset data for the next batch
                # x, y are input and target data which are lists containing numpy arrays of size seq_length x maxNumPeds x 3
                # d is the list of dataset indices from which each batch is generated (used to differentiate between datasets)
                x, y, d = data_loader.next_batch()

                # variable to store the loss for this batch
                loss_batch = 0

                # For each sequence in the batch
                for batch in range(data_loader.batch_size):
                    # x_batch, y_batch and d_batch contain the source, target and dataset index data for
                    # seq_length long consecutive frames in the dataset
                    # x_batch, y_batch would be numpy arrays of size seq_length x maxNumPeds x 3
                    # d_batch would be a scalar identifying the dataset from which this sequence is extracted
                    x_batch, y_batch, d_batch = x[batch], y[batch], d[batch]

                    if d_batch == 0 and datasets[0] == 0:
                        dataset_data = [640, 480]
                    else:
                        dataset_data = [720, 576]

                    grid_batch = getSequenceGridMask(x_batch, dataset_data,
                                                     args.neighborhood_size,
                                                     args.grid_size)

                    # Feed the source, target data
                    feed = {
                        model.input_data: x_batch,
                        model.target_data: y_batch,
                        model.grid_data: grid_batch
                    }

                    train_loss, _ = sess.run([model.cost, model.train_op],
                                             feed)

                    loss_batch += train_loss

                end = time.time()
                loss_batch = loss_batch / data_loader.batch_size
                loss_epoch += loss_batch
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                    .format(e * data_loader.num_batches + b,
                            args.num_epochs * data_loader.num_batches, e,
                            loss_batch, end - start))

                # Save the model if the current epoch and batch number match the frequency
                '''
                if (e * data_loader.num_batches + b) % args.save_every == 0 and ((e * data_loader.num_batches + b) > 0):
                    checkpoint_path = os.path.join('save', 'social_model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
                '''
            loss_epoch /= data_loader.num_batches
            log_file_curve.write(str(e) + ',' + str(loss_epoch) + ',')
            print('*****************')

            # Validation
            data_loader.reset_batch_pointer(valid=True)
            loss_epoch = 0

            for b in range(data_loader.valid_num_batches):  # iterate over validation batches (matches the divisor below)

                # Get the source, target and dataset data for the next batch
                # x, y are input and target data which are lists containing numpy arrays of size seq_length x maxNumPeds x 3
                # d is the list of dataset indices from which each batch is generated (used to differentiate between datasets)
                x, y, d = data_loader.next_valid_batch()

                # variable to store the loss for this batch
                loss_batch = 0

                # For each sequence in the batch
                for batch in range(data_loader.batch_size):
                    # x_batch, y_batch and d_batch contain the source, target and dataset index data for
                    # seq_length long consecutive frames in the dataset
                    # x_batch, y_batch would be numpy arrays of size seq_length x maxNumPeds x 3
                    # d_batch would be a scalar identifying the dataset from which this sequence is extracted
                    x_batch, y_batch, d_batch = x[batch], y[batch], d[batch]

                    if d_batch == 0 and datasets[0] == 0:
                        dataset_data = [640, 480]
                    else:
                        dataset_data = [720, 576]

                    grid_batch = getSequenceGridMask(x_batch, dataset_data,
                                                     args.neighborhood_size,
                                                     args.grid_size)

                    # Feed the source, target data
                    feed = {
                        model.input_data: x_batch,
                        model.target_data: y_batch,
                        model.grid_data: grid_batch
                    }

                    train_loss = sess.run(model.cost, feed)

                    loss_batch += train_loss

                loss_batch = loss_batch / data_loader.batch_size
                loss_epoch += loss_batch

            loss_epoch /= data_loader.valid_num_batches

            # Update best validation loss until now
            if loss_epoch < best_val_loss:
                best_val_loss = loss_epoch
                best_epoch = e

            print('(epoch {}), valid_loss = {:.3f}'.format(e, loss_epoch))
            print('Best epoch', best_epoch, 'Best validation loss',
                  best_val_loss)
            log_file_curve.write(str(loss_epoch) + '\n')
            print('*****************')

            # Save the model after each epoch
            print('Saving model')
            checkpoint_path = os.path.join(save_directory, 'social_model.ckpt')
            saver.save(sess, checkpoint_path, global_step=e)
            print("model saved to {}".format(checkpoint_path))

        print('Best epoch', best_epoch, 'Best validation loss', best_val_loss)
        log_file.write(str(best_epoch) + ',' + str(best_val_loss))

        # Close logging files
        log_file.close()
        log_file_curve.close()
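
Both training loops above anneal the learning rate with args.learning_rate * (args.decay_rate ** e), i.e. exponential decay per epoch. A quick check of what that schedule looks like, using made-up values for the two hyperparameters:

learning_rate, decay_rate = 0.003, 0.95   # placeholder values, not the project's defaults
for e in (0, 10, 20, 30):
    print(e, learning_rate * decay_rate ** e)
# 0  0.003
# 10 ~0.0018
# 20 ~0.0011
# 30 ~0.00064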
Example #4
    def __init__(self):
        self.node_name = 'social_lstm'

        rospy.init_node(self.node_name)
        rospy.on_shutdown(self.cleanup)

        #         self.obs_length = 4
        #         self.pred_length = 8
        #         self.prev_length = 8
        self.obs_length = 8
        self.pred_length = 12
        self.prev_length = 12
        self.max_pedestrians = 40
        self.dimensions = [640, 480]
        self.time_resolution = 0.5

        # Define the path for the config file for saved args
        with open(os.path.join('save', 'social_config.pkl'), 'rb') as f:
            self.saved_args = pickle.load(f)

        # Create a SocialModel object with the saved_args and infer set to true
        self.social_lstm_model = SocialModel(self.saved_args, True)

        # Initialize a TensorFlow session
        self.sess = tf.InteractiveSession()

        # Initialize a saver
        saver = tf.train.Saver()

        # Get the checkpoint state for the model
        ckpt = tf.train.get_checkpoint_state('save')
        print('loading model: ', ckpt.model_checkpoint_path)

        # Restore the model at the checkpoint
        saver.restore(self.sess, ckpt.model_checkpoint_path)

        self.tracked_persons_sub = rospy.Subscriber("/tracked_persons",
                                                    TrackedPersons,
                                                    self.predict)
        self.pedestrian_prediction_pub = rospy.Publisher("/predicted_persons",
                                                         PeoplePrediction,
                                                         queue_size=1)
        self.prediction_marker_pub = rospy.Publisher(
            "/predicted_persons_marker_array", MarkerArray, queue_size=1)

        # Initialize the marker for people prediction
        self.prediction_marker = Marker()
        self.prediction_marker.type = Marker.SPHERE
        self.prediction_marker.action = Marker.MODIFY
        self.prediction_marker.ns = "people_predictions"
        self.prediction_marker.pose.orientation.w = 1
        self.prediction_marker.color.r = 0
        self.prediction_marker.color.g = 0
        self.prediction_marker.color.b = 0.5
        self.prediction_marker.scale.x = 0.2
        self.prediction_marker.scale.y = 0.2
        self.prediction_marker.scale.z = 0.2

        self.prev_frames = []
        for i in range(self.prev_length):
            self.prev_frames.append({})

        rospy.loginfo("Waiting for tracked persons...")
        rospy.wait_for_message("/predicted_persons", PeoplePrediction)
        rospy.loginfo("Ready.")
Example #5
class Social_Lstm_Prediction():
    def __init__(self):
        self.node_name = 'social_lstm'

        rospy.init_node(self.node_name)
        rospy.on_shutdown(self.cleanup)

        #         self.obs_length = 4
        #         self.pred_length = 8
        #         self.prev_length = 8
        self.obs_length = 8
        self.pred_length = 12
        self.prev_length = 12
        self.max_pedestrians = 40
        self.dimensions = [640, 480]
        self.time_resolution = 0.5

        # Define the path for the config file for saved args
        with open(os.path.join('save', 'social_config.pkl'), 'rb') as f:
            self.saved_args = pickle.load(f)

        # Create a SocialModel object with the saved_args and infer set to true
        self.social_lstm_model = SocialModel(self.saved_args, True)

        # Initialize a TensorFlow session
        self.sess = tf.InteractiveSession()

        # Initialize a saver
        saver = tf.train.Saver()

        # Get the checkpoint state for the model
        ckpt = tf.train.get_checkpoint_state('save')
        print('loading model: ', ckpt.model_checkpoint_path)

        # Restore the model at the checkpoint
        saver.restore(self.sess, ckpt.model_checkpoint_path)

        self.tracked_persons_sub = rospy.Subscriber("/tracked_persons",
                                                    TrackedPersons,
                                                    self.predict)
        self.pedestrian_prediction_pub = rospy.Publisher("/predicted_persons",
                                                         PeoplePrediction,
                                                         queue_size=1)
        self.prediction_marker_pub = rospy.Publisher(
            "/predicted_persons_marker_array", MarkerArray, queue_size=1)

        # Initialize the marker for people prediction
        self.prediction_marker = Marker()
        self.prediction_marker.type = Marker.SPHERE
        self.prediction_marker.action = Marker.MODIFY
        self.prediction_marker.ns = "people_predictions"
        self.prediction_marker.pose.orientation.w = 1
        self.prediction_marker.color.r = 0
        self.prediction_marker.color.g = 0
        self.prediction_marker.color.b = 0.5
        self.prediction_marker.scale.x = 0.2
        self.prediction_marker.scale.y = 0.2
        self.prediction_marker.scale.z = 0.2

        self.prev_frames = []
        for i in range(self.prev_length):
            self.prev_frames.append({})

        rospy.loginfo("Waiting for tracked persons...")
        rospy.wait_for_message("/predicted_persons", PeoplePrediction)
        rospy.loginfo("Ready.")

    def __interp_helper(self, y):
        """Helper to handle indices and logical indices of NaNs.

        Input:
            - y, 1d numpy array with possible NaNs
        Output:
            - nans, logical indices of NaNs
            - index, a function, with signature indices= index(logical_indices),
              to convert logical indices of NaNs to 'equivalent' indices
        Example:
            >>> # linear interpolation of NaNs
            >>> nans, x= nan_helper(y)
            >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
        """

        return np.isnan(y), lambda z: z.nonzero()[0]

    def __interpolate_1d_array(self, before_interpolated):
        nparray = np.array(before_interpolated)

        index = -1
        for i in range(self.prev_length):
            if not np.isnan(nparray[i]):
                index = i
                break

        for i in range(index):
            nparray[i] = nparray[index]

        nans, x = self.__interp_helper(nparray)
        nparray[nans] = np.interp(x(nans), x(~nans), nparray[~nans])
        return nparray

    def __generate_input(self, tracks):
        num_tracks = len(tracks)
        whole_array = []
        for i in range(num_tracks):
            track = tracks[i]
            track_id = track.track_id

            history_positions_x = []
            history_positions_y = []
            for index in range(self.prev_length):
                history_positions_x.append(float('nan'))
                history_positions_y.append(float('nan'))
                if track_id in self.prev_frames[index]:
                    history_positions_x[index] = self.prev_frames[index][
                        track_id][0]
                    history_positions_y[index] = self.prev_frames[index][
                        track_id][1]

            print(history_positions_x)
            print(history_positions_y)

            history_positions_x = self.__interpolate_1d_array(
                history_positions_x)
            history_positions_y = self.__interpolate_1d_array(
                history_positions_y)
            tracks_array = np.zeros((self.obs_length, 3))
            tracks_array[:, 0] = track_id
            # keep only the last obs_length positions (drop the first prev_length - obs_length = 4 entries)
            tracks_array[:, 1] = np.array(history_positions_x)[4:]
            tracks_array[:, 2] = np.array(history_positions_y)[4:]
            tracks_array = np.expand_dims(tracks_array, 1)

            print(tracks_array)

            if i == 0:
                whole_array = tracks_array
            else:
                whole_array = np.append(whole_array, tracks_array, axis=1)

        res_input = np.zeros(
            (self.obs_length + self.prev_length, self.max_pedestrians, 3))
        res_input[:self.obs_length, :num_tracks, :] = whole_array
        return res_input

    def predict(self, tracked_persons):
        tracks = tracked_persons.tracks

        track_dict = {}
        for track in tracks:
            #print track
            #print track.pose.pose.position.x
            track_dict[track.track_id] = [
                track.pose.pose.position.x, track.pose.pose.position.y
            ]

        del self.prev_frames[0]
        self.prev_frames.append(track_dict)

        if len(tracks) == 0:
            return

        input_data = self.__generate_input(tracks)
        #print input_data.shape
        #print input_data
        grid_batch = getSequenceGridMask(input_data, self.dimensions,
                                         self.saved_args.neighborhood_size,
                                         self.saved_args.grid_size)

        obs_traj = input_data[:self.obs_length]
        obs_grid = grid_batch[:self.obs_length]

        print "********************** PREDICT NEW TRAJECTORY ******************************"
        complete_traj = self.social_lstm_model.sample(self.sess, obs_traj,
                                                      obs_grid,
                                                      self.dimensions,
                                                      input_data,
                                                      self.pred_length)
        #print complete_traj

        # Initialize the markers array
        prediction_markers = MarkerArray()

        # Publish them
        people_predictions = PeoplePrediction()
        for frame_index in range(self.pred_length):
            people = People()
            people.header.stamp = tracked_persons.header.stamp + rospy.Duration(
                frame_index * self.time_resolution)
            people.header.frame_id = tracked_persons.header.frame_id

            predicted_frame_index = frame_index + self.obs_length
            for person_index in range(self.max_pedestrians):
                track_id = complete_traj[predicted_frame_index, person_index,
                                         0]
                x_coord = complete_traj[predicted_frame_index, person_index, 1]
                y_coord = complete_traj[predicted_frame_index, person_index, 2]
                if track_id == 0:
                    break

                person = Person()
                person.name = str(track_id)

                point = Point()
                point.x = x_coord
                point.y = y_coord
                person.position = point
                people.people.append(person)

                self.prediction_marker.header.frame_id = tracked_persons.header.frame_id
                self.prediction_marker.header.stamp = tracked_persons.header.stamp
                self.prediction_marker.id = int(track_id)
                self.prediction_marker.pose.position.x = person.position.x
                self.prediction_marker.pose.position.y = person.position.y
                #self.prediction_marker.color.a = 1 - (frame_index * 1.0 / (self.pred_length * 1.0))
                self.prediction_marker.color.a = 1.0
                prediction_markers.markers.append(self.prediction_marker)

            people_predictions.predicted_people.append(people)

        #print people_predictions

        self.pedestrian_prediction_pub.publish(people_predictions)
        self.prediction_marker_pub.publish(prediction_markers)

    def cleanup(self):
        print "Shutting down social lstm node"
Example #6
def main():
    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length',
                        type=int,
                        default=4,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length',
                        type=int,
                        default=4,
                        help='Predicted length of the trajectory')
    # Custom scenario to be tested on
    parser.add_argument('--scenario',
                        type=str,
                        default='collision',
                        help='Custom scenario to be tested on')

    sample_args = parser.parse_args()

    # Define the path for the config file for saved args
    with open(os.path.join('save', 'social_config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    # Create a SocialModel object with the saved_args and infer set to true
    model = SocialModel(saved_args, True)
    # Initialize a TensorFlow session
    sess = tf.InteractiveSession()
    # Initialize a saver
    saver = tf.train.Saver()

    # Get the checkpoint state for the model
    ckpt = tf.train.get_checkpoint_state('save')
    print('loading model: ', ckpt.model_checkpoint_path)

    # Restore the model at the checkpoint
    saver.restore(sess, ckpt.model_checkpoint_path)

    results = []

    # Load the data
    file_path = os.path.join('matlab', 'csv', sample_args.scenario + '.csv')
    data = np.genfromtxt(file_path, delimiter=',')
    # Reshape data
    x_batch = np.reshape(data, [
        sample_args.obs_length + sample_args.pred_length,
        saved_args.maxNumPeds, 3
    ])

    dimensions = [720, 576]
    grid_batch = getSequenceGridMask(x_batch, dimensions,
                                     saved_args.neighborhood_size,
                                     saved_args.grid_size)

    obs_traj = x_batch[:sample_args.obs_length]
    obs_grid = grid_batch[:sample_args.obs_length]

    complete_traj = model.sample(sess, obs_traj, obs_grid, dimensions, x_batch,
                                 sample_args.pred_length)

    total_error = get_mean_error(complete_traj, x_batch,
                                 sample_args.obs_length, saved_args.maxNumPeds)

    print "Mean error of the model on this scenario is", total_error
Example #7
def main():
    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length',
                        type=int,
                        default=8,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length',
                        type=int,
                        default=12,
                        help='Predicted length of the trajectory')
    # Test dataset
    parser.add_argument('--test_dataset',
                        type=int,
                        default=0,
                        help='Dataset to be tested on')

    # Parse the parameters
    sample_args = parser.parse_args()

    # Define the path for the config file for saved args
    with open(os.path.join('save', 'social_config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    # Create a SocialModel object with the saved_args and infer set to true
    model = SocialModel(saved_args, True)
    # Initialize a TensorFlow session
    sess = tf.InteractiveSession()
    # Initialize a saver
    saver = tf.train.Saver()

    # Get the checkpoint state for the model
    ckpt = tf.train.get_checkpoint_state('save')
    print('loading model: ', ckpt.model_checkpoint_path)

    # Restore the model at the checkpoint
    saver.restore(sess, ckpt.model_checkpoint_path)
    # saver.restore(sess, 'save/social_model.ckpt-800')
    # Dataset to get data from
    dataset = [sample_args.test_dataset]

    # Create a SocialDataLoader object with batch_size 1 and seq_length equal to observed_length + pred_length
    data_loader = SocialDataLoader(
        1, sample_args.pred_length + sample_args.obs_length,
        saved_args.maxNumPeds, dataset, True)

    # Reset all pointers of the data_loader
    data_loader.reset_batch_pointer()

    # Variable to maintain total error
    total_error = 0
    # For each batch
    for b in range(data_loader.num_batches):
        # Get the source, target and dataset data for the next batch
        x, y, d = data_loader.next_batch()

        # Batch size is 1
        x_batch, y_batch, d_batch = x[0], y[0], d[0]

        if d_batch == 0 and dataset[0] == 0:
            dimensions = [640, 480]
        else:
            dimensions = [720, 576]

        grid_batch = getSequenceGridMask(x_batch, dimensions,
                                         saved_args.neighborhood_size,
                                         saved_args.grid_size)

        obs_traj = x_batch[:sample_args.obs_length]
        obs_grid = grid_batch[:sample_args.obs_length]
        # obs_traj is an array of shape obs_length x maxNumPeds x 3

        complete_traj = model.sample(sess, obs_traj, obs_grid, dimensions,
                                     x_batch, sample_args.pred_length)

        # ipdb.set_trace()
        # complete_traj is an array of shape (obs_length+pred_length) x maxNumPeds x 3
        total_error += get_mean_error(complete_traj, x[0],
                                      sample_args.obs_length,
                                      saved_args.maxNumPeds)

        print "Processed trajectory number : ", b, "out of ", data_loader.num_batches, " trajectories"

    # Print the mean error across all the batches
    print "Total mean error of the model is ", total_error / data_loader.num_batches
Example #8
def main():

    # Set random seed
    np.random.seed(1)

    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length',
                        type=int,
                        default=6,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length',
                        type=int,
                        default=6,
                        help='Predicted length of the trajectory')
    # Test dataset
    parser.add_argument('--test_dataset',
                        type=int,
                        default=3,
                        help='Dataset to be tested on')

    # Model to be loaded
    parser.add_argument('--epoch',
                        type=int,
                        default=0,
                        help='Epoch of model to be loaded')

    # Parse the parameters
    # sample_args = parser.parse_args()
    args = parser.parse_args()
    # Save directory
    save_directory = 'save/' + str(args.test_dataset) + '/'

    # Define the path for the config file for saved args
    with open(os.path.join(save_directory, 'social_config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    # Create a SocialModel object with the saved_args and infer set to true
    model = SocialModel(saved_args, True)
    # Initialize a TensorFlow session
    sess = tf.InteractiveSession()
    # Initialize a saver
    saver = tf.train.Saver()

    # Get the checkpoint state for the model
    ckpt = tf.train.get_checkpoint_state(save_directory)
    # print ('loading model: ', ckpt.model_checkpoint_path)
    # print('hahah: ', len(ckpt.all_model_checkpoint_paths))
    print('loading model: ', ckpt.all_model_checkpoint_paths[args.epoch])

    # Restore the model at the checkpoint
    saver.restore(sess, ckpt.all_model_checkpoint_paths[args.epoch])

    # Dataset to get data from
    dataset = [0]

    # Create a SocialDataLoader object with batch_size 1 and seq_length equal to observed_length + pred_length
    data_loader = SocialDataLoader(1,
                                   args.pred_length + args.obs_length,
                                   saved_args.maxNumPeds,
                                   dataset,
                                   True,
                                   infer=True)

    # Reset all pointers of the data_loader
    data_loader.reset_batch_pointer()

    results = []

    # Variable to maintain total error
    total_error = 0
    # For each batch
    for b in range(data_loader.num_batches):
        # Get the source, target and dataset data for the next batch
        x, y, d = data_loader.next_batch(randomUpdate=False)

        # Batch size is 1
        x_batch, y_batch, d_batch = x[0], y[0], d[0]

        # if d_batch == 0 and dataset[0] == 0:
        #     dimensions = [640, 480]
        # else:
        #     dimensions = [720, 576]
        dimensions = [1640, 78]

        grid_batch = getSequenceGridMask(x_batch, dimensions,
                                         saved_args.neighborhood_size,
                                         saved_args.grid_size)

        obs_traj = x_batch[:args.obs_length]
        obs_grid = grid_batch[:args.obs_length]
        # obs_traj is an array of shape obs_length x maxNumPeds x 3

        print "********************** SAMPLING A NEW TRAJECTORY", b, "******************************"
        complete_traj = model.sample(sess, obs_traj, obs_grid, dimensions,
                                     x_batch, args.pred_length)

        # ipdb.set_trace()
        # complete_traj is an array of shape (obs_length+pred_length) x maxNumPeds x 3
        print('Sampled trajectory length:', len(complete_traj))
        total_error += get_mean_error(complete_traj, x[0], args.obs_length,
                                      saved_args.maxNumPeds)

        print "Processed trajectory number : ", b, "out of ", data_loader.num_batches, " trajectories"

        # plot_trajectories(x[0], complete_traj, sample_args.obs_length)
        # return
        results.append((x[0], complete_traj, args.obs_length))

    # Print the mean error across all the batches
    print "Total mean error of the model is ", total_error / data_loader.num_batches

    print "Saving results"
    with open(os.path.join(save_directory, 'social_results.pkl'), 'wb') as f:
        pickle.dump(results, f)
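
Each entry appended to results above is a tuple (true trajectory, sampled trajectory, obs_length), so the pickle written at the end can be read back like this; the path is assumed for illustration and the shapes follow the comments in the code above.

import pickle

with open('save/3/social_results.pkl', 'rb') as f:   # Example #8 writes to '<save_directory>/social_results.pkl'
    results = pickle.load(f)

true_traj, sampled_traj, obs_length = results[0]
print(len(results), true_traj.shape, obs_length)      # both arrays are (obs+pred, maxNumPeds, 3)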
Example #9
def train(args):
    if args.visible:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.visible

    save_path = make_save_path(args)
    dataset_path = args.dataset_path
    log_path = os.path.join(save_path, 'log')
    if not os.path.isdir(log_path):
        os.makedirs(log_path)
    # Create the SocialDataLoader object
    data_loader = SocialDataLoader(args.batch_size, args.seq_length,
            args.maxNumPeds, dataset_path, forcePreProcess=True)

    with open(os.path.join(save_path, 'social_config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    # Create a SocialModel object with the arguments
    model = SocialModel(args)
    all_loss = []
    # Initialize a TensorFlow session
    with tf.Session() as sess:
        # Initialize all variables in the graph
        sess.run(tf.initialize_all_variables())
        # Initialize a saver that saves all the variables in the graph
        saver = tf.train.Saver(tf.all_variables())
        summary_writer = tf.summary.FileWriter(log_path, sess.graph)

        # For each epoch
        for e in range(args.num_epochs):
            # Assign the learning rate value for this epoch
            sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
            # Reset the data pointers in the data_loader
            data_loader.reset_batch_pointer()

            # For each batch
            for b in range(data_loader.num_batches):
                # Tic
                start = time.time()

                # Get the source, target and dataset data for the next batch
                # s_batch, t_batch are input and target data which are lists containing numpy arrays of size seq_length x maxNumPeds x 3
                # d is the list of dataset indices from which each batch is generated (used to differentiate between datasets)
                s_batch, t_batch, d = data_loader.next_batch()

                # variable to store the loss for this batch
                loss_batch = 0

                # For each sequence in the batch
                for seq_num in range(data_loader.batch_size):
                    # s_seq, t_seq and d_seq contain the source, target and dataset index data for
                    # seq_length long consecutive frames in the dataset
                    # s_seq, t_seq would be numpy arrays of size seq_length x maxNumPeds x 3
                    # d_seq would be a scalar identifying the dataset from which this sequence is extracted
                    s_seq, t_seq, d_seq = s_batch[seq_num], t_batch[seq_num], d[seq_num]
                    '''
                    if d_seq == 0 and datasets[0] == 0:
                        dataset_data = [640, 480]
                    else:
                        dataset_data = [720, 576]
                    '''
                    grid_batch = getSequenceGridMask(s_seq, [0, 0], args.neighborhood_size, args.grid_size)

                    # Feed the source, target data
                    feed = {model.input_data: s_seq, model.target_data: t_seq, model.grid_data: grid_batch}

                    train_loss, _ = sess.run([model.cost, model.train_op], feed)

                    loss_batch += train_loss

                end = time.time()
                loss_batch = loss_batch / data_loader.batch_size
                all_loss.append(loss_batch)
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, time/seq_num = {:.3f}"
                    .format(
                        e * data_loader.num_batches + b,
                        args.num_epochs * data_loader.num_batches,
                        e,
                        loss_batch, end - start))

                # Save the model if the current epoch and batch number match the frequency
                if (e * data_loader.num_batches + b) % args.save_every == 0 and ((e * data_loader.num_batches + b) > 0):
                    checkpoint_path = os.path.join(save_path, 'social_model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
                    np.savetxt(os.path.join(log_path, 'loss.txt'), np.asarray(all_loss))
Example #10
class AllPeds(object):
    def __init__(self, args, verbose=True):
        self.verbose = verbose

        # define social LSTM model
        print('Building social LSTM model')
        with open(utils.check_path(args.social_conf_path), 'rb') as f:
            self._social_conf = pickle.load(f)
        self._model = SocialModel(self._social_conf, True)

        # define session
        self._sess = tf.InteractiveSession()

        # restore model parameters
        restorer = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(
            os.path.abspath(os.path.expanduser(args.ckpt_dir)))
        print('loading model: {}'.format(ckpt.model_checkpoint_path))
        restorer.restore(self._sess, ckpt.model_checkpoint_path)

        # probability that a new pedestrian pops up while self._cur_num_peds is below self._max_num_peds
        self._new_peds_prob = args.new_peds_prob
        # maximum number of pedestrians in a frame
        self._max_num_peds = self._social_conf.maxNumPeds
        # number of pedestrians in the current frame
        self._cur_num_peds = 0
        # a list indicating which of the max_num_peds pedestrian slots are currently occupied
        self._peds_exist = [False] * self.max_num_peds
        # internal data for social LSTM model
        self._data = np.zeros((1, self._max_num_peds, 3))  # shape=(1,MNP,3)
        self._grid_data = np.zeros(
            (1, self._max_num_peds, self._max_num_peds,
             self._social_conf.grid_size**2))  # shape=(1,MNP,MNP,grid_size**2)
        self._init_data = np.zeros((args.init_num_step, self._max_num_peds,
                                    3))  # shape=(init_num_step,MNP,3)
        self._init_grid_data = np.zeros(
            (args.init_num_step, self._max_num_peds, self._max_num_peds,
             self._social_conf.grid_size**2
             ))  # shape=(init_num_step,MNP,MNP,grid_size**2)
        # shape of background, a 2-element list [width, height]
        self._bg_shape = args.bg_shape
        # number of steps for initialization of a pedestrian
        self._init_num_step = args.init_num_step

        # for interpolation
        self._n_interp = args.n_interp
        self._interp_count = 0
        self._prev_data = np.zeros(self._data.shape)

        self._output_data = np.zeros(self._data.shape)

    def step(self):
        if self._interp_count % self._n_interp == 0:
            self._prev_data = self._data
            self._step()
            self._output_data = self._prev_data
        else:
            xy_data_prev = self._prev_data[0, :, 1:3]
            #xy_data_prev[self._data[0,:,0]==0,:] = 0
            xy_data = self._data[0, :, 1:3]

            self._output_data[0, :, 0] = self._data[0, :, 0]
            r = (self._interp_count + 1.) / self._n_interp
            self._output_data[0, :, 1:3] = (1 - r) * xy_data_prev + r * xy_data

        self._interp_count = (self._interp_count + 1) % self._n_interp

    def _step(self):
        ### a certain chance of a new pedestrian popping up
        # only possible if current number of pedestrians doesn't exceed the maximum number
        if self._cur_num_peds < self._max_num_peds:
            new_ped_pops_up = self._pops_up_fn(self._cur_num_peds,
                                               self._max_num_peds)
            if new_ped_pops_up:
                # create initial data for the new pedestrian
                new_data = self._init_ped_data()
                # add data of the new pedestrian to self._data and self._grid_data
                for i in range(self._max_num_peds):
                    if self._data[0, i, 0] == 0:  # an unoccupied element
                        newly_exist_pedID = i + 1
                        self._init_data[:, i, 0] = newly_exist_pedID
                        self._init_data[:, i, 1:3] = new_data
                        break
                assert (newly_exist_pedID == i + 1)  #DEBUG
                self._init_grid_data = getSequenceGridMask(
                    self._init_data, self._bg_shape,
                    self._social_conf.neighborhood_size,
                    self._social_conf.grid_size)
                # reinitialize LSTM model
                self._model.sample_init(self._sess, self._init_data,
                                        self._init_grid_data)
                self._data[0, i, :] = self._init_data[
                    -1,
                    i, :]  #np.reshape(self._init_data[-1,:,:], self._data.shape)
                self._grid_data = np.reshape(self._init_grid_data[-1, :, :, :],
                                             self._grid_data.shape)
                # update current number of pedestrians and pedestrian existence list
                self._cur_num_peds += 1
                self._peds_exist[i] = True

                if self.verbose:
                    print('A new pedestrian with ID {} pops up at ({},{})'\
                          .format(newly_exist_pedID,
                                  int(new_data[0,0]*self._bg_shape[0]),
                                  int(new_data[0,1]*self._bg_shape[1])))
        ### predict next step of all existing pedestrians
        self._data, self._grid_data = self._model.sample_one_step(
            self._sess, self._data, self._grid_data, self._bg_shape)

        ### remove pedestrians out-of-bound (not in the background area)
        for i in range(self._max_num_peds):
            pedID = self._data[0, i, 0]
            # if pedID==0 --> nonexisting pedestrian
            if pedID != 0:
                x = self._data[0, i, 1]
                y = self._data[0, i, 2]
                if (x < -0.1) or (x > 1.1) or (y < -0.1) or (y > 1.1):
                    # remove data of the current pedestrian from self._data and self._grid_data
                    self._init_data[:, i, :] = 0
                    self._init_grid_data = getSequenceGridMask(
                        self._init_data, self._bg_shape,
                        self._social_conf.neighborhood_size,
                        self._social_conf.grid_size)
                    # reinitialize social LSTM model
                    self._model.sample_init(self._sess, self._init_data,
                                            self._init_grid_data)
                    self._data[0, i, :] = self._init_data[
                        -1,
                        i, :]  #np.reshape(self._init_data[-1,:,:], self._data.shape)
                    self._grid_data = np.reshape(
                        self._init_grid_data[-1, :, :, :],
                        self._grid_data.shape)
                    # update current number of pedestrians and pedestrian existence list
                    self._cur_num_peds -= 1
                    self._peds_exist[i] = False

                    if self.verbose:
                        print('A pedestrian with ID {} is out-of-bound at ({},{}) and is removed.'\
                              .format(int(pedID),
                                      int(x*self._bg_shape[0]),
                                      int(y*self._bg_shape[1])))

    def _pops_up_fn(self, cur_n, max_n):
        #prob = self._new_peds_prob
        if self._cur_num_peds <= 15:
            prob = 0.5
        else:
            prob = -0.1
        rv = random.uniform(0, 1)  # a random variable ~ U(0,1)
        coin = (rv <= prob)
        return coin

    def _init_ped_data(self):
        random.seed(time.time())
        data = np.zeros((self._init_num_step, 2))
        # randomly pick a side among 4 sides of the background
        which_side = random.randint(1, 4)
        # randomly select start speed
        #mu = 0.01
        #sigma = 0.005
        #speed = np.random.normal(mu, sigma, 1)[0]
        speed = random.uniform(0.0001, 0.0005) / 10000
        # take steps (not considering direction yet)
        scalar_steps = speed * np.arange(self._init_num_step)
        # walking direction
        angle = random.uniform(0, math.pi / 2)
        # random start point
        start = random.uniform(0.2, 0.8)
        # calibrate angle according to the side from which the pedestrian pops up
        if which_side == 1:  # up
            angle += 1.25 * math.pi
            data[:, 0] = scalar_steps * math.cos(angle) + start  # x
            data[:, 1] = scalar_steps * -math.sin(angle)  # y
        elif which_side == 2:  # left
            angle += 1.75 * math.pi  # radians, consistent with the side-1 branch above
            if angle >= 2. * math.pi:
                angle -= 2. * math.pi
            data[:, 0] = scalar_steps * math.cos(angle)  # x
            data[:, 1] = scalar_steps * math.sin(angle) + start  # y
        elif which_side == 3:  # down
            angle += 0.25 * math.pi
            data[:, 0] = scalar_steps * math.cos(angle) + start  # x
            data[:, 1] = 1. - scalar_steps * math.sin(angle)  # y
        else:  # which_side==4, right
            angle += 0.75 * math.pi
            data[:, 0] = 1. + scalar_steps * math.cos(angle)  # x
            data[:, 1] = scalar_steps * math.sin(angle) + start  # y

        return data

    def check_collision(self):
        tol = 1. / np.amax(self._bg_shape)
        n_collision = 0
        for i in range(self._max_num_peds):
            pedID_ref = self._data[0, i, 0]
            if pedID_ref != 0:
                for j in range(i + 1, self._max_num_peds):
                    pedID_other = self._data[0, j, 0]
                    if pedID_other != 0:
                        refXY = self._data[0, i, 1:3]
                        otherXY = self._data[0, j, 1:3]
                        dist = np.sum(np.square(refXY - otherXY))**0.5
                        if dist < tol:
                            n_collision += 1
                            if self.verbose:
                                print('Pedestrian {} collides with pedestrian {}'\
                                      .format(int(pedID_ref), int(pedID_other)))
        if self._cur_num_peds > 1:
            c_rate = (2 * n_collision / ((self._cur_num_peds**2) *
                                         (self._cur_num_peds - 1)))**0.5
        else:
            c_rate = 0.

        return n_collision, c_rate

    def close_session(self):
        self._sess.close()
        print('TF session in AllPeds class is closed.')

    @property
    def bg_shape(self):
        return self._bg_shape

    @property
    def max_num_peds(self):
        return self._max_num_peds

    @property
    def cur_num_peds(self):
        return self._cur_num_peds

    @property
    def existing_peds(self):
        existing_peds = self._output_data[0,
                                          self._output_data[0, :, 0] != 0, :]
        existing_peds[:, 0] = (existing_peds[:, 0])
        existing_peds[:, 1] = (existing_peds[:, 1] * self._bg_shape[0]).astype(
            np.int32)
        existing_peds[:, 2] = (existing_peds[:, 2] * self._bg_shape[1]).astype(
            np.int32)
        return existing_peds
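
The step() method above only runs the LSTM every n_interp calls; on the intermediate calls it linearly blends between the last two model outputs using r = (interp_count + 1) / n_interp. A tiny numeric illustration of that blend with made-up positions:

import numpy as np

n_interp = 4
prev_xy = np.array([0.20, 0.40])   # position at the last model update
next_xy = np.array([0.28, 0.44])   # position predicted for the next update
for interp_count in range(1, n_interp):
    r = (interp_count + 1.) / n_interp
    print(interp_count, (1 - r) * prev_xy + r * next_xy)
# 1 [0.24 0.42]   (r = 0.5)
# 2 [0.26 0.43]   (r = 0.75)
# 3 [0.28 0.44]   (r = 1.0, i.e. the blend reaches the new prediction just before the next update)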
Example #11
def main():

    # Set random seed
    np.random.seed(1)

    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length', type=int, default=8,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length', type=int, default=12,
                        help='Predicted length of the trajectory')
    # Test dataset
    parser.add_argument('--test_dataset', type=str,
                        help='Dataset to be tested on')

    parser.add_argument('--visible',type=str,
                        required=False, default=None, help='GPU to run on')

    parser.add_argument('--model_path', type=str)
    # Parse the parameters
    sample_args = parser.parse_args()

    if sample_args.visible:
        os.environ["CUDA_VISIBLE_DEVICES"] = sample_args.visible

    save_path = sample_args.model_path

    # Define the path for the config file for saved args
    with open(os.path.join(save_path, 'social_config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    # Create a SocialModel object with the saved_args and infer set to true
    model = SocialModel(saved_args, True)
    # Initialize a TensorFlow session
    sess = tf.InteractiveSession()
    # Initialize a saver
    saver = tf.train.Saver()

    # Get the checkpoint state for the model
    ckpt = tf.train.get_checkpoint_state(save_path)
    print ('loading model: ', ckpt.model_checkpoint_path)

    # Restore the model at the checkpoint
    saver.restore(sess, ckpt.model_checkpoint_path)

    # Create a SocialDataLoader object with batch_size 1 and seq_length equal to observed_length + pred_length
    data_loader = SocialDataLoader(1, sample_args.pred_length +
            sample_args.obs_length, saved_args.maxNumPeds, sample_args.test_dataset, True)

    # Reset all pointers of the data_loader
    data_loader.reset_batch_pointer()

    results = []

    # Variable to maintain total error
    total_error = 0
    # For each batch
    for b in range(data_loader.num_batches):
        # Get the source, target and dataset data for the next batch
        x, y, d = data_loader.next_batch(randomUpdate=False)

        # Batch size is 1
        x_batch, y_batch, d_batch = x[0], y[0], d[0]

        '''
        if d_batch == 0 and dataset[0] == 0:
            dimensions = [640, 480]
        else:
            dimensions = [720, 576]
        '''
        grid_batch = getSequenceGridMask(x_batch, [0,0], saved_args.neighborhood_size, saved_args.grid_size)

        obs_traj = x_batch[:sample_args.obs_length]
        obs_grid = grid_batch[:sample_args.obs_length]
        # obs_traj is an array of shape obs_length x maxNumPeds x 3

        print "********************** SAMPLING A NEW TRAJECTORY", b, "******************************"
        complete_traj = model.sample(sess, obs_traj, obs_grid, [0,0], x_batch, sample_args.pred_length)

        # ipdb.set_trace()
        # complete_traj is an array of shape (obs_length+pred_length) x maxNumPeds x 3
        total_error += get_mean_error(complete_traj, x[0], sample_args.obs_length, saved_args.maxNumPeds)

        print "Processed trajectory number : ", b, "out of ", data_loader.num_batches, " trajectories"

        # plot_trajectories(x[0], complete_traj, sample_args.obs_length)
        # return
        results.append((x[0], complete_traj, sample_args.obs_length))

    # Print the mean error across all the batches
    print "Total mean error of the model is ", total_error/data_loader.num_batches

    print "Saving results"
    with open(os.path.join(save_path, 'social_results.pkl'), 'wb') as f:
        pickle.dump(results, f)
Example #12
def main():

    np.random.seed(1)

    parser = argparse.ArgumentParser()
    # Observed trajectory length
    parser.add_argument('--obs_length',
                        type=int,
                        default=7,
                        help='Observed length of the trajectory')
    # Predicted trajectory length
    parser.add_argument('--pred_length',
                        type=int,
                        default=5,
                        help='Predicted length of the trajectory')
    # Test dataset
    parser.add_argument('--test_dataset',
                        type=int,
                        default=2,
                        help='Dataset to be tested on')

    # Model to be loaded
    parser.add_argument('--epoch',
                        type=int,
                        default=8,
                        help='Epoch of model to be loaded')

    sample_args = parser.parse_args(args=[])

    # Save directory
    save_directory = 'save/' + str(sample_args.test_dataset) + '/'

    # Define the path for the config file for saved args
    with open(os.path.join(save_directory, 'social_config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)
    # Create a SocialModel object with the saved_args and infer set to true
    model = SocialModel(saved_args, True)
    # Initialize a TensorFlow session
    config = tf.ConfigProto(
        log_device_placement=True
    )  # Showing which device is allocated (in case of multiple GPUs)
    config.gpu_options.per_process_gpu_memory_fraction = 0.8  # Cap each GPU process at 80% of GPU memory
    sess = tf.InteractiveSession(config=config)
    # Initialize a saver
    saver = tf.train.Saver()

    # Get the checkpoint state for the model
    ckpt = tf.train.get_checkpoint_state(save_directory)
    # print ('loading model: ', ckpt.model_checkpoint_path)
    print('loading model: ',
          ckpt.all_model_checkpoint_paths[sample_args.epoch])

    # Restore the model at the checkpoint
    saver.restore(sess, ckpt.all_model_checkpoint_paths[sample_args.epoch])

    # Dataset to get data from
    dataset = [sample_args.test_dataset]

    # Create a SocialDataLoader object with batch_size 1 and seq_length equal to observed_length + pred_length
    data_loader = SocialDataLoader(1,
                                   sample_args.pred_length +
                                   sample_args.obs_length,
                                   saved_args.maxNumPeds,
                                   dataset,
                                   True,
                                   infer=True)

    # Reset all pointers of the data_loader
    data_loader.reset_batch_pointer()

    results = []

    # Variable to maintain total error
    total_error = 0
    # For each batch
    for b in range(data_loader.num_batches):
        # Get the source, target and dataset data for the next batch
        x, y, d = data_loader.next_batch(randomUpdate=False)

        # Batch size is 1
        x_batch, y_batch, d_batch = x[0], y[0], d[0]

        if d_batch == 0 and dataset[0] == 0:
            dimensions = [640, 480]
        else:
            dimensions = [720, 576]

        grid_batch = getSequenceGridMask(x_batch, dimensions,
                                         saved_args.neighborhood_size,
                                         saved_args.grid_size)

        obs_traj = x_batch[:sample_args.obs_length]
        obs_grid = grid_batch[:sample_args.obs_length]
        # obs_traj is an array of shape obs_length x maxNumPeds x 3

        print("********************** SAMPLING A NEW TRAJECTORY", b,
              "******************************")
        complete_traj = model.sample(sess, obs_traj, obs_grid, dimensions,
                                     x_batch, sample_args.pred_length)

        # ipdb.set_trace()
        # complete_traj is an array of shape (obs_length+pred_length) x maxNumPeds x 3
        total_error += get_mean_error(complete_traj, x[0],
                                      sample_args.obs_length,
                                      saved_args.maxNumPeds)

        print("Processed trajectory number : ", b, "out of ",
              data_loader.num_batches, " trajectories")
        print('Model loaded: ',
              ckpt.all_model_checkpoint_paths[sample_args.epoch])

        # plot_trajectories(x[0], complete_traj, sample_args.obs_length)
        # return
        results.append((x[0], complete_traj, sample_args.obs_length))

    # Print the mean error across all the batches
    print("Total mean error of the model is ",
          total_error / data_loader.num_batches)

    print("Saving results")
    with open(os.path.join(save_directory, 'social_results.pkl'), 'wb') as f:
        pickle.dump(results, f)
Example #13
def train(args):
    datasets = list(range(2))

    data_loader = SocialDataLoader(args.batch_size,
                                   args.seq_length,
                                   args.max_num_peds,
                                   datasets,
                                   forcePreProcess=True)

    model = SocialModel(args)

    optimizer = tf.keras.optimizers.RMSprop(args.learning_rate, decay=5e-4)

    for e in range(args.num_epochs):

        data_loader.reset_batch_pointer()

        for b in range(data_loader.num_batches):  # renamed to b so the inner per-sequence loop does not shadow it
            start = time.time()

            x, y, d, num_peds, ped_ids = data_loader.next_batch()

            for batch in range(data_loader.batch_size):

                x_batch, y_batch, d_batch, num_ped_batch, ped_id_batch = x[
                    batch], y[batch], d[batch], num_peds[batch], ped_ids[batch]

                if d_batch == 0 and datasets[0] == 0:
                    dataset_data = [640, 480]
                else:
                    dataset_data = [720, 576]

                print(ped_id_batch)

                print(num_ped_batch)

                grid_batch = get_sequence_grid_mask(x_batch, dataset_data,
                                                    args.neighborhood_size,
                                                    args.grid_size)

                # print("grid batch size:{}".format(grid_batch.shape))
                # print(np.where(grid_batch > 0))

                # ped_ids_index = dict(zip(ped_id_batch, range(0, len(ped_id_batch))))
                x_batch, ped_ids_index = data_loader.convert_proper_array(
                    x_batch, num_ped_batch, ped_id_batch)

                train_loss = 0.0
                with tf.GradientTape() as tape:
                    tensor_x = tf.convert_to_tensor(x_batch, dtype=tf.float32)

                    logits = model(tensor_x, ped_id_batch, ped_ids_index)

                    [o_mux, o_muy, o_sx, o_sy, o_corr] = get_coef(logits)

                    # reshape target data so that it aligns with predictions
                    tensor_y = tf.convert_to_tensor(y, dtype=tf.float32)

                    # flat_target_data = tf.reshape(tensor_y, [-1, 2])
                    # Extract the x-coordinates and y-coordinates from the target data
                    [x_data, y_data] = tf.split(tensor_y, 2, -1)

                    # Compute the loss function
                    loss = get_lossfunc(o_mux, o_muy, o_sx, o_sy, o_corr,
                                        x_data, y_data)

                    # Compute the cost
                    train_loss = tf.math.divide(
                        loss, (args.batch_size * args.seq_length))

                    grads = tape.gradient(train_loss,
                                          model.trainable_variables)

                    optimizer.apply_gradients(
                        zip(grads, model.trainable_variables))

                end = time.time()
                # Print epoch, batch, loss and time taken
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                    .format(e * data_loader.num_batches + b,
                            args.num_epochs * data_loader.num_batches, e,
                            train_loss, end - start))
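
Example #13 relies on get_coef and get_lossfunc, which in Social LSTM implementations typically turn the five raw outputs per pedestrian into bivariate-Gaussian parameters and score the targets under that density. A hedged sketch of that idea follows; the names and exact tensor handling may differ from the functions used above.

import numpy as np
import tensorflow as tf

def get_coef_sketch(logits):
    # Split the 5 raw outputs into Gaussian parameters:
    # means stay linear, std devs go through exp(), correlation through tanh().
    mux, muy, sx, sy, corr = tf.split(logits, 5, axis=-1)
    return mux, muy, tf.exp(sx), tf.exp(sy), tf.tanh(corr)

def bivariate_nll_sketch(mux, muy, sx, sy, corr, x_data, y_data, eps=1e-20):
    # Negative log-likelihood of the target (x, y) under the predicted 2D Gaussian.
    zx = (x_data - mux) / sx
    zy = (y_data - muy) / sy
    z = zx ** 2 + zy ** 2 - 2.0 * corr * zx * zy
    neg_rho = 1.0 - corr ** 2
    pdf = tf.exp(-z / (2.0 * neg_rho)) / (2.0 * np.pi * sx * sy * tf.sqrt(neg_rho))
    return tf.reduce_sum(-tf.math.log(tf.maximum(pdf, eps)))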