Example #1
def encode_inputs(X, y, place_cell_ensembles, head_direction_ensembles, cuda=True, coder=None):
    init_pos, init_hd, ego_vel = X
    target_pos, target_hd = y

    initial_conds = utils.encode_initial_conditions(init_pos,
                                                    init_hd,
                                                    place_cell_ensembles,
                                                    head_direction_ensembles)

    ensembles_targets = utils.encode_targets(target_pos,
                                             target_hd,
                                             place_cell_ensembles,
                                             head_direction_ensembles)
    inputs = ego_vel
    if cuda:
        # Move the tensors and the encoded initial conditions to the GPU
        init_pos = init_pos.cuda()
        init_hd = init_hd.cuda()
        inputs = inputs.cuda()
        target_pos = target_pos.cuda()
        target_hd = target_hd.cuda()
        initial_conds = tuple(map(to_cuda, initial_conds))

    if coder:
        inputs = coder(inputs, value=torch.Tensor([0., 1., 0.]))
        target_pos = coder(target_pos, target=True)
        target_hd = coder(target_hd, target=True)

    inputs = inputs.transpose(1, 0)
    return init_pos, init_hd, inputs, target_pos, target_hd, initial_conds, ensembles_targets
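A minimal usage sketch for encode_inputs, assuming X and y arrive as tuples of torch tensors from a loader and that torch, utils, to_cuda and the two ensembles are already defined (the loader name data_loader is illustrative, not from the source):

for X, y in data_loader:
    (init_pos, init_hd, inputs, target_pos, target_hd,
     initial_conds, ensembles_targets) = encode_inputs(
        X, y, place_cell_ensembles, head_direction_ensembles,
        cuda=torch.cuda.is_available())
    # inputs has had its first two dimensions swapped by the transpose above
    # and is ready to be fed to the recurrent model.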
Example #2
def train():
  """Training loop."""

  tf.reset_default_graph()

  # Create the motion models for training and evaluation
  data_reader = dataset_reader.DataReader(
      FLAGS.task_dataset_info, root=FLAGS.task_root, num_threads=4)
  train_traj = data_reader.read(batch_size=FLAGS.training_minibatch_size)

  # Create the ensembles that provide targets during training
  place_cell_ensembles = utils.get_place_cell_ensembles(
      env_size=FLAGS.task_env_size,
      neurons_seed=FLAGS.task_neurons_seed,
      targets_type=FLAGS.task_targets_type,
      lstm_init_type=FLAGS.task_lstm_init_type,
      n_pc=FLAGS.task_n_pc,
      pc_scale=FLAGS.task_pc_scale)

  head_direction_ensembles = utils.get_head_direction_ensembles(
      neurons_seed=FLAGS.task_neurons_seed,
      targets_type=FLAGS.task_targets_type,
      lstm_init_type=FLAGS.task_lstm_init_type,
      n_hdc=FLAGS.task_n_hdc,
      hdc_concentration=FLAGS.task_hdc_concentration)
  target_ensembles = place_cell_ensembles + head_direction_ensembles

  # Model creation
  rnn_core = model.GridCellsRNNCell(
      target_ensembles=target_ensembles,
      nh_lstm=FLAGS.model_nh_lstm,
      nh_bottleneck=FLAGS.model_nh_bottleneck,
      dropoutrates_bottleneck=np.array(FLAGS.model_dropout_rates),
      bottleneck_weight_decay=FLAGS.model_weight_decay,
      bottleneck_has_bias=FLAGS.model_bottleneck_has_bias,
      init_weight_disp=FLAGS.model_init_weight_disp)
  rnn = model.GridCellsRNN(rnn_core, FLAGS.model_nh_lstm)

  # Get a trajectory batch
  input_tensors = []
  init_pos, init_hd, ego_vel, target_pos, target_hd = train_traj
  if FLAGS.task_velocity_inputs:
    # Add the required amount of noise to the velocities
    vel_noise = tf.distributions.Normal(0.0, 1.0).sample(
        sample_shape=ego_vel.get_shape()) * FLAGS.task_velocity_noise
    input_tensors = [ego_vel + vel_noise] + input_tensors
  # Concatenate all inputs
  inputs = tf.concat(input_tensors, axis=2)

  # Replace Euclidean positions and angles with encodings of the place and
  # head-direction ensembles. Note that initial_conds will be zeros if the
  # ensembles were configured to provide that type of initialization.
  initial_conds = utils.encode_initial_conditions(
      init_pos, init_hd, place_cell_ensembles, head_direction_ensembles)

  # Encode targets as well
  ensembles_targets = utils.encode_targets(
      target_pos, target_hd, place_cell_ensembles, head_direction_ensembles)

  # Estimate the future encoding of the place and hd ensembles from egocentric velocity inputs
  outputs, _ = rnn(initial_conds, inputs, training=True)
  ensembles_logits, bottleneck, lstm_output = outputs

  # Training loss
  pc_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
      labels=ensembles_targets[0], logits=ensembles_logits[0], name='pc_loss')
  hd_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
      labels=ensembles_targets[1], logits=ensembles_logits[1], name='hd_loss')
  total_loss = pc_loss + hd_loss
  train_loss = tf.reduce_mean(total_loss, name='train_loss')

  # Optimisation ops
  optimizer_class = eval(FLAGS.training_optimizer_class)  # pylint: disable=eval-used
  optimizer = optimizer_class(**eval(FLAGS.training_optimizer_options))  # pylint: disable=eval-used
  grad = optimizer.compute_gradients(train_loss)
  clip_gradient = eval(FLAGS.training_clipping_function)  # pylint: disable=eval-used
  clipped_grad = [
      clip_gradient(g, var, FLAGS.training_clipping) for g, var in grad
  ]
  train_op = optimizer.apply_gradients(clipped_grad)

  # Store the grid scores
  grid_scores = dict()
  grid_scores['btln_60'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['btln_90'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['btln_60_separation'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['btln_90_separation'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['lstm_60'] = np.zeros((FLAGS.model_nh_lstm,))
  grid_scores['lstm_90'] = np.zeros((FLAGS.model_nh_lstm,))

  # Create scorer objects
  starts = [0.2] * 10
  ends = np.linspace(0.4, 1.0, num=10)
  masks_parameters = zip(starts, ends.tolist())
  latest_epoch_scorer = scores.GridScorer(20, data_reader.get_coord_range(),
                                          masks_parameters)

  with tf.train.SingularMonitoredSession() as sess:
    for epoch in range(FLAGS.training_epochs):
      loss_acc = list()
      for _ in range(FLAGS.training_steps_per_epoch):
        res = sess.run({'train_op': train_op, 'total_loss': train_loss})
        loss_acc.append(res['total_loss'])

      tf.logging.info('Epoch %i, mean loss %.5f, std loss %.5f', epoch,
                      np.mean(loss_acc), np.std(loss_acc))
      if epoch % FLAGS.saver_eval_time == 0:
        res = dict()
        for _ in range(FLAGS.training_evaluation_minibatch_size //
                       FLAGS.training_minibatch_size):
          mb_res = sess.run({
              'bottleneck': bottleneck,
              'lstm': lstm_output,
              'pos_xy': target_pos
          })
          res = utils.concat_dict(res, mb_res)

        # Store at the end of validation
        filename = 'rates_and_sac_latest_hd.pdf'
        grid_scores['btln_60'], grid_scores['btln_90'], grid_scores[
            'btln_60_separation'], grid_scores[
                'btln_90_separation'] = utils.get_scores_and_plot(
                    latest_epoch_scorer, res['pos_xy'], res['bottleneck'],
                    FLAGS.saver_results_directory, filename)
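In the optimisation block above, clip_gradient is obtained from a flag via eval, so the flag must name a callable that takes (gradient, variable, limit) and returns a (gradient, variable) pair. A minimal sketch of such a function, assuming plain value clipping (the name clip_all_gradients is illustrative, not from the source):

def clip_all_gradients(g, var, limit):
    # Clip each gradient element to [-limit, limit] and keep the (grad, var)
    # pairing expected by optimizer.apply_gradients.
    return tf.clip_by_value(g, -limit, limit), var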
Example #3
for j, train_traj in enumerate(dataset):
    if j == 400:
        break
    init_pos = train_traj['init_pos']
    init_hd = train_traj['init_hd']
    ego_vel = train_traj['ego_vel']
    target_pos = train_traj['target_pos']
    target_hd = train_traj['target_hd']
    init_hd = tf.reshape(init_hd, [-1, 1])
    target_hd = tf.reshape(target_hd, [10, -1, 1])
    init_pos = tf.cast(init_pos, tf.float32)
    init_hd = tf.cast(init_hd, tf.float32)
    ego_vel = tf.cast(ego_vel, tf.float32)
    target_pos = tf.cast(target_pos, tf.float32)
    target_hd = tf.cast(target_hd, tf.float32)
    inputs = tf.concat([ego_vel], axis=2)
    initial_conds = utils.encode_initial_conditions(init_pos, init_hd,
                                                    place_cell_ensembles,
                                                    head_direction_ensembles)
    outputs, final_state = model(inputs, initial_conds, training=False)
    if first:
        # Load the saved weights once the first forward pass has built the
        # model's variables.
        model.load_weights("rat_weights.h5")
        first = False
    ensembles_logits, bottleneck, lstm_output = outputs
    for i in range(len(target_pos)):
        mb_res = {
            "bottleneck": bottleneck[i],
            "lstm_out": lstm_output[i],
            "pos_xy": target_pos[i]
        }
        res_bottleneck.append(np.array(bottleneck[i]))
        res_lstm_out.append(np.array(lstm_output[i]))
        res_pos_xy.append(np.array(target_pos[i]))
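To score the units collected above, the per-trajectory lists can be stacked and handed to the same scorer as in Example #2. A minimal sketch, assuming res_bottleneck, res_lstm_out and res_pos_xy were initialised as empty lists before the loop:

res = {
    'bottleneck': np.stack(res_bottleneck),
    'lstm': np.stack(res_lstm_out),
    'pos_xy': np.stack(res_pos_xy),
}
# res can then be passed to utils.get_scores_and_plot, as in Example #2.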
Example #4
def train():
    """Training loop."""
    # Create the ensembles that provide targets during training
    place_cell_ensembles = utils.get_place_cell_ensembles(
        env_size=FLAGS['task_env_size'],
        neurons_seed=FLAGS['task_neurons_seed'],
        targets_type=FLAGS['task_targets_type'],
        lstm_init_type=FLAGS['task_lstm_init_type'],
        n_pc=FLAGS['task_n_pc'],
        pc_scale=FLAGS['task_pc_scale'])
    head_direction_ensembles = utils.get_head_direction_ensembles(
        neurons_seed=FLAGS['task_neurons_seed'],
        targets_type=FLAGS['task_targets_type'],
        lstm_init_type=FLAGS['task_lstm_init_type'],
        n_hdc=FLAGS['task_n_hdc'],
        hdc_concentration=FLAGS['task_hdc_concentration'])
    target_ensembles = place_cell_ensembles + head_direction_ensembles
    # Store the grid scores
    '''grid_scores = dict()
    grid_scores['btln_60'] = np.zeros((FLAGS['model_nh_bottleneck'],))
    grid_scores['btln_90'] = np.zeros((FLAGS['model_nh_bottleneck'],))
    grid_scores['btln_60_separation'] = np.zeros((FLAGS['model_nh_bottleneck'],))
    grid_scores['btln_90_separation'] = np.zeros((FLAGS['model_nh_bottleneck'],))
    grid_scores['lstm_60'] = np.zeros((FLAGS['model_nh_lstm'],))
    grid_scores['lstm_90'] = np.zeros((FLAGS['model_nh_lstm'],))

    # Create scorer objects
    starts = [0.2] * 10
    ends = np.linspace(0.4, 1.0, num=10)
    masks_parameters = zip(starts, ends.tolist())
    latest_epoch_scorer = scores.GridScorer(20, data_reader.get_coord_range(),
                                            masks_parameters)'''

    #tf.compat.v1.reset_default_graph()

    data_reader = dataset_reader.DataReader(
        FLAGS['task_dataset_info'],
        root=FLAGS['task_root'],
        num_threads=4,
        batch_size=FLAGS['training_minibatch_size'])

    # Model creation
    rnn = model.GridCellNetwork(
        target_ensembles=target_ensembles,
        nh_lstm=FLAGS['model_nh_lstm'],
        nh_bottleneck=FLAGS['model_nh_bottleneck'],
        dropoutrates_bottleneck=np.array(FLAGS['model_dropout_rates']),
        bottleneck_weight_decay=FLAGS['model_weight_decay'],
        bottleneck_has_bias=FLAGS['model_bottleneck_has_bias'],
        init_weight_disp=FLAGS['model_init_weight_disp'])

    optimizer = tf.keras.optimizers.RMSprop(learning_rate=1e-5,
                                            momentum=0.9,
                                            clipvalue=1e-5)
    for epoch in range(1000):
        loss_metric = tf.keras.metrics.Mean()
        for batch in range(1000):
            train_traj = data_reader.read()
            init_pos, init_hd, ego_vel, target_pos, target_hd = train_traj
            input_tensors = []
            if FLAGS['task_velocity_inputs']:
                vel_noise = tfp.distributions.Normal(
                    0.0, 1.0).sample(sample_shape=tf.shape(
                        ego_vel)) * FLAGS['task_velocity_noise']
                input_tensors = [ego_vel + vel_noise] + input_tensors
            inputs = tf.concat(input_tensors, axis=2)
            initial_conds = utils.encode_initial_conditions(
                init_pos, init_hd, place_cell_ensembles,
                head_direction_ensembles)
            ensembles_targets = utils.encode_targets(target_pos, target_hd,
                                                     place_cell_ensembles,
                                                     head_direction_ensembles)
            loss, gradients = train_step(inputs, initial_conds,
                                         ensembles_targets, rnn)
            back_pass(rnn, optimizer, gradients)
            loss_metric(loss)
        print("epoch {}, loss {}".format(epoch, loss_metric.result()))
        loss_metric.reset_states()
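The loop above relies on train_step and back_pass helpers that are not shown. A plausible sketch of both, modelled on the TF2 loop in Example #5 and assuming the model returns (logits, bottleneck, lstm_output) plus a final state:

def train_step(inputs, initial_conds, ensembles_targets, rnn):
    # Forward pass under a gradient tape; cross-entropy against the
    # place-cell and head-direction targets.
    with tf.GradientTape() as tape:
        (ensembles_logits, _, _), _ = rnn(inputs, initial_conds, training=True)
        pc_loss = tf.nn.softmax_cross_entropy_with_logits(
            labels=ensembles_targets[0], logits=ensembles_logits[0])
        hd_loss = tf.nn.softmax_cross_entropy_with_logits(
            labels=ensembles_targets[1], logits=ensembles_logits[1])
        loss = tf.reduce_mean(pc_loss + hd_loss)
    return loss, tape.gradient(loss, rnn.trainable_weights)

def back_pass(rnn, optimizer, gradients):
    # Apply the gradients to the model's weights.
    optimizer.apply_gradients(zip(gradients, rnn.trainable_weights))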
Example #5
def train():
    """Training loop."""

    #tf.reset_default_graph()

    # Create the motion models for training and evaluation
    data_reader = dataset_reader.DataReader(
        FLAGS['task_dataset_info'],
        root=FLAGS['task_root'],
        batch_size=FLAGS['training_minibatch_size'])
    dataset = data_reader.read()
    #train_traj = data_reader.read()
    # Create the ensembles that provide targets during training
    place_cell_ensembles = utils.get_place_cell_ensembles(
        env_size=FLAGS['task_env_size'],
        neurons_seed=FLAGS['task_neurons_seed'],
        targets_type=FLAGS['task_targets_type'],
        lstm_init_type=FLAGS['task_lstm_init_type'],
        n_pc=FLAGS['task_n_pc'],
        pc_scale=FLAGS['task_pc_scale'])

    head_direction_ensembles = utils.get_head_direction_ensembles(
        neurons_seed=FLAGS['task_neurons_seed'],
        targets_type=FLAGS['task_targets_type'],
        lstm_init_type=FLAGS['task_lstm_init_type'],
        n_hdc=FLAGS['task_n_hdc'],
        hdc_concentration=FLAGS['task_hdc_concentration'])

    target_ensembles = place_cell_ensembles + head_direction_ensembles
    '''
    # Get a trajectory batch
    input_tensors = []
    init_pos, init_hd, ego_vel, target_pos, target_hd = train_traj
    if FLAGS['task_velocity_inputs']:
        # Add the required amount of noise to the velocities
        vel_noise = tfb.distributions.Normal(0.0, 1.0).sample(
            sample_shape=ego_vel.get_shape()) * FLAGS['task_velocity_noise']
        input_tensors = [ego_vel + vel_noise] + input_tensors
    # Concatenate all inputs
    inputs = tf.concat(input_tensors, axis=2)

    # Replace euclidean positions and angles by encoding of place and hd ensembles
    # Note that the initial_conds will be zeros if the ensembles were configured
    # to provide that type of initialization
    initial_conds = utils.encode_initial_conditions(
        init_pos, init_hd, place_cell_ensembles, head_direction_ensembles)

    # Encode targets as well
    ensembles_targets = utils.encode_targets(
        target_pos, target_hd, place_cell_ensembles, head_direction_ensembles)

    # Estimate future encoding of place and hd ensembles inputing egocentric vels
    '''

    #Defining model
    grid_cell_model = GridCellNetwork(
        target_ensembles=target_ensembles,
        nh_lstm=FLAGS['model_nh_lstm'],
        nh_bottleneck=FLAGS['model_nh_bottleneck'],
        dropoutrates_bottleneck=FLAGS['model_dropout_rates'],
        bottleneck_weight_decay=FLAGS['model_weight_decay'],
        bottleneck_has_bias=FLAGS['model_bottleneck_has_bias'],
        init_weight_disp=FLAGS['model_init_weight_disp'],
    )

    # Store the grid scores
    grid_scores = dict()
    grid_scores['btln_60'] = np.zeros((FLAGS['model_nh_bottleneck'], ))
    grid_scores['btln_90'] = np.zeros((FLAGS['model_nh_bottleneck'], ))
    grid_scores['btln_60_separation'] = np.zeros(
        (FLAGS['model_nh_bottleneck'], ))
    grid_scores['btln_90_separation'] = np.zeros(
        (FLAGS['model_nh_bottleneck'], ))
    grid_scores['lstm_60'] = np.zeros((FLAGS['model_nh_lstm'], ))
    grid_scores['lstm_90'] = np.zeros((FLAGS['model_nh_lstm'], ))

    # Create scorer objects
    starts = [0.2] * 10
    ends = np.linspace(0.4, 1.0, num=10)
    masks_parameters = zip(starts, ends.tolist())
    latest_epoch_scorer = scores.GridScorer(20, data_reader.get_coord_range(),
                                            masks_parameters)

    # In the TF1 version (Example #2) we can run without a feed_dict because
    # the SingularMonitoredSession lets us pass fetches directly to sess.run.
    # pos_xy is simply the input data: in TF1 it is the graph up to target_pos,
    # which is very early and essentially just fetches that data.

    optimizer = tf.keras.optimizers.RMSprop(
        learning_rate=1e-5, momentum=0.9, clipvalue=FLAGS['training_clipping'])
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        name='train_accuracy')

    pos_xy = []
    bottleneck = []
    lstm_output = []

    @tf.function
    def loss_function(targets, logits):
        pc_loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets[0],
                                                          logits=logits[0],
                                                          name='pc_loss')
        hd_loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets[1],
                                                          logits=logits[1],
                                                          name='hd_loss')
        total_loss = pc_loss + hd_loss
        return tf.reduce_mean(total_loss, name='train_loss')

    @tf.function
    def train_step(velocities, targets, initial_conds):
        with tf.GradientTape() as tape:
            predictions, _ = grid_cell_model(velocities,
                                             initial_conds,
                                             training=True)
            ensembles_logits, bottleneck, lstm_output = predictions
            loss = loss_function(targets, ensembles_logits)
        gradients = tape.gradient(loss, grid_cell_model.trainable_weights)

        optimizer.apply_gradients(
            zip(gradients, grid_cell_model.trainable_weights))

        train_loss(loss)
        return {
            "bottleneck": bottleneck,
            "lstm_output": lstm_output,
            "pos_xy": target_pos
        }

    for epoch in range(FLAGS['training_epochs']):
        loss_acc = list()
        res = dict()
        #for _ in range(FLAGS['training_steps_per_epoch']):
        train_loss.reset_states()
        for batch, train_trajectory in enumerate(dataset):
            print(batch)
            # Some preprocessing that should perhaps be done in the data pipeline
            start_time = time.time()
            init_pos = train_trajectory['init_pos']
            init_hd = train_trajectory['init_hd']
            ego_vel = train_trajectory['ego_vel']
            target_pos = train_trajectory['target_pos']
            target_hd = train_trajectory['target_hd']
            input_tensors = []
            if FLAGS['task_velocity_inputs']:
                # Add the required amount of noise to the velocities
                vel_noise = tfb.distributions.Normal(
                    0.0, 1.0).sample(sample_shape=tf.shape(
                        ego_vel)) * FLAGS['task_velocity_noise']
                input_tensors = [ego_vel + vel_noise] + input_tensors
            velocities = tf.concat(input_tensors, axis=2)
            initial_conds = utils.encode_initial_conditions(
                init_pos, init_hd, place_cell_ensembles,
                head_direction_ensembles)
            ensembles_targets = utils.encode_targets(target_pos, target_hd,
                                                     place_cell_ensembles,
                                                     head_direction_ensembles)
            preprocess_time = time.time() - start_time
            mb_res = train_step(velocities, ensembles_targets, initial_conds)
            #res = utils.concat_dict(res, mb_res)

            if batch % 1000 > 600:
                pos_xy.append(mb_res['pos_xy'])
                bottleneck.append(mb_res['bottleneck'])
                lstm_output.append(mb_res['lstm_output'])

            if batch % 1000 == 0 and batch != 0:
                print(preprocess_time)
                print('Epoch {}, batch {}, loss {}'.format(
                    epoch, batch, train_loss.result()))
                for i in range(len(pos_xy)):
                    mb_res = {
                        "bottleneck": bottleneck[i],
                        "lstm_out": lstm_output[i],
                        "pos_xy": pos_xy[i]
                    }
                    res = utils.concat_dict(res, mb_res)
                pos_xy = []
                bottleneck = []
                lstm_output = []
                mb_res = dict()
                # Store at the end of validation
                filename = 'rates_and_sac_latest_hd.pdf'
                grid_scores['btln_60'], grid_scores['btln_90'], grid_scores[
                    'btln_60_separation'], grid_scores[
                        'btln_90_separation'] = utils.get_scores_and_plot(
                            latest_epoch_scorer, res['pos_xy'],
                            res['bottleneck'],
                            FLAGS['saver_results_directory'], filename)
                res = dict()
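The TF2 examples index FLAGS as a plain dictionary rather than using absl flags. A minimal configuration sketch covering the keys used above, with illustrative placeholder values only (paths and numbers are not taken from the source):

FLAGS = {
    'task_dataset_info': 'square_room',
    'task_root': '/path/to/dataset',
    'task_env_size': 2.2,
    'task_neurons_seed': 8341,
    'task_targets_type': 'softmax',
    'task_lstm_init_type': 'softmax',
    'task_n_pc': [256],
    'task_pc_scale': [0.01],
    'task_n_hdc': [12],
    'task_hdc_concentration': [20.0],
    'task_velocity_inputs': True,
    'task_velocity_noise': [0.0, 0.0, 0.0],
    'model_nh_lstm': 128,
    'model_nh_bottleneck': 256,
    'model_dropout_rates': [0.5],
    'model_weight_decay': 1e-5,
    'model_bottleneck_has_bias': False,
    'model_init_weight_disp': 0.0,
    'training_minibatch_size': 10,
    'training_epochs': 1000,
    'training_steps_per_epoch': 1000,
    'training_clipping': 1e-5,
    'saver_results_directory': '/path/to/results',
}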