Example #1
def train():
  """Training loop."""

  tf.reset_default_graph()

  # Create the motion models for training and evaluation
  data_reader = dataset_reader.DataReader(
      FLAGS.task_dataset_info, root=FLAGS.task_root, num_threads=4)
  train_traj = data_reader.read(batch_size=FLAGS.training_minibatch_size)

  # Create the ensembles that provide targets during training
  place_cell_ensembles = utils.get_place_cell_ensembles(
      env_size=FLAGS.task_env_size,
      neurons_seed=FLAGS.task_neurons_seed,
      targets_type=FLAGS.task_targets_type,
      lstm_init_type=FLAGS.task_lstm_init_type,
      n_pc=FLAGS.task_n_pc,
      pc_scale=FLAGS.task_pc_scale)

  head_direction_ensembles = utils.get_head_direction_ensembles(
      neurons_seed=FLAGS.task_neurons_seed,
      targets_type=FLAGS.task_targets_type,
      lstm_init_type=FLAGS.task_lstm_init_type,
      n_hdc=FLAGS.task_n_hdc,
      hdc_concentration=FLAGS.task_hdc_concentration)
  target_ensembles = place_cell_ensembles + head_direction_ensembles

  # Model creation
  rnn_core = model.GridCellsRNNCell(
      target_ensembles=target_ensembles,
      nh_lstm=FLAGS.model_nh_lstm,
      nh_bottleneck=FLAGS.model_nh_bottleneck,
      dropoutrates_bottleneck=np.array(FLAGS.model_dropout_rates),
      bottleneck_weight_decay=FLAGS.model_weight_decay,
      bottleneck_has_bias=FLAGS.model_bottleneck_has_bias,
      init_weight_disp=FLAGS.model_init_weight_disp)
  rnn = model.GridCellsRNN(rnn_core, FLAGS.model_nh_lstm)

  # Get a trajectory batch
  input_tensors = []
  init_pos, init_hd, ego_vel, target_pos, target_hd = train_traj
  if FLAGS.task_velocity_inputs:
    # Add the required amount of noise to the velocities
    vel_noise = tf.distributions.Normal(0.0, 1.0).sample(
        sample_shape=ego_vel.get_shape()) * FLAGS.task_velocity_noise
    input_tensors = [ego_vel + vel_noise] + input_tensors
  # Concatenate all inputs
  inputs = tf.concat(input_tensors, axis=2)

  # Replace Euclidean positions and angles with encodings from the place and
  # head-direction ensembles. Note that initial_conds will be zeros if the
  # ensembles were configured to provide that type of initialization.
  initial_conds = utils.encode_initial_conditions(
      init_pos, init_hd, place_cell_ensembles, head_direction_ensembles)

  # Encode targets as well
  ensembles_targets = utils.encode_targets(
      target_pos, target_hd, place_cell_ensembles, head_direction_ensembles)

  # Estimate future encodings of the place and hd ensembles from
  # egocentric velocities
  outputs, _ = rnn(initial_conds, inputs, training=True)
  ensembles_logits, bottleneck, lstm_output = outputs

  # Training loss
  pc_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
      labels=ensembles_targets[0], logits=ensembles_logits[0], name='pc_loss')
  hd_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
      labels=ensembles_targets[1], logits=ensembles_logits[1], name='hd_loss')
  total_loss = pc_loss + hd_loss
  train_loss = tf.reduce_mean(total_loss, name='train_loss')

  # Optimisation ops
  optimizer_class = eval(FLAGS.training_optimizer_class)  # pylint: disable=eval-used
  optimizer = optimizer_class(**eval(FLAGS.training_optimizer_options))  # pylint: disable=eval-used
  grad = optimizer.compute_gradients(train_loss)
  clip_gradient = eval(FLAGS.training_clipping_function)  # pylint: disable=eval-used
  clipped_grad = [
      clip_gradient(g, var, FLAGS.training_clipping) for g, var in grad
  ]
  train_op = optimizer.apply_gradients(clipped_grad)

  # Store the grid scores
  grid_scores = dict()
  grid_scores['btln_60'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['btln_90'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['btln_60_separation'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['btln_90_separation'] = np.zeros((FLAGS.model_nh_bottleneck,))
  grid_scores['lstm_60'] = np.zeros((FLAGS.model_nh_lstm,))
  grid_scores['lstm_90'] = np.zeros((FLAGS.model_nh_lstm,))

  # Create scorer objects
  starts = [0.2] * 10
  ends = np.linspace(0.4, 1.0, num=10)
  masks_parameters = zip(starts, ends.tolist())
  latest_epoch_scorer = scores.GridScorer(20, data_reader.get_coord_range(),
                                          masks_parameters)
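  # (Interpretation, not documented here: GridScorer takes the number of
  # spatial bins for the ratemaps, the environment's coordinate range, and
  # the annulus mask parameters built from the start/end radii above.)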

  with tf.train.SingularMonitoredSession() as sess:
    for epoch in range(FLAGS.training_epochs):
      loss_acc = list()
      for _ in range(FLAGS.training_steps_per_epoch):
        res = sess.run({'train_op': train_op, 'total_loss': train_loss})
        loss_acc.append(res['total_loss'])

      tf.logging.info('Epoch %i, mean loss %.5f, std loss %.5f', epoch,
                      np.mean(loss_acc), np.std(loss_acc))
      if epoch % FLAGS.saver_eval_time == 0:
        res = dict()
        for _ in range(FLAGS.training_evaluation_minibatch_size //
                       FLAGS.training_minibatch_size):
          mb_res = sess.run({
              'bottleneck': bottleneck,
              'lstm': lstm_output,
              'pos_xy': target_pos
          })
          res = utils.concat_dict(res, mb_res)

        # Store at the end of validation
        filename = 'rates_and_sac_latest_hd.pdf'
        grid_scores['btln_60'], grid_scores['btln_90'], grid_scores[
            'btln_60_separation'], grid_scores[
                'btln_90_separation'] = utils.get_scores_and_plot(
                    latest_epoch_scorer, res['pos_xy'], res['bottleneck'],
                    FLAGS.saver_results_directory, filename)
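
Several of these examples accumulate evaluation minibatches with utils.concat_dict. The real helper ships with the grid-cells repo's utils module; this is only a minimal sketch of the assumed behavior:

def concat_dict(acc, new_data):
    # Concatenate each new minibatch array onto the accumulator, keyed by
    # fetch name, so the full validation set can be scored at once.
    for key, value in new_data.items():
        value = np.asarray(value)
        if key in acc:
            acc[key] = np.concatenate([acc[key], value], axis=0)
        else:
            acc[key] = value
    return acc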
Example #2
        encoding_func=encoding_func,
    )

    predictions = predictions / ssp_scaling
    coords = coords / ssp_scaling

print(np.max(predictions))
print(np.min(predictions))
# assert False

# grid_scores['btln_60'], grid_scores['btln_90'], \
# grid_scores['btln_60_separation'], grid_scores['btln_90_separation'] = utils.get_scores_and_plot(
grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred, grid_scores_90_separation_pred = utils.get_scores_and_plot(
    scorer=latest_epoch_scorer,
    data_abs_xy=predictions,  #res['pos_xy'],
    activations=activations,  #res['bottleneck'],
    directory='output_scores',  #FLAGS.saver_results_directory,
    filename=fname_pred,
)

grid_scores_60_truth, grid_scores_90_truth, grid_scores_60_separation_truth, grid_scores_90_separation_truth = utils.get_scores_and_plot(
    scorer=latest_epoch_scorer,
    data_abs_xy=coords,  #res['pos_xy'],
    activations=activations,  #res['bottleneck'],
    directory='output_scores',  #FLAGS.saver_results_directory,
    filename=fname_truth,
)

print(grid_scores_60_truth, grid_scores_90_truth,
      grid_scores_60_separation_truth, grid_scores_90_separation_truth)
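
This snippet assumes latest_epoch_scorer, activations, fname_pred, and fname_truth were defined earlier in the script. A hedged reconstruction of the scorer, mirroring Examples #1 and #5 (the coordinate range is borrowed from Example #4 and is an assumption):

starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(20, ((-1.1, 1.1), (-1.1, 1.1)),
                                        masks_parameters)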
Example #3
with open("rat_samples", "wb+") as w:
    pickle.dump([predictions, targets], w)
# Recover trajectories for plotting; these assignments were commented out in
# the original but are required by the map_trajectory call below.
trajectory_predict = ensembles_logits[0].numpy()
trajectory_groundtruth = target_pos

res_bottleneck = np.array(res_bottleneck)
res_lstm_out = np.array(res_lstm_out)
res_pos_xy = np.array(res_pos_xy)
# Store at the end of validation
filename = 'rates_and_sac_latest_hd_' + "lastsnake_32_225_2" + '.pdf'
grid_scores['btln_60'], grid_scores['btln_90'], grid_scores[
    'btln_60_separation'], grid_scores[
        'btln_90_separation'] = utils.get_scores_and_plot(
            latest_epoch_scorer, res_pos_xy, res_bottleneck,
            FLAGS['saver_results_directory'], filename)


def map_trajectory(trajectory, real_points):
    # Unpack x/y from the trajectory; real_points is accepted to match the
    # call below but is not plotted in this snippet.
    x, y = trajectory[:, 0], trajectory[:, 1]
    for i in range(0, len(x), 1):
        plt.plot(x[i:i + 2], y[i:i + 2], 'ro-')
        plt.plot(x[i:i + 2], y[i:i + 2], 'kx')
    plt.savefig("sample_trajectory.png")  # plt.save does not exist
    plt.show()


map_trajectory(trajectory_predict, trajectory_groundtruth)

# TODO: identify the grid cells and select the correct ones using
# some kind of grid-cellness metric.
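
One hedged way to implement the selection sketched in the TODO above is to threshold the 60-degree gridness scores; the 0.37 cutoff is a value reported in the grid-cell literature and is an assumption here:

grid_threshold = 0.37  # assumed cutoff for "grid-like" units
grid_units = np.where(grid_scores['btln_60'] > grid_threshold)[0]
print('Grid-like bottleneck units:', grid_units)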
Example #4
                  tf_weights_loc='weights/')

for X, y in data_generator:
    break

init_pos, init_hd, ego_vel = X
target_pos, target_hd = y

initial_conds = utils.encode_initial_conditions(init_pos, init_hd,
                                                place_cell_ensembles,
                                                head_direction_ensembles)
ensembles_targets = utils.encode_targets(target_pos, target_hd,
                                         place_cell_ensembles,
                                         head_direction_ensembles)

model.eval()

outs = model.forward(ego_vel.transpose(1, 0), initial_conds)
logits_hd, logits_pc, bottleneck_acts, lstm_states, _ = outs

acts = bottleneck_acts.transpose(1, 0).detach().numpy()
pos_xy = target_pos.detach().numpy()

# Create scorer objects
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
masks_parameters = zip(starts, ends.tolist())
scorer = scores.GridScorer(20, ((-1.1, 1.1), (-1.1, 1.1)), masks_parameters)

scoress = utils.get_scores_and_plot(scorer, pos_xy, acts, '.', 'test.pdf')
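
As in Examples #2 and #5, get_scores_and_plot is assumed to return four values, so the result can be unpacked directly:

score_60, score_90, sep_60, sep_90 = scoress
print(np.nanmax(score_60), np.nanmax(score_90))  # nanmax guards against NaN scores (an assumption)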
Example #5
def train(hidden_size=128, g_size=256, batch_size=10, seed=8341):
    tf.reset_default_graph()
    dataset = 'square_room'

    # Create the motion models for training and evaluation
    data_reader = dataset_reader.DataReader(dataset,
                                            root=FLAGS.task_root,
                                            num_threads=4)
    train_traj = data_reader.read(batch_size=batch_size)

    # Create the ensembles that provide targets during training
    pcs = ensembles.PlaceCellEnsemble(256, stdev=0.01, env_size=2.2, seed=seed)
    hds = ensembles.HeadDirectionCellEnsemble(12, 20., seed)

    # Model creation
    rnn = model.GridCellsRNN([pcs, hds], hidden_size, g_size, g_has_bias=False)

    init_pos, init_hd, ego_vel, target_pos, target_hd = train_traj
    inputs = tf.concat([ego_vel], axis=2)

    pc_0 = tf.squeeze(pcs.get_init(init_pos[:, tf.newaxis, :]), axis=1)
    hd_0 = tf.squeeze(hds.get_init(init_hd[:, tf.newaxis, :]), axis=1)

    outputs, _ = rnn([pc_0, hd_0], inputs, training=True)
    (pc_logits, hd_logits), bottleneck, lstm_output = outputs

    pc_loss = pcs.loss(pc_logits, target_pos, name='pc_loss')
    hd_loss = hds.loss(hd_logits, target_hd, name='hd_loss')
    train_loss = tf.reduce_mean(pc_loss + hd_loss, name='train_loss')

    optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-5, momentum=0.9)
    grad = optimizer.compute_gradients(train_loss)
    clipped_grad = [utils.clip_all_gradients(g, var, 1e-5) for g, var in grad]
    train_op = optimizer.apply_gradients(clipped_grad)

    # Store the grid scores
    grid_scores = dict(btln_60=np.zeros((g_size, )),
                       btln_90=np.zeros((g_size, )),
                       btln_60_separation=np.zeros((g_size, )),
                       btln_90_separation=np.zeros((g_size, )),
                       lstm_60=np.zeros((hidden_size, )),
                       lstm_90=np.zeros((hidden_size, )))

    # Create scorer objects
    starts = [0.2] * 10
    ends = np.linspace(0.4, 1.0, num=10)
    masks_parameters = zip(starts, ends.tolist())
    latest_epoch_scorer = scores.GridScorer(20, data_reader.get_coord_range(),
                                            masks_parameters)
    if False:  # Disabled: one-off dump of training trajectories to HDF5.
        S = Saver('testfile.hdf5')
        with tf.train.SingularMonitoredSession() as sess:
            for j in range(1000000):
                if (j + 1) % 100 == 0:
                    print(j + 1)
                S.save_traj(i.eval(session=sess) for i in train_traj)

    with tf.train.SingularMonitoredSession() as sess:
        for epoch in range(1000):
            loss_acc = list()
            for _ in range(1000):
                res = sess.run({
                    'train_op': train_op,
                    'total_loss': train_loss
                })
                loss_acc.append(res['total_loss'])

            tf.logging.info('Epoch %i, mean loss %.5f, std loss %.5f', epoch,
                            np.mean(loss_acc), np.std(loss_acc))
            if epoch % 2 == 0:
                res = dict()
                for _ in range(4000 // batch_size):
                    mb_res = sess.run({
                        'bottleneck': bottleneck,
                        'lstm': lstm_output,
                        'pos_xy': target_pos
                    })
                    res = utils.concat_dict(res, mb_res)

                # Store at the end of validation
                filename = 'rates_and_sac_latest_hd.pdf'
                grid_scores['btln_60'], grid_scores['btln_90'], grid_scores[
                    'btln_60_separation'], grid_scores[
                        'btln_90_separation'] = utils.get_scores_and_plot(
                            latest_epoch_scorer, res['pos_xy'],
                            res['bottleneck'], FLAGS.saver_results_directory,
                            filename)
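
Example #5 calls utils.clip_all_gradients(g, var, 1e-5). A minimal sketch consistent with that call signature, assuming simple elementwise value clipping:

def clip_all_gradients(g, var, limit):
    # Clip each gradient elementwise to [-limit, limit]; gradients that are
    # None (unused variables) are passed through unchanged.
    if g is None:
        return g, var
    return tf.clip_by_value(g, -limit, limit), var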
Example #6
def train():
    """Training loop."""

    #tf.reset_default_graph()

    # Create the motion models for training and evaluation
    data_reader = dataset_reader.DataReader(
        FLAGS['task_dataset_info'],
        root=FLAGS['task_root'],
        batch_size=FLAGS['training_minibatch_size'])
    dataset = data_reader.read()
    #train_traj = data_reader.read()
    # Create the ensembles that provide targets during training
    place_cell_ensembles = utils.get_place_cell_ensembles(
        env_size=FLAGS['task_env_size'],
        neurons_seed=FLAGS['task_neurons_seed'],
        targets_type=FLAGS['task_targets_type'],
        lstm_init_type=FLAGS['task_lstm_init_type'],
        n_pc=FLAGS['task_n_pc'],
        pc_scale=FLAGS['task_pc_scale'])

    head_direction_ensembles = utils.get_head_direction_ensembles(
        neurons_seed=FLAGS['task_neurons_seed'],
        targets_type=FLAGS['task_targets_type'],
        lstm_init_type=FLAGS['task_lstm_init_type'],
        n_hdc=FLAGS['task_n_hdc'],
        hdc_concentration=FLAGS['task_hdc_concentration'])

    target_ensembles = place_cell_ensembles + head_direction_ensembles
    '''
    # Get a trajectory batch
    input_tensors = []
    init_pos, init_hd, ego_vel, target_pos, target_hd = train_traj
    if FLAGS['task_velocity_inputs']:
        # Add the required amount of noise to the velocities
        vel_noise = tfb.distributions.Normal(0.0, 1.0).sample(
            sample_shape=ego_vel.get_shape()) * FLAGS['task_velocity_noise']
        input_tensors = [ego_vel + vel_noise] + input_tensors
    # Concatenate all inputs
    inputs = tf.concat(input_tensors, axis=2)

    # Replace euclidean positions and angles by encoding of place and hd ensembles
    # Note that the initial_conds will be zeros if the ensembles were configured
    # to provide that type of initialization
    initial_conds = utils.encode_initial_conditions(
        init_pos, init_hd, place_cell_ensembles, head_direction_ensembles)

    # Encode targets as well
    ensembles_targets = utils.encode_targets(
        target_pos, target_hd, place_cell_ensembles, head_direction_ensembles)

    # Estimate future encoding of place and hd ensembles inputing egocentric vels
    '''

    # Define the model
    grid_cell_model = GridCellNetwork(
        target_ensembles=target_ensembles,
        nh_lstm=FLAGS['model_nh_lstm'],
        nh_bottleneck=FLAGS['model_nh_bottleneck'],
        dropoutrates_bottleneck=FLAGS['model_dropout_rates'],
        bottleneck_weight_decay=FLAGS['model_weight_decay'],
        bottleneck_has_bias=FLAGS['model_bottleneck_has_bias'],
        init_weight_disp=FLAGS['model_init_weight_disp'],
    )

    # Store the grid scores
    grid_scores = dict()
    grid_scores['btln_60'] = np.zeros((FLAGS['model_nh_bottleneck'], ))
    grid_scores['btln_90'] = np.zeros((FLAGS['model_nh_bottleneck'], ))
    grid_scores['btln_60_separation'] = np.zeros(
        (FLAGS['model_nh_bottleneck'], ))
    grid_scores['btln_90_separation'] = np.zeros(
        (FLAGS['model_nh_bottleneck'], ))
    grid_scores['lstm_60'] = np.zeros((FLAGS['model_nh_lstm'], ))
    grid_scores['lstm_90'] = np.zeros((FLAGS['model_nh_lstm'], ))

    # Create scorer objects
    starts = [0.2] * 10
    ends = np.linspace(0.4, 1.0, num=10)
    masks_parameters = zip(starts, ends.tolist())
    latest_epoch_scorer = scores.GridScorer(20, data_reader.get_coord_range(),
                                            masks_parameters)

    # In TF1 this ran without a feed_dict because fetches were passed to
    # sess.run inside the SingularMonitoredSession; pos_xy was the graph up to
    # target_pos, which is so early it effectively just fetched the input data.

    optimizer = tf.keras.optimizers.RMSprop(
        learning_rate=1e-5, momentum=0.9, clipvalue=FLAGS['training_clipping'])
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        name='train_accuracy')

    pos_xy = []
    bottleneck = []
    lstm_output = []

    @tf.function
    def loss_function(targets, logits):
        pc_loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets[0],
                                                          logits=logits[0],
                                                          name='pc_loss')
        hd_loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets[1],
                                                          logits=logits[1],
                                                          name='hd_loss')
        total_loss = pc_loss + hd_loss
        return tf.reduce_mean(total_loss, name='train_loss')

    @tf.function
    def train_step(velocities, targets, initial_conds, target_pos):
        with tf.GradientTape() as tape:
            predictions, _ = grid_cell_model(velocities,
                                             initial_conds,
                                             training=True)
            ensembles_logits, bottleneck, lstm_output = predictions
            loss = loss_function(targets, ensembles_logits)
        gradients = tape.gradient(loss, grid_cell_model.trainable_weights)

        optimizer.apply_gradients(
            zip(gradients, grid_cell_model.trainable_weights))

        train_loss(loss)
        return {
            "bottleneck": bottleneck,
            "lstm_output": lstm_output,
            "pos_xy": target_pos
        }

    for epoch in range(FLAGS['training_epochs']):
        loss_acc = list()
        res = dict()
        #for _ in range(FLAGS['training_steps_per_epoch']):
        train_loss.reset_states()
        for batch, train_trajectory in enumerate(dataset):
            print(batch)
            # Some preprocessing that should perhaps be done in the data pipeline.
            start_time = time.time()
            init_pos = train_trajectory['init_pos']
            init_hd = train_trajectory['init_hd']
            ego_vel = train_trajectory['ego_vel']
            target_pos = train_trajectory['target_pos']
            target_hd = train_trajectory['target_hd']
            input_tensors = []
            if FLAGS['task_velocity_inputs']:
                # Add the required amount of noise to the velocities
                # tf.random.normal replaces the broken tfb.distributions call;
                # sampling N(0, 1) and scaling is equivalent.
                vel_noise = tf.random.normal(
                    tf.shape(ego_vel)) * FLAGS['task_velocity_noise']
                input_tensors = [ego_vel + vel_noise] + input_tensors
            velocities = tf.concat(input_tensors, axis=2)
            initial_conds = utils.encode_initial_conditions(
                init_pos, init_hd, place_cell_ensembles,
                head_direction_ensembles)
            ensembles_targets = utils.encode_targets(target_pos, target_hd,
                                                     place_cell_ensembles,
                                                     head_direction_ensembles)
            preprocess_time = time.time() - start_time
            mb_res = train_step(velocities, ensembles_targets, initial_conds,
                                target_pos)
            #res = utils.concat_dict(res, mb_res)

            if batch % 1000 > 600:
                pos_xy.append(mb_res['pos_xy'])
                bottleneck.append(mb_res['bottleneck'])
                lstm_output.append(mb_res['lstm_output'])

            if batch % 1000 == 0 and batch != 0:
                print(preprocess_time)
                print('Epoch {}, batch {}, loss {}'.format(
                    epoch, batch, train_loss.result()))
                for i in range(len(pos_xy)):
                    mb_res = {
                        "bottleneck": bottleneck[i],
                        "lstm_out": lstm_output[i],
                        "pos_xy": pos_xy[i]
                    }
                    utils.concat_dict(res, mb_res)
                pos_xy = []
                bottleneck = []
                lstm_output = []
                mb_res = dict()
                # Store at the end of validation
                filename = 'rates_and_sac_latest_hd.pdf'
                grid_scores['btln_60'], grid_scores['btln_90'], grid_scores[
                    'btln_60_separation'], grid_scores[
                        'btln_90_separation'] = utils.get_scores_and_plot(
                            latest_epoch_scorer, res['pos_xy'],
                            res['bottleneck'],
                            FLAGS['saver_results_directory'], filename)
                res = dict()
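
Example #6 indexes FLAGS as a plain dictionary rather than absl flags. A hypothetical minimal configuration, with values borrowed from the defaults visible in the other examples (paths are placeholders):

FLAGS = {
    'task_dataset_info': 'square_room',
    'task_root': '/path/to/dataset',        # placeholder
    'training_minibatch_size': 10,
    'training_epochs': 1000,
    'training_clipping': 1e-5,
    'task_env_size': 2.2,
    'task_neurons_seed': 8341,
    'task_targets_type': 'softmax',
    'task_lstm_init_type': 'softmax',
    'task_n_pc': [256],
    'task_pc_scale': [0.01],
    'task_n_hdc': [12],
    'task_hdc_concentration': [20.],
    'task_velocity_inputs': True,
    'task_velocity_noise': [0.0, 0.0, 0.0],
    'model_nh_lstm': 128,
    'model_nh_bottleneck': 256,
    'model_dropout_rates': [0.5],
    'model_weight_decay': 1e-5,
    'model_bottleneck_has_bias': False,     # matches g_has_bias=False in Example #5
    'model_init_weight_disp': 0.0,
    'saver_results_directory': 'output_scores',  # placeholder
}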