Example #1
def get_test_losses(opts, sample, output, name='loss'):
    """Get testing loss funcion
  Input:
  - opts (options) - object with all relevant options stored
  - sample (dict) - sample from training dataset
  - output (tf.Tensor) - output from network on sample
  - name (string, optional) - name prefix for tensorflow scoping (default loss)
  Output:
  - gt_l1_loss (tf.Tensor) - L1 loss against ground truth 
  - gt_l2_loss (tf.Tensor) - L2 loss against ground truth
  - gt_bce_loss (tf.Tensor) - BCE loss against ground truth
  - ssame_m (tf.Tensor) - Mean similarity of corresponding points
  - ssame_var (tf.Tensor) - Standard dev. of similarity of corresponding points
  - sdiff_m (tf.Tensor) - Mean similarity of non-corresponding points
  - sdiff_var (tf.Tensor) - Standard dev. of similarity of non-corresponding
                            points 
  """
    emb = sample['TrueEmbedding']
    output_sim = tfutils.get_sim(output)
    sim_true = tfutils.get_sim(emb)
    if opts.loss_type == 'bce':
        osim = tf.sigmoid(output_sim)
        osim_log = output_sim
    else:
        osim = output_sim
        osim_log = tf.log(tf.abs(output_sim) + 1e-9)
    gt_l1_loss = loss_fns['l1'](sim_true, osim, add_loss=False)
    gt_l2_loss = loss_fns['l2'](sim_true, osim, add_loss=False)
    gt_bce_loss = loss_fns['bce'](sim_true, osim, add_loss=False)
    num_same = tf.reduce_sum(sim_true)
    num_diff = tf.reduce_sum(1 - sim_true)
    ssame_m, ssame_var = tf.nn.weighted_moments(osim, None, sim_true)
    sdiff_m, sdiff_var = tf.nn.weighted_moments(osim, None, 1 - sim_true)

    return gt_l1_loss, gt_l2_loss, gt_bce_loss, ssame_m, ssame_var, sdiff_m, sdiff_var
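`tfutils.get_sim` is not defined anywhere on this page. As a rough sketch (an assumption, not the project's actual implementation), a pairwise-similarity helper of this kind typically returns the Gram matrix of the embeddings:

import tensorflow as tf

def get_sim_sketch(emb):
    # emb: [batch, num_points, embedding_dim]
    # Returns [batch, num_points, num_points] inner-product similarities.
    return tf.matmul(emb, emb, transpose_b=True)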
Example #2
def get_loss(opts, sample, output, return_gt=False, name='train'):
  """Get total loss funcion with main loss functions and regularizers
  Input:
  - opts (options) - object with all relevant options stored
  - sample (dict) - sample from training dataset
  - output (tf.Tensor) - output from network on sample
  - return_gt (boolean, optional) - return ground truth losses (default False)
  - name (string, optional) - name prefix for tensorflow scoping (default train)
  Output:
  - loss (tf.Tensor) - total summed loss value
  - gt_l1_loss (tf.Tensor) - L1 loss against ground truth (only returned
                             if return_gt=True)
  - gt_l2_loss (tf.Tensor) - L2 loss against ground truth (only returned
                             if return_gt=True)
  """
  emb = sample['TrueEmbedding']
  output_sim = tfutils.get_sim(output)
  if opts.use_abs_value:
    output_sim = tf.abs(output_sim)
  sim_true = tfutils.get_sim(emb)
  # Figure out if we are using the unsupervised loss to know whether we can use the GT adjacency
  if opts.use_unsupervised_loss:
    v = opts.dataset_params.views[-1]
    p = opts.dataset_params.points[-1]
    b = opts.batch_size 
    sim = sample['AdjMat'] + tf.eye(num_rows=v*p, batch_shape=[b])
  else:
    sim = sim_true
  if opts.full_tensorboard: # Add to tensorboard
    tf.summary.image('Output Similarity {}'.format(name), tf.expand_dims(output_sim, -1))
    tf.summary.image('Embedding Similarity {}'.format(name), tf.expand_dims(sim, -1))
  # Our main loss, the reconstruction loss
  reconstr_loss = loss_fns[opts.loss_type](sim, output_sim)
  if opts.full_tensorboard: # Add to tensorboard
    tf.summary.scalar('Reconstruction Loss {}'.format(name), reconstr_loss)
  # Geometric loss terms and tensorboard
  if opts.geometric_loss > 0:
    geo_loss = get_geometric_loss(opts, sample, output_sim, name='geom_loss_{}'.format(name))
    if opts.full_tensorboard: # Add to tensorboard
      tf.summary.scalar('Geometric Loss {}'.format(name), geo_loss)
      geo_loss_gt = get_geometric_loss(opts, sample, sim_true)
      tf.summary.scalar('Geometric Loss GT {}'.format(name), geo_loss_gt)
    loss = opts.reconstruction_loss * reconstr_loss + opts.geometric_loss * geo_loss
  else:
    loss = reconstr_loss
  tf.summary.scalar('Total Loss {}'.format(name), loss)
  # Compare to loss vs ground truth (almost always do that)
  if return_gt:
    output_sim_gt = output_sim
    if opts.loss_type == 'bce':
      output_sim_gt = tf.sigmoid(output_sim)
    gt_l1_loss = loss_fns['l1'](sim_true, output_sim_gt, add_loss=False)
    gt_l2_loss = loss_fns['l2'](sim_true, output_sim_gt, add_loss=False)
    if opts.full_tensorboard and opts.use_unsupervised_loss: # Add to tensorboard
      tf.summary.scalar('GT L1 Loss {}'.format(name), gt_l1_loss)
      tf.summary.scalar('GT L2 Loss {}'.format(name), gt_l2_loss)
    return loss, gt_l1_loss, gt_l2_loss
  else:
    return loss
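The `loss_fns` registry used above is also defined elsewhere. A minimal sketch of what such a dictionary could look like follows; the names, signatures, and the `add_loss` behavior are assumptions based only on how it is called in these examples (in particular, the real 'bce' entry may operate on logits rather than probabilities).

import tensorflow as tf

def _maybe_add(loss, add_loss):
    # Optionally register the scalar with TF's loss collection.
    if add_loss:
        tf.losses.add_loss(loss)
    return loss

loss_fns_sketch = {
    'l1': lambda t, p, add_loss=True: _maybe_add(
        tf.reduce_mean(tf.abs(t - p)), add_loss),
    'l2': lambda t, p, add_loss=True: _maybe_add(
        tf.reduce_mean(tf.square(t - p)), add_loss),
    'bce': lambda t, p, add_loss=True: _maybe_add(
        tf.reduce_mean(-(t * tf.log(p + 1e-9) +
                         (1 - t) * tf.log(1 - p + 1e-9))), add_loss),
}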
Example #3
def get_output_sim(self, out_graph):
    out_shape = [self.batch_size, -1, self.final_embedding_dim]
    output = tf.reshape(out_graph.nodes, out_shape)
    output_sim = tfutils.get_sim(output)
    if self.use_abs_value:
        output_sim = tf.abs(output_sim)
    return output_sim
def get_loss(opts, sample, output, return_gt=False, name='train'):
  emb = sample['TrueEmbedding']
  output_sim = tfutils.get_sim(output)
  if opts.use_end_bias:
    end_bias = get_end_bias()
    output_sim = output_sim + end_bias
  if opts.use_abs_value:
    output_sim = tf.abs(output_sim)
  sim_true = tfutils.get_sim(emb)
  if opts.use_unsupervised_loss:
    v = opts.dataset_params.views[-1]
    p = opts.dataset_params.points[-1]
    b = opts.batch_size 
    sim = sample['AdjMat'] + tf.eye(num_rows=v*p, batch_shape=[b])
  else:
    sim = sim_true
  if opts.full_tensorboard:
    tf.summary.image('Output Similarity {}'.format(name), tf.expand_dims(output_sim, -1))
    tf.summary.image('Embedding Similarity {}'.format(name), tf.expand_dims(sim, -1))
  reconstr_loss = loss_fns[opts.loss_type](sim, output_sim)
  if opts.full_tensorboard:
    tf.summary.scalar('Reconstruction Loss {}'.format(name), reconstr_loss)
  if opts.geometric_loss > 0:
    geo_loss = get_geometric_loss(opts, sample, output_sim, name='geom_loss_{}'.format(name))
    if opts.full_tensorboard:
      tf.summary.scalar('Geometric Loss {}'.format(name), geo_loss)
      geo_loss_gt = get_geometric_loss(opts, sample, sim_true)
      tf.summary.scalar('Geometric Loss GT {}'.format(name), geo_loss_gt)
    loss = opts.reconstruction_loss * reconstr_loss + opts.geometric_loss * geo_loss
  else:
    loss = reconstr_loss
  tf.summary.scalar('Total Loss {}'.format(name), loss)
  if return_gt:
    output_sim_gt = output_sim
    if opts.loss_type == 'bce':
      output_sim_gt = tf.sigmoid(output_sim)
    gt_l1_loss = loss_fns['l1'](sim_true, output_sim_gt, add_loss=False)
    gt_l2_loss = loss_fns['l2'](sim_true, output_sim_gt, add_loss=False)
    if opts.full_tensorboard and opts.use_unsupervised_loss:
      tf.summary.scalar('GT L1 Loss {}'.format(name), gt_l1_loss)
      tf.summary.scalar('GT L2 Loss {}'.format(name), gt_l2_loss)
    return loss, gt_l1_loss, gt_l2_loss
  else:
    return loss
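`get_end_bias` is likewise not shown here. If it follows the common pattern of a learned offset added to every similarity entry, it might look roughly like the sketch below; the variable name and shape are assumptions, not the original code.

import tensorflow as tf

def get_end_bias_sketch():
    # A single trainable scalar added uniformly to the output similarity.
    return tf.get_variable('end_bias', shape=[], dtype=tf.float32,
                           initializer=tf.zeros_initializer())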
Example #5
def get_test_losses(opts, sample, output, return_gt=False, name='loss'):
    emb = sample['TrueEmbedding']
    output_sim = tfutils.get_sim(output)
    sim_true = tfutils.get_sim(emb)
    if opts.use_end_bias:
        end_bias = get_end_bias()
        output_sim = output_sim + end_bias
    if opts.loss_type == 'bce':
        osim = tf.sigmoid(output_sim)
        osim_log = output_sim
    else:
        osim = output_sim
        osim_log = tf.log(tf.abs(output_sim) + 1e-9)
    gt_l1_loss = loss_fns['l1'](sim_true, osim, add_loss=False)
    gt_l2_loss = loss_fns['l2'](sim_true, osim, add_loss=False)
    gt_bce_loss = loss_fns['bce'](sim_true, osim, add_loss=False)
    num_same = tf.reduce_sum(sim_true)
    num_diff = tf.reduce_sum(1 - sim_true)
    ssame_m, ssame_var = tf.nn.weighted_moments(osim, None, sim_true)
    sdiff_m, sdiff_var = tf.nn.weighted_moments(osim, None, 1 - sim_true)

    return gt_l1_loss, gt_l2_loss, gt_bce_loss, ssame_m, ssame_var, sdiff_m, sdiff_var
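The same/different statistics above rely on `tf.nn.weighted_moments`, which computes a mean and variance weighted by a mask. A small self-contained toy illustration (the values are made up, not taken from the examples):

import tensorflow as tf

# 2x2 similarity matrix and a binary mask marking corresponding pairs.
sim = tf.constant([[0.9, 0.2],
                   [0.1, 0.8]])
mask = tf.constant([[1.0, 0.0],
                    [0.0, 1.0]])

# Mean/variance over masked entries only (here, the matching pairs).
same_mean, same_var = tf.nn.weighted_moments(sim, axes=None, frequency_weights=mask)
diff_mean, diff_var = tf.nn.weighted_moments(sim, axes=None, frequency_weights=1 - mask)

with tf.Session() as sess:
    print(sess.run([same_mean, same_var, diff_mean, diff_var]))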
Example #6
print("network")
network = MLPInteractiveNetwork()
print(network)

print("data")
dataset = data_util.datasets.get_dataset(opts)
sample = dataset.load_batch('train')
print(sample.keys())
print(sample['true_match'])

print("output")
output = network(sample['graph'])
print(output.nodes)
output_batch = tf.reshape(output.nodes, [ opts.batch_size, -1, 25 ])
print(output_batch)
output_sim = tfutils.get_sim(output_batch)
print(output_sim)
diff = tf.sparse_add(-output_sim, sample['adj_mat'])
loss = tf.reduce_mean(tf.abs(diff))

b = opts.batch_size
v = opts.dataset_params.views[-1]
p = opts.dataset_params.points[-1]

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
  init = tf.global_variables_initializer()
  sess.run(init)
  # for i in range(opts.dataset_params.sizes['train'] // opts.batch_size):
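  # Sketch (assumption, not in the original): the commented-out loop would
  # typically evaluate the loss once per training batch, e.g.
  # for i in range(opts.dataset_params.sizes['train'] // opts.batch_size):
  #   print('batch {}: loss = {}'.format(i, sess.run(loss)))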