Example 1
  def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
    """Creates an op for training for full batch case.

    Args:
      inputs: list of input Tensors.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.

    Returns:
      An op for doing an update of full-batch k-means.
    """
    cluster_sums = []
    cluster_counts = []
    epsilon = tf.constant(1e-6, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
      with ops.colocate_with(inp):
        cluster_sums.append(tf.unsorted_segment_sum(inp,
                                                    cluster_idx,
                                                    self._num_clusters))
        cluster_counts.append(tf.unsorted_segment_sum(
            tf.reshape(tf.ones(tf.reshape(tf.shape(inp)[0], [-1])), [-1, 1]),
            cluster_idx,
            self._num_clusters))
    with ops.colocate_with(cluster_centers):
      new_clusters_centers = tf.add_n(cluster_sums) / (
          tf.cast(tf.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
      if self._clusters_l2_normalized():
        new_clusters_centers = tf.nn.l2_normalize(new_clusters_centers, dim=1)
    return tf.assign(cluster_centers, new_clusters_centers)
def data_group_avg(group_ids, data):
    # Sum each group
    sum_total = tf.unsorted_segment_sum(data, group_ids, 3)
    # Count each group
    num_total = tf.unsorted_segment_sum(tf.ones_like(data), group_ids, 3)
    # Calculate average
    avg_by_group = sum_total/num_total
    return(avg_by_group)
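
The helper above computes a per-group average by dividing a segment-wise sum by a segment-wise count, both obtained with tf.unsorted_segment_sum and with the number of groups hard-coded to 3. A minimal usage sketch, assuming TensorFlow 1.x; the toy inputs are hypothetical:

import tensorflow as tf

data = tf.constant([[1.0], [2.0], [3.0], [4.0]])   # 4 rows, 1 feature each
group_ids = tf.constant([0, 1, 0, 2])              # group assignment per row

avg = data_group_avg(group_ids, data)
with tf.Session() as sess:
    print(sess.run(avg))  # [[2.], [2.], [4.]]
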
Example 3
def get_eval(logits, labels):
    with tf.variable_scope('loss2') as scope:
        logits = tf.reshape(logits, [-1, 21], name='logits2d')
        labels = tf.reshape(labels, [-1], name='labels1d')
        y_softmax = tf.nn.softmax(logits, name='softmax1d')
        predictions = tf.argmax(y_softmax, 1)
        correct_pred = tf.to_float(tf.equal(labels, predictions))
        ones = tf.ones_like(labels)
        eval_count = tf.to_float(tf.unsorted_segment_sum(ones, labels, 21))
        eval_correct = tf.to_float(tf.unsorted_segment_sum(correct_pred, labels, 21))
    return eval_count, eval_correct
Example 4
def kMeansTF(data, center, nMaxIter, th): # data: nDim x nData, center: nDim x  nCenter
    """Clustering data using the kMeans method implemented with tensorflow.

    :param data: 2D matrix as data input with dimensions: nDim x nData.
    :type data: numpy array.
    :param center: 2D matrix with initial cluster centers with dimensions: nDim x nCenter.
    :type center: numpy array.
    :param nMaxIter: Maximum number of iterations.
    :type nMaxIter: int.
    :param th: Threshold applied to RMS error between prior and current cluster centers.
    :type th: float.
    :return: 2D matrix with computed cluster centers with dimensions: nDim x nCenter.
    """
    nData   = data.shape[1]
    nCenter = center.shape[1]
    center  = tf.Variable(center)

    # Replicate data to have the dimensions: nDim x nData x nCenter
    rData       = tf.tile(tf.expand_dims(data,-1),[1, 1, nCenter]) # replicate for nCenter
    rCenter     = tf.transpose(tf.tile(tf.expand_dims(center,-1),[1, 1, nData]),perm=[0, 2, 1]) # replicate for nData

    # Get the cluster center of minimum distance for each data point.
    ssq         = tf.reduce_sum(tf.square(rData - rCenter), 0, keep_dims=True) # over nDim
    index       = tf.squeeze(tf.argmin(ssq, 2)) # min index over nCenter and remove leading dimension

    # Compute the new cluster centers based on the closest data points.
    newSum      = tf.unsorted_segment_sum(tf.transpose(data,[1,0]), index, nCenter)
    count       = tf.unsorted_segment_sum(tf.transpose(tf.ones_like(data),[1,0]), index, nCenter)
    newCenter   = tf.transpose(newSum / count,[1,0])

    # Compute the differences between the new and old cluster centers and threshold them.
    rms             = tf.reduce_sum(tf.sqrt(tf.reduce_sum((center-newCenter)*(center-newCenter), 0)), 0)
    changeCenter    = rms > th

    # Update the cluster centers if they have changed by more than the threshold value.
    with tf.control_dependencies([changeCenter]):
        doUpdates = center.assign(newCenter)

    # Initialize the tensor variables.
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    # As long as there are enough changes in the cluster centers and we have not reached the maximum number of
    # iterations, repeat the steps from above.
    changed = True
    iter    = 0
    while changed and iter < nMaxIter:
        iter += 1
        [changed, _] = sess.run([changeCenter, doUpdates])

    return sess.run(center)
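
A hypothetical call to kMeansTF, assuming the TensorFlow 1.x API used above (tf.initialize_all_variables, keep_dims) is available; the data layout is nDim x nData as documented, and the toy inputs are made up:

import numpy as np

data = np.random.rand(2, 100).astype(np.float32)            # 2 dims, 100 points
center = data[:, np.random.choice(100, 3, replace=False)]   # 3 initial centers taken from the data
final_centers = kMeansTF(data, center, nMaxIter=50, th=1e-4)
print(final_centers.shape)  # (2, 3)
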
    def adloss(self, x, xt, y, global_step):
        with tf.variable_scope('reuse_inference') as scope:
            scope.reuse_variables()
            self.inference(x, training=True)
            source_feature = self.feature
            scope.reuse_variables()
            self.inference(xt, training=True)
            target_feature = self.feature
            target_pred = self.output
        with tf.variable_scope('reuse') as scope:
            source_logits, _ = D(source_feature)
            scope.reuse_variables()
            target_logits, _ = D(target_feature)

        self.source_feature = source_feature
        self.target_feature = target_feature
        self.concat_feature = tf.concat([source_feature, target_feature], 0)
        source_result = tf.argmax(y, 1)
        target_result = tf.argmax(target_pred, 1)
        ones = tf.ones_like(source_feature)
        current_source_count = tf.unsorted_segment_sum(ones, source_result, self.num_classes)
        current_target_count = tf.unsorted_segment_sum(ones, target_result, self.num_classes)

        current_positive_source_count = tf.maximum(current_source_count, tf.ones_like(current_source_count))
        current_positive_target_count = tf.maximum(current_target_count, tf.ones_like(current_target_count))

        current_source_centroid = tf.divide(tf.unsorted_segment_sum(data=source_feature, segment_ids=source_result, num_segments=self.num_classes), current_positive_source_count)
        current_target_centroid = tf.divide(tf.unsorted_segment_sum(data=target_feature, segment_ids=target_result, num_segments=self.num_classes), current_positive_target_count)

        decay = tf.constant(0.3)
        self.decay = decay

        target_centroid = decay * current_target_centroid + (1. - decay) * self.target_moving_centroid
        source_centroid = decay * current_source_centroid + (1. - decay) * self.source_moving_centroid

        self.Semanticloss = protoloss(source_centroid, target_centroid)
        tf.summary.scalar('semanticloss', self.Semanticloss)

        D_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=target_logits, labels=tf.ones_like(target_logits)))
        D_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=source_logits, labels=tf.zeros_like(source_logits)))
        self.D_loss = D_real_loss + D_fake_loss
        self.G_loss = -self.D_loss
        tf.summary.scalar('G_loss', self.G_loss)
        tf.summary.scalar('JSD', self.G_loss / 2 + math.log(2))

        self.G_loss = 0.1 * self.G_loss
        self.D_loss = 0.1 * self.D_loss
        return self.G_loss, self.D_loss, source_centroid, target_centroid
Example 6
  def _grad_variance(self):
    """Estimate of gradient Variance.

    Returns:
      C_t ops.
    """
    grad_var_ops = []
    tensor_to_avg = []
    for t, g in zip(self._vars, self._grad):
      if isinstance(g, tf.IndexedSlices):
        tensor_to_avg.append(
            tf.reshape(tf.unsorted_segment_sum(g.values,
                                               g.indices,
                                               g.dense_shape[0]),
                       shape=t.get_shape()))
      else:
        tensor_to_avg.append(g)
    avg_op = self._moving_averager.apply(tensor_to_avg)
    grad_var_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_avg = [self._moving_averager.average(val)
                        for val in tensor_to_avg]
      self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]

    # Compute Variance
    self._grad_var = tf.maximum(
        tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype),
        self._grad_norm_squared_avg
        - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))
    if self._sparsity_debias:
      self._grad_var *= self._sparsity_avg
    return grad_var_ops  # C_t
 def testValues(self):
   dtypes = [tf.float32,
             tf.float64,
             tf.int64,
             tf.int32,
             tf.complex64,
             tf.complex128]
   indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
   num_segments = 12
   for indices in indices_flat, indices_flat.reshape(5, 2):
     shape = indices.shape + (2,)
     for dtype in dtypes:
       with self.test_session(use_gpu=False):
         tf_x, np_x = self._input(shape, dtype=dtype)
         np_ans = self._segmentReduce(indices,
                                      np_x,
                                      np.add,
                                      op2=None,
                                      num_out_rows=num_segments)
         s = tf.unsorted_segment_sum(data=tf_x,
                                     segment_ids=indices,
                                     num_segments=num_segments)
         tf_ans = s.eval()
       self._assertAllClose(indices, np_ans, tf_ans)
       self.assertShapeEqual(np_ans, s)
 def testBadIndices(self):
   with self.test_session():
     for bad in [[-1]], [[7]]:
       unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
       with self.assertRaisesOpError(
           r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
         unsorted.eval()
 def testGradientMatchesSegmentSum(self):
   # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
   # and compare the outputs, which should be identical.
   # NB: for this test to work, indices must be valid for SegmentSum, namely
   # it must be sorted, the indices must be contiguous, and num_segments
   # must be max(indices) + 1.
   indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
   n = len(indices)
   num_cols = 2
   shape = [n, num_cols]
   num_segments = max(indices) + 1
   with self.test_session():
     tf_x, np_x = self._input(shape, dtype=tf.float64)
     # Results from UnsortedSegmentSum
     unsorted_s = tf.unsorted_segment_sum(data=tf_x,
                                                segment_ids=indices,
                                                num_segments=num_segments)
     unsorted_jacob_t, unsorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, unsorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
     # Results from SegmentSum
     sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
     sorted_jacob_t, sorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, sorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
   self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
   self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
  """Computes embeddings for each entry of sparse features sparse_features.

  Args:
    params: list of 2D tensors containing vector embeddings
    sparse_features: 1D tensor of strings. Each entry is a string encoding of
      dist_belief.SparseFeatures, and represents a variable length list of
      feature ids, and optionally, corresponding weights values.
    allow_weights: boolean to control whether the weights returned from the
      SparseFeatures are used to multiply the embeddings.

  Returns:
    A tensor representing the combined embeddings for the sparse features.
    For each entry s in sparse_features, the function looks up the embeddings
    for each id and sums them into a single tensor weighing them by the
    weight of each id. It returns a tensor with each entry of sparse_features
    replaced by this combined embedding.
  """
  if not isinstance(params, list):
    params = [params]
  # Lookup embeddings.
  sparse_features = tf.convert_to_tensor(sparse_features)
  indices, ids, weights = gen_parser_ops.unpack_sparse_features(sparse_features)
  embeddings = tf.nn.embedding_lookup(params, ids)

  if allow_weights:
    # Multiply by weights, reshaping to allow broadcast.
    broadcast_weights_shape = tf.concat(0, [tf.shape(weights), [1]])
    embeddings *= tf.reshape(weights, broadcast_weights_shape)

  # Sum embeddings by index.
  return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
Example 11
    def make_task_output_model(
        self,
        placeholders: Dict[str, tf.Tensor],
        model_ops: Dict[str, tf.Tensor],
    ) -> None:
        placeholders['graph_nodes_list'] = \
            tf.placeholder(dtype=tf.int32, shape=[None], name='graph_nodes_list')
        placeholders['target_values'] = \
            tf.placeholder(dtype=tf.float32, shape=[len(self.params['task_ids']), None], name='target_values')
        placeholders['out_layer_dropout_keep_prob'] = \
            tf.placeholder(dtype=tf.float32, shape=[], name='out_layer_dropout_keep_prob')

        task_metrics = {}
        losses = []
        final_node_feature_size = model_ops[
            'final_node_representations'].shape.as_list()[-1]
        for (internal_id, task_id) in enumerate(self.params['task_ids']):
            with tf.variable_scope("out_layer_task%i" % task_id):
                with tf.variable_scope("regression_gate"):
                    regression_gate = \
                        MLP(self.initial_node_feature_size + final_node_feature_size, 1, [],
                            placeholders['out_layer_dropout_keep_prob'])
                with tf.variable_scope("regression"):
                    regression_transform = \
                        MLP(final_node_feature_size, 1, [],
                            placeholders['out_layer_dropout_keep_prob'])

                per_node_outputs = regression_transform(
                    model_ops['final_node_representations'])
                gate_input = tf.concat([
                    model_ops['final_node_representations'],
                    model_ops['initial_node_features']
                ],
                                       axis=-1)
                per_node_gated_outputs = tf.nn.sigmoid(
                    regression_gate(gate_input)) * per_node_outputs

                # Sum up all nodes per-graph
                per_graph_outputs = tf.unsorted_segment_sum(
                    data=per_node_gated_outputs,
                    segment_ids=placeholders['graph_nodes_list'],
                    num_segments=placeholders['num_graphs'])
                per_graph_outputs = tf.squeeze(per_graph_outputs)  # [g]

                per_graph_errors = per_graph_outputs - placeholders[
                    'target_values'][internal_id, :]
                task_metrics['abs_err_task%i' % task_id] = tf.reduce_sum(
                    tf.abs(per_graph_errors))
                tf.summary.scalar(
                    'mae_task%i' % task_id,
                    task_metrics['abs_err_task%i' % task_id] /
                    tf.cast(placeholders['num_graphs'], tf.float32))
                losses.append(tf.reduce_mean(0.5 *
                                             tf.square(per_graph_errors)))
        model_ops['task_metrics'] = task_metrics
        model_ops['task_metrics']['loss'] = tf.reduce_sum(losses)
        model_ops['task_metrics'][
            'total_loss'] = model_ops['task_metrics']['loss'] * tf.cast(
                placeholders['num_graphs'], tf.float32)
Example 12
def _get_loss(features, dipole, charge, model_params):
    metrics = {}  # Not editing features here for safety; use a separate dict

    d_pred = dipole
    d_data = features['d_data']
    d_data *= model_params['d_scale']
    if model_params['max_dipole']:
        # should get the mask here since max_dipole refers to total dipole
        d_mask = tf.abs(d_data) > model_params['max_dipole']

    d_error = dipole - d_data
    metrics['d_data'] = d_data
    metrics['d_pred'] = d_pred
    metrics['d_error'] = d_error
    metrics['q_data'] = tf.zeros_like(charge)
    metrics['q_pred'] = charge
    metrics['q_error'] = charge

    if model_params['log_d_per_atom'] or model_params['use_d_per_atom']:
        ind_1 = features['ind_1']
        atom_count = tf.unsorted_segment_sum(tf.ones_like(ind_1, tf.float32),
                                             ind_1,
                                             tf.shape(d_data)[0])
        d_pred_per_atom = d_pred / atom_count
        d_data_per_atom = d_data / atom_count
        d_error_per_atom = d_error / atom_count
        metrics['d_data_per_atom'] = d_data_per_atom
        metrics['d_pred_per_atom'] = d_pred_per_atom
        metrics['d_error_per_atom'] = d_error_per_atom

    # d_error is adjusted from here
    if model_params['use_d_per_atom']:
        d_error = d_error_per_atom
        if model_params['use_d_per_sqrt']:
            d_error = d_error_per_atom * tf.sqrt(atom_count)
    if model_params['use_d_weight']:
        # Add this to metrics so that one can get a weighted RMSE
        metrics['d_weight'] = features['d_weight']
        d_error *= features['d_weight']
    if model_params['max_dipole']:
        d_error = tf.where(d_mask, tf.zeros_like(d_error), d_error)
    # keep the per_sample loss so that it can be consumed by tf.metrics.mean
    d_loss = d_error**2 * model_params['d_loss_multiplier']
    q_loss = charge**2
    metrics['d_loss'] = d_loss
    tot_loss = tf.reduce_mean(d_loss) + tf.reduce_mean(q_loss)

    if model_params['use_l2']:
        tvars = tf.trainable_variables()
        l2_loss = tf.add_n([
            tf.nn.l2_loss(v) for v in tvars
            if ('bias' not in v.name and 'D_OUT' not in v.name)
        ])
        l2_loss = l2_loss * model_params['l2_loss_multiplier']
        metrics['l2_loss'] = l2_loss
        tot_loss += l2_loss

    metrics['tot_loss'] = tot_loss
    return tot_loss, metrics
    def create_model(self, num_samples, dim, k):
        with self.graph.as_default():
            X = tf.placeholder(tf.float32, [num_samples, dim])
            cluster_membership = tf.Variable(tf.zeros([num_samples]), dtype=tf.float32)
            centroids = tf.Variable(tf.random_uniform([k,dim]), dtype=tf.float32)
            X_temp = tf.reshape(tf.tile(X, [1,k]), [num_samples, k, dim])
            centroids_temp = tf.reshape(tf.tile(centroids,[num_samples,1]), [num_samples, k, dim])

            distances_to_centroids = tf.reduce_sum(tf.square(tf.subtract(X_temp, centroids_temp)), reduction_indices=2)  #N x k x 1
            cluster_membership = tf.arg_min(distances_to_centroids, 1) 
            #distance-minimizing column for each row 

            new_means_numerator = tf.unsorted_segment_sum(X, cluster_membership, k)
            new_means_denominator = tf.unsorted_segment_sum(tf.ones_like(X), cluster_membership, k)
            new_means = new_means_numerator/new_means_denominator
            update_centroids = centroids.assign(new_means)
            return update_centroids, cluster_membership, X
Example 14
    def perform_pooling(self, last_h):
        #  By default, it simply sums up the node embeddings
        #  We do not assume sorted segment_ids
        graph_node_sums = tf.unsorted_segment_sum(data=last_h,  # [v x h]
                                                  segment_ids=self.placeholders['graph_nodes_list'],
                                                  num_segments=self.placeholders['num_graphs'])  # [g x h]

        return graph_node_sums
Example 15
def k_means_cluster(points, k, first_centroids=None, predict_method=None):
    max_iters = 100
    N, D = points.shape
    K = k  # number of clusters
    # initial cluster centers
    centroids = tf.Variable(points[sample(range(N), K)]
                            if first_centroids is None else first_centroids)
    # cluster assignment of each sample
    cluster_assignments = tf.Variable(tf.zeros([N], dtype=tf.int64))
    # distances from every sample to every cluster center, computed in one shot
    rep_points = tf.reshape(tf.tile(points, [1, K]), [N, K, D])
    rep_centroids = tf.reshape(tf.tile(centroids, [N, 1]), [N, K, D])
    sum_squares = tf.reduce_sum(tf.square(rep_points - rep_centroids),
                                reduction_indices=2)

    # index of the closest cluster center for each sample
    best_centroids = tf.argmin(sum_squares, 1)
    # index of the sample closest to each cluster center
    centroids_indies = tf.argmin(sum_squares, 0)

    # sum the points that share the same index in `best_centroids`
    total = tf.unsorted_segment_sum(points, best_centroids, K)
    # count the points that share the same index in `best_centroids`
    count = tf.unsorted_segment_sum(tf.ones_like(points), best_centroids, K)
    # use the per-cluster means as the new cluster centers
    means = total / count

    did_assignments_change = tf.reduce_any(
        tf.not_equal(best_centroids, cluster_assignments))

    with tf.control_dependencies([did_assignments_change]):
        do_updates = tf.group(centroids.assign(means),
                              cluster_assignments.assign(best_centroids))
    init = tf.initialize_all_variables()

    sess = tf.Session()
    sess.run(init)

    iters, changed = 0, True
    while changed and iters < max_iters:
        iters += 1
        [changed, _] = sess.run([did_assignments_change, do_updates])

    [centers, cindies, assignments
     ] = sess.run([centroids, centroids_indies, cluster_assignments])
    return iters, centers, assignments
Example 16
 def testBadIndices(self):
     with self.test_session():
         for bad in [[-1]], [[7]]:
             unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
             with self.assertRaisesOpError(
                     r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" %
                     bad[0][0]):
                 unsorted.eval()
 def _compress_indexedslices_grad(self, grad):
     if isinstance(grad, tf.IndexedSlices):
         unique_indices, new_index_position = tf.unique(grad.indices)
         summed_values = tf.unsorted_segment_sum(
             grad.values, new_index_position,
             tf.shape(unique_indices)[0])
         grad = tf.IndexedSlices(summed_values, unique_indices)
     return grad
 def testBadIndices(self):
     # Note: GPU kernel does not return the out-of-range error needed for this
     # test, so this test is marked as cpu-only.
     with self.test_session(use_gpu=False):
         for bad in [[-1]], [[7]]:
             unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
             with self.assertRaisesOpError(r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
                 unsorted.eval()
Example 19
    def calculate_sum_of_insignificant_interactions_min_one_for_visible_bins(
            self, cis_b1, cis_rc, trans_b1, trans_rc):

        # based on the multinomial distribution's likelihood optimization! each vi ~ sum of observed reads of each bin
        #  :D no need for division! F will support it...
        bins_cis_sum = tf.unsorted_segment_sum(cis_rc, cis_b1,
                                               self.max_bin_id + 1)
        bins_trans_sum = tf.unsorted_segment_sum(trans_rc, trans_b1,
                                                 self.max_bin_id + 1)
        bins_sum = bins_cis_sum + bins_trans_sum

        visible_bins_ints_sum = tf.maximum(
            tf.ones([], tf.float64), tf.gather(bins_sum, self.active_bin_ids))

        return tf.sparse_to_dense(self.active_bin_ids, [self.max_bin_id + 1],
                                  visible_bins_ints_sum,
                                  default_value=tf.zeros([], dtype=tf.float64))
Example 20
    def build_centroid_based_losses(self):

        source_label = self.slabel
        # target_label = tf.Print(target_label,[tf.reduce_min(target_label),tf.reduce_max(target_label)])
        target_label = tf.argmax(self.tlabel_pred, 1)
        ones = tf.ones_like(self.sfc8)
        current_source_count = tf.unsorted_segment_sum(
            ones, source_label, self.params.num_classes)
        current_target_count = tf.unsorted_segment_sum(
            ones, target_label, self.params.num_classes)

        current_positive_source_count = tf.maximum(
            current_source_count, tf.ones_like(current_source_count))
        current_positive_target_count = tf.maximum(
            current_target_count, tf.ones_like(current_target_count))

        current_source_centroid = tf.divide(tf.unsorted_segment_sum(
            data=self.sfc8, segment_ids=source_label, num_segments=self.params.num_classes), current_positive_source_count)
        current_target_centroid = tf.divide(tf.unsorted_segment_sum(
            data=self.tfc8, segment_ids=target_label, num_segments=self.params.num_classes), current_positive_target_count)

        decay = tf.constant(0.3)

        source_moving_centroid = (
            decay) * current_source_centroid + (1. - decay) * self.source_moving_centroid
        target_moving_centroid = (
            decay) * current_target_centroid + (1. - decay) * self.target_moving_centroid

        self.class_wise_adaptation_loss = self.build_class_wise_adaptation_losses(
            source_moving_centroid, target_moving_centroid)
        self.sintra_loss = self.build_batch_intra_losses(
            source_moving_centroid, self.sfc8, self.slabel)
        self.tintra_loss = self.build_batch_intra_losses(
            source_moving_centroid, self.tfc8, tf.argmax(self.tlabel_pred, 1))
        self.sinter_loss = self.build_batch_inter_losses(
            source_moving_centroid, self.sfc8, self.slabel)
        self.tinter_loss = self.build_batch_inter_losses(
            target_moving_centroid, self.tfc8, tf.argmax(self.tlabel_pred, 1))
        self.tsmooth = self.build_laplacian_regularization(
            source_moving_centroid, self.tfc8, self.tlabel_pred)

        update_src = self.source_moving_centroid.assign(source_moving_centroid)
        update_tar = self.target_moving_centroid.assign(target_moving_centroid)

        return update_src, update_tar
Example 21
def _dipole_model_fn(features, labels, mode, params):
    """Model function for neural network dipoles"""
    if isinstance(params['network'], str):
        network_fn = getattr(pinn.networks, params['network'])
    else:
        network_fn = params['network']

    network_params = params['network_params']
    model_params = default_params.copy()
    model_params.update(params['model_params'])
    pred = network_fn(features, **network_params)
    pred = tf.expand_dims(pred, axis=1)

    ind = features['ind_1']  # ind_1 => id of molecule for each atom
    nbatch = tf.reduce_max(ind) + 1
    charge = tf.unsorted_segment_sum(pred, ind[:, 0], nbatch)

    dipole = pred * features['coord']
    dipole = tf.unsorted_segment_sum(dipole, ind[:, 0], nbatch)
    dipole = tf.sqrt(tf.reduce_sum(dipole**2, axis=1) + 1e-6)
    #charge = charge[:,0]

    if mode == tf.estimator.ModeKeys.TRAIN:
        n_trainable = np.sum(
            [np.prod(v.shape) for v in tf.trainable_variables()])
        print("Total number of trainable variables: {}".format(n_trainable))

        loss, metrics = _get_loss(features, dipole, charge, model_params)
        _make_train_summary(metrics)
        train_op = _get_train_op(loss, model_params)
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    if mode == tf.estimator.ModeKeys.EVAL:
        loss, metrics = _get_loss(features, dipole, charge, model_params)
        metrics = _make_eval_metrics(metrics)
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss,
                                          eval_metric_ops=metrics)

    if mode == tf.estimator.ModeKeys.PREDICT:
        pred = pred / model_params['d_scale']
        pred *= model_params['d_unit']

        predictions = {'dipole': dipole, 'charges': tf.expand_dims(pred, 0)}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
Example 22
 def testBadIndices(self):
   # Note: GPU kernel does not return the out-of-range error needed for this
   # test, so this test is marked as cpu-only.
   with self.test_session(use_gpu=False):
     for bad in [[-1]], [[7]]:
       unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2)
       with self.assertRaisesOpError(
           r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
         unsorted.eval()
Example 23
def compute_mean(cluster_center, x, label, K, eta):
    """ Compute Mean

    Input:
        x: embedding of size N x D
        label: cluster label of size N X 1
        K: number of clusters
        tf_eps: small constant

    Output:
        cluster_center: cluster center of size K x D
    """
    tf_eps = tf.constant(1.0e-16)
    cluster_size = tf.expand_dims(
        tf.unsorted_segment_sum(tf.ones(label.get_shape()), label, K), 1)
    cluster_center_new = (1 - eta) * tf.unsorted_segment_sum(
        x, label, K) / (cluster_size + tf_eps) + eta * cluster_center
    return cluster_center.assign(cluster_center_new)
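
A minimal sketch of how compute_mean updates the running centers, assuming TensorFlow 1.x; the toy inputs (4 points in 2-D, 2 clusters, eta = 0.5) are hypothetical:

import tensorflow as tf

x = tf.constant([[0.0, 0.0], [2.0, 0.0], [0.0, 4.0], [0.0, 6.0]])
label = tf.constant([0, 0, 1, 1])
cluster_center = tf.Variable(tf.zeros([2, 2]))

update_op = compute_mean(cluster_center, x, label, K=2, eta=0.5)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # half of the new per-cluster means, since the stored centers start at zero
    print(sess.run(update_op))  # ~[[0.5, 0.], [0., 2.5]]
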
Example 24
 def segment_sum(self, data):
     if self.nonzero_guarantee:
         return self.segment_sum_nonzero(data)
     else:
         return tf.unsorted_segment_sum(
             data = data,
             segment_ids = self.segment_indices,
             num_segments = self.segment_num,
         )
Example 25
def cluster_rnn_phn(n_clusters,
                    wav_files,
                    ark_file,
                    hopping_size,
                    window_size,
                    subsample,
                    n_jobs=4):

    feats, uttids = kaldi_io.readArk(ark_file)

    tf.segment_mean()
    #tf.

    #from https://github.com/tensorflow/tensorflow/issues/7389
    ones = tf.ones_like(x)
    count = tf.unsorted_segment_sum(ones, ids, 2)
    sums = tf.unsorted_segment_sum(x, ids, 2)
    mean = tf.divide(sums, count)
Example 26
    def call(self, inputs, training=False):
        '''

        outputs:
            Natural parameter
        '''
        f_ = inputs
        shape = tf.stack([f_['n_links'],self.hparams.link_state_dim-1], axis=0)
        #link_state = tf.zeros(shape)
        link_state = tf.concat([
            tf.expand_dims(f_['capacities'],axis=1),
            tf.zeros(shape)
        ], axis=1)

        shape = tf.stack([f_['n_paths'],self.hparams.path_state_dim-1], axis=0)
        path_state = tf.concat([
            tf.expand_dims(f_['traffic'],axis=1),
            tf.zeros(shape)
        ], axis=1)

        links = f_['links']
        paths = f_['paths']
        seqs=  f_['sequences']
        
        for _ in range(self.hparams.T):
        
            h_ = tf.gather(link_state,links)

            #TODO move this to feature calculation
            ids=tf.stack([paths, seqs], axis=1)            
            max_len = tf.reduce_max(seqs)+1
            shape = tf.stack([f_['n_paths'], max_len, self.hparams.link_state_dim])
            lens = tf.segment_sum(data=tf.ones_like(paths),
                                    segment_ids=paths)

            link_inputs = tf.scatter_nd(ids, h_, shape)
            #TODO move to tf.keras.RNN
            outputs, path_state = tf.nn.dynamic_rnn(self.path_update,
                                                    link_inputs,
                                                    sequence_length=lens,
                                                    initial_state = path_state,
                                                    dtype=tf.float32)
            m = tf.gather_nd(outputs,ids)
            m = tf.unsorted_segment_sum(m, links ,f_['n_links'])

            #Keras cell expects a list
            link_state,_ = self.edge_update(m, [link_state])
            
        if self.hparams.learn_embedding:
            r = self.readout(path_state,training=training)
            o = self.final(tf.concat([r,path_state], axis=1))
            
        else:
            r = self.readout(tf.stop_gradient(path_state),training=training)
            o = self.final(tf.concat([r, tf.stop_gradient(path_state)], axis=1) )
            
        return o
 def testEmptySecondDimension(self):
     dtypes = [np.float32, np.float64, np.int64, np.int32, np.complex64, np.complex128]
     with self.test_session(use_gpu=self.use_gpu):
         for dtype in dtypes:
             for itype in (np.int32, np.int64):
                 data = np.zeros((2, 0), dtype=dtype)
                 segment_ids = np.array([0, 1], dtype=itype)
                 unsorted = tf.unsorted_segment_sum(data, segment_ids, 2)
                 self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
Example 28
def centers_by_label(features, label):
    # Compute centers within batch
    unique_label, unique_idx, unique_count = tf.unique_with_counts(label)
    num_centers = tf.size(unique_label)
    appear_times = tf.gather(unique_count, unique_idx)
    appear_times = tf.reshape(appear_times, [-1, 1])
    weighted_prelogits = features / tf.cast(appear_times, tf.float32)
    centers = tf.unsorted_segment_sum(weighted_prelogits, unique_idx, num_centers)
    return centers, unique_label, unique_idx, unique_count
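
centers_by_label returns the mean feature vector per label: each row is divided by how often its label appears in the batch, then rows are segment-summed per label. A usage sketch with hypothetical inputs, assuming TensorFlow 1.x:

import tensorflow as tf

features = tf.constant([[1.0, 0.0], [3.0, 0.0], [0.0, 2.0], [0.0, 4.0]])
label = tf.constant([7, 7, 3, 3])  # labels need not be contiguous

centers, unique_label, _, _ = centers_by_label(features, label)
with tf.Session() as sess:
    print(sess.run([centers, unique_label]))
    # centers      -> [[2., 0.], [0., 3.]]  (one row per label, in order of first appearance)
    # unique_label -> [7, 3]
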
Example 29
def inference(documents, doc_mask, query, query_mask):
    embedding = tf.get_variable('embedding',
                                [FLAGS.vocab_size, FLAGS.embedding_size],
                                initializer=tf.random_uniform_initializer(minval=-0.05, maxval=0.05))

    regularizer = tf.nn.l2_loss(embedding)

    doc_emb = tf.nn.dropout(tf.nn.embedding_lookup(
        embedding, documents), FLAGS.dropout_keep_prob)
    doc_emb.set_shape([None, None, FLAGS.embedding_size])

    query_emb = tf.nn.dropout(tf.nn.embedding_lookup(
        embedding, query), FLAGS.dropout_keep_prob)
    query_emb.set_shape([None, None, FLAGS.embedding_size])

    with tf.variable_scope('document', initializer=orthogonal_initializer()):
        fwd_cell = tf.nn.rnn_cell.GRUCell(FLAGS.hidden_size)
        back_cell = tf.nn.rnn_cell.GRUCell(FLAGS.hidden_size)

        doc_len = tf.reduce_sum(doc_mask, reduction_indices=1)
        h, _ = tf.nn.bidirectional_dynamic_rnn(
            fwd_cell, back_cell, doc_emb,
            sequence_length=tf.to_int64(doc_len),
            dtype=tf.float32)
        h_doc = tf.concat(2, h)

    with tf.variable_scope('query', initializer=orthogonal_initializer()):
        fwd_cell = tf.nn.rnn_cell.GRUCell(FLAGS.hidden_size)
        back_cell = tf.nn.rnn_cell.GRUCell(FLAGS.hidden_size)

        query_len = tf.reduce_sum(query_mask, reduction_indices=1)
        h, _ = tf.nn.bidirectional_dynamic_rnn(
            fwd_cell, back_cell, query_emb,
            sequence_length=tf.to_int64(query_len),
            dtype=tf.float32)
        h_query = tf.concat(2, h)

    M = tf.batch_matmul(h_doc, h_query, adj_y=True)
    M_mask = tf.to_float(tf.batch_matmul(tf.expand_dims(
        doc_mask, -1), tf.expand_dims(query_mask, 1)))

    alpha = softmax(M, 1, M_mask)
    beta = softmax(M, 2, M_mask)

    query_importance = tf.expand_dims(tf.reduce_sum(
        beta, 1) / tf.to_float(tf.expand_dims(doc_len, -1)), -1)

    s = tf.squeeze(tf.batch_matmul(alpha, query_importance), [2])

    unpacked_s = zip(tf.unpack(s, FLAGS.batch_size),
                     tf.unpack(documents, FLAGS.batch_size))
    y_hat = tf.pack([
        tf.unsorted_segment_sum(attentions, sentence_ids, FLAGS.vocab_size)
        for (attentions, sentence_ids) in unpacked_s
    ])

    return y_hat, regularizer
Example 30
    def build_weights_centroid_based_losses(self):
        source_label = self.slabel
        target_softmax = tf.nn.softmax(self.tlabel_pred, axis=1)
        target_softmax_expand = tf.expand_dims(target_softmax, -1)
        tfc8_expand = tf.expand_dims(self.tfc8, 1)
        ones = tf.ones_like(self.sfc8)
        current_source_count = tf.unsorted_segment_sum(
            ones, source_label, self.params.num_classes)
        current_target_count = tf.reduce_sum(
            target_softmax, axis=0, keep_dims=True)

        current_positive_source_count = tf.maximum(
            current_source_count, tf.ones_like(current_source_count))
        current_positive_target_count = tf.matmul(
            current_target_count, tf.ones([1, tf.shape(self.tfc8)[1]], tf.float32), True)

        current_source_centroid = tf.divide(tf.unsorted_segment_sum(
            data=self.sfc8, segment_ids=source_label, num_segments=self.params.num_classes), current_positive_source_count)
        current_target_centroid = tf.divide(
            tf.reduce_sum(tf.matmul(target_softmax_expand, tfc8_expand), axis=0), current_positive_target_count)

        decay = tf.constant(0.5)

        source_moving_centroid = (
            decay) * current_source_centroid + (1. - decay) * self.source_moving_centroid
        target_moving_centroid = (
            decay) * current_target_centroid + (1. - decay) * self.target_moving_centroid

        self.class_wise_adaptation_loss = self.build_class_wise_adaptation_losses(
            source_moving_centroid, target_moving_centroid)
        self.sintra_loss = self.build_batch_intra_losses(
            source_moving_centroid, self.sfc8, self.slabel)
        self.tintra_loss = self.build_batch_intra_losses(
            source_moving_centroid, self.tfc8, tf.argmax(self.tlabel_pred, 1))
        self.sinter_loss = self.build_batch_inter_losses(
            source_moving_centroid, self.sfc8, self.slabel)
        self.tinter_loss = self.build_batch_inter_losses(
            target_moving_centroid, self.tfc8, tf.argmax(self.tlabel_pred, 1))
        # self.tsmooth = self.build_laplacian_regularization(source_moving_centroid,self.tfc8,self.tlabel_pred)

        update_src = self.source_moving_centroid.assign(source_moving_centroid)
        update_tar = self.target_moving_centroid.assign(target_moving_centroid)

        return update_src, update_tar
Example 31
    def perform_pooling(self, last_h):
        #  By default, it simply sums up the node embeddings
        #  We do not assume sorted segment_ids
        graph_node_sums = tf.unsorted_segment_sum(
            data=last_h,  # [v x h]
            segment_ids=self.placeholders['graph_nodes_list'],
            num_segments=self.placeholders['num_graphs'])  # [g x h]

        if not self.params.get('use_node_pooling_attention', False):
            return graph_node_sums

        mechanism = self.params.get('node_pooling_attention_mechanism',
                                    'default')

        if mechanism == 'default':  # TODO : Get a better name
            node_sum_copies = tf.gather(
                params=graph_node_sums,
                indices=self.placeholders['graph_nodes_list'])

            nodes_with_node_sums = tf.concat([last_h, node_sum_copies], -1)
            intermediate_alignment = tf.layers.dense(
                nodes_with_node_sums,
                units=100,
                activation=tf.nn.relu,
                name='interm_node_pool_attn')
            alignment_scores = tf.layers.dense(intermediate_alignment,
                                               units=1,
                                               name='final_node_pool_attn')

            attention = self.compute_attention_normalization(
                alignment_scores, self.placeholders['graph_nodes_list'],
                self.placeholders['num_graphs'])
            normalized_last_h = attention * last_h
            graph_attention_sums = tf.unsorted_segment_sum(
                data=normalized_last_h,
                segment_ids=self.placeholders['graph_nodes_list'],
                num_segments=self.placeholders['num_graphs'])

            return graph_attention_sums

        else:
            raise NotImplementedError(
                "Node-Pooling attention mechanism {} not implemented".format(
                    mechanism))
    def compute_embeddings(self, embeddings: tf.Tensor) -> tf.Tensor:
        """
        Uses the model layer to process embeddings to new embeddings. All embeddings are in one dimension.
        Propagation is made in one pass with many disconnected graphs.

        Args:
            embeddings: Tensor of shape [v, h].

        Returns:
            Tensor of shape [v, h].
        """
        num_nodes = tf.shape(embeddings, out_type=tf.int32)[0]

        # Get all edge targets (aggregate of typed edges)
        edge_targets = []  # list of tensors of message targets of shape [e]
        for edge_type_idx, adjacency_list_for_edge_type in enumerate(
                self.placeholders['adjacency_lists']):
            edge_targets_for_one_type = adjacency_list_for_edge_type[:, 1]
            edge_targets.append(edge_targets_for_one_type)
        edge_targets = tf.concat(edge_targets, axis=0)  # [M]

        # Propagate
        for step in range(self.config['num_timesteps']):
            messages = []  # list of tensors of messages of shape [e, h]
            message_source_states = [
            ]  # list of tensors of edge source states of shape [e, h]

            # Collect incoming messages per edge type
            for edge_type_idx, adjacency_list_for_edge_type in enumerate(
                    self.placeholders['adjacency_lists']):
                edge_sources = adjacency_list_for_edge_type[:, 0]
                edge_source_states = tf.nn.embedding_lookup(
                    params=embeddings, ids=edge_sources)  # [e, h]
                all_messages_for_edge_type = tf.matmul(
                    edge_source_states, self.state.weights['edge_weights']
                    [edge_type_idx])  # Shape [e, h]
                messages.append(all_messages_for_edge_type)
                message_source_states.append(edge_source_states)

            messages = tf.concat(messages, axis=0)  # [M, h]

            messages = tf.unsorted_segment_sum(
                data=messages,
                segment_ids=edge_targets,
                num_segments=num_nodes)  # [v, h]

            if self.config['use_edge_bias'] == 1:
                embeddings += tf.matmul(
                    self.placeholders['num_incoming_edges_per_type'],
                    self.state.weights['edge_biases'])

            # pass updated vertex features into RNN cell
            embeddings = self.state.weights['rnn_cells'](
                messages, embeddings)[1]  # [v, h]

        return embeddings
Example 33
 def gcn_norm(self, edge_index, num_nodes):
     # add self-loop.
     diagnal_edge_index = tf.stack([tf.range(num_nodes, dtype=tf.int32)] *
                                   2,
                                   axis=0)
     edge_index = tf.concat([edge_index, diagnal_edge_index], axis=1)
     edge_weight = tf.ones(tf.shape(edge_index)[1], dtype=tf.float32)
     deg = tf.unsorted_segment_sum(edge_weight, edge_index[0], num_nodes)
     deg_inv_sqrt = tf.pow(deg, -0.5)
     return edge_index, deg_inv_sqrt
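
gcn_norm counts node degrees (including the added self-loops) by segment-summing a vector of ones over the edge sources. A standalone worked sketch of the same computation on a hypothetical 3-node graph with edges 0->1 and 1->2, assuming TensorFlow 1.x:

import tensorflow as tf

num_nodes = 3
edge_index = tf.constant([[0, 1], [1, 2]], dtype=tf.int32)  # row 0: sources, row 1: targets
self_loops = tf.stack([tf.range(num_nodes, dtype=tf.int32)] * 2, axis=0)
edge_index = tf.concat([edge_index, self_loops], axis=1)    # sources become [0, 1, 0, 1, 2]
edge_weight = tf.ones(tf.shape(edge_index)[1], dtype=tf.float32)
deg = tf.unsorted_segment_sum(edge_weight, edge_index[0], num_nodes)
with tf.Session() as sess:
    print(sess.run(deg))                 # [2. 2. 1.]
    print(sess.run(tf.pow(deg, -0.5)))   # [0.707..., 0.707..., 1.]
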
Example 34
def process_grad(grad):
    if grad is not None:
        grad = ops.convert_to_tensor_or_indexed_slices(grad)
        if isinstance(grad, ops.IndexedSlices):
            # IndexedSlices is not supported in the Java API, so we have to convert it
            # to a dense tensor. This operation is potentially expensive, but there
            # seems to be no workaround.
            grad = tf.unsorted_segment_sum(grad.values, grad.indices,
                                           grad.dense_shape[0])
    return grad
Example 35
def apply_factor(tensor, *args, **kwargs):
    scope = kwargs.pop("scope", "")
    with tf.name_scope(scope):
        n_args = len(args)

        if n_args == 0:
            tensor, output_size, error_symbol = tensor
            return one_hot(tensor, output_size, scope=scope)
        else:
            tensor, args = slice_out_int_literals(tensor, list(args))
            args, is_batched = make_batch_consistent(args)
            tensor, output_size, error_symbol = tensor
            
            # handle the case where all arguments were int literals
            tensor_dim_sizes = [dim.value for dim in tensor.get_shape()]
            if not tensor_dim_sizes:
                return one_hot(tensor, output_size, scope=scope)

            # Each arg is batch size x arg dim. Add dimensions to enable broadcasting.
            for i, arg in enumerate(args):
                for j in xrange(n_args):
                    if j == i: continue
                    args[i] = tf.expand_dims(args[i], j + 1)

            # compute joint before tensor is applied
            joint = 1
            for arg in args:
                joint = joint * arg
            
            # prepare for unsorted_segment_sum
            joint = tf.reshape(joint, (-1, np.prod(tensor_dim_sizes)))
            joint = tf.transpose(joint, [1, 0])	 # |tensor| x batch_size

            if error_symbol is not None:
                result = tf.unsorted_segment_sum(joint, tf.reshape(tensor, [-1]), output_size + 1)
                # assume error bin is last bin
                result = result[:output_size, :]
            else:
                result = tf.unsorted_segment_sum(joint, tf.reshape(tensor, [-1]), output_size)

            result = tf.transpose(result, [1, 0])
            if not is_batched: result = tf.squeeze(result)
            return result    
Example 36
def batch_unsrt_segment_sum(data, segment_ids, num_segments):
    """ Performas the `tf.unsorted_segment_sum` operation batch-wise"""
    # create distinct segments per batch
    num_batches = tf.shape(segment_ids, out_type=tf.int64)[0]
    batch_indices = tf.range(num_batches)
    segment_ids_per_batch = segment_ids + num_segments * tf.expand_dims(batch_indices, axis=1)

    # do the normal unsegment sum and reshape to original shape
    seg_sums = tf.unsorted_segment_sum(data, segment_ids_per_batch, num_segments * num_batches)
    return tf.reshape(seg_sums, tf.stack((-1, num_segments)))
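
A short illustration of the batching trick above, assuming TensorFlow 1.x: the segment ids of batch row i are shifted by i * num_segments, so a single tf.unsorted_segment_sum covers every row, and the result is reshaped back to [batch, num_segments]. The toy inputs are hypothetical; segment_ids is int64 to match the int64 batch offset built inside the helper:

import tensorflow as tf

data = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
segment_ids = tf.constant([[0, 0, 1], [1, 0, 1]], dtype=tf.int64)

sums = batch_unsrt_segment_sum(data, segment_ids, num_segments=2)
with tf.Session() as sess:
    print(sess.run(sums))  # [[ 3.  3.] [ 5. 10.]]
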
Example 37
    def body(index, array):
        hierarchy = tf.gather(hierarchies, index, axis=1)
        __, hierarchy = tf.unique(hierarchy)
        array_hierarchy = tf.unsorted_segment_sum(array_copy,
                                                  hierarchy,
                                                  num_segments=tf.size(
                                                      tf.unique(hierarchy)[0]))

        array = tf.concat([array, array_hierarchy], 0)
        return tf.add(index, 1), array
Example 38
def unsorted_segment_sum_emb(data, segment_ids, num_segments):
    num_rows = tf.shape(segment_ids)[0]
    rows_idx = tf.range(num_rows)
    rows_idx = tf.cast(rows_idx, segment_ids.dtype)
    segment_ids_per_row = segment_ids + num_segments * tf.expand_dims(rows_idx,
                                                                      axis=1)
    seg_sums = tf.unsorted_segment_sum(data, segment_ids_per_row,
                                       num_segments * num_rows)
    result = tf.reshape(seg_sums, [-1, num_segments, tf.shape(data)[-1]])
    return result
Example 39
    def gated_regression(self, last_h, regression_gate, regression_transform):
        # last_h: [v x h]
        gate_input = tf.concat([last_h, self.placeholders['initial_node_representation']], axis=-1)  # [v x 2h]
        gated_outputs = tf.nn.sigmoid(regression_gate(gate_input)) * regression_transform(last_h)  # [v x 1]

        # Sum up all nodes per-graph
        graph_representations = tf.unsorted_segment_sum(data=gated_outputs,
                                                        segment_ids=self.placeholders['graph_nodes_list'],
                                                        num_segments=self.placeholders['num_graphs'])  # [g x 1]
        return tf.squeeze(graph_representations)  # [g]
def sum_effects(E, Rr, bs, no):
    de = E.get_shape().as_list()[1]
    RrSum = tf.reshape(Rr, [-1, Rr.get_shape().as_list()[-1]])
    cum = tf.cumprod([bs, no], axis=-1, reverse=True, exclusive=True)
    RrSum = tf.matmul(RrSum, tf.expand_dims(cum, axis=-1))
    RrSum = tf.reshape(RrSum, [-1])

    E = tf.unsorted_segment_sum(E, RrSum, bs * no)
    E = tf.reshape(E, [bs, no, de])
    return E
Example 41
 def _add_colorfulness_loss(self, generated_img, num_colors=5):
     binned_values = tf.reshape(tf.floor(generated_img * (num_colors - 1)), [-1])
     binned_values = tf.cast(binned_values, tf.int32)
     ones = tf.ones_like(binned_values, dtype=tf.int32)
     histogram = tf.unsorted_segment_sum(ones, binned_values, num_colors)
     _colorfulness_loss = tf.cast(- tf.reduce_max(histogram), tf.float32)
     colorfulness_loss = tf.divide(_colorfulness_loss, tf.cast(tf.size(generated_img), tf.float32),
                                   name="colorfulness_loss")
     tf.losses.add_loss(colorfulness_loss)
     return colorfulness_loss
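
The loss above builds a color histogram by segment-summing ones over binned pixel values. The same idea in isolation, with hypothetical values, assuming TensorFlow 1.x:

import tensorflow as tf

values = tf.constant([0.1, 0.9, 0.5, 0.95, 0.4])
num_bins = 5
bins = tf.cast(tf.floor(values * (num_bins - 1)), tf.int32)         # bin index per value: [0, 3, 2, 3, 1]
hist = tf.unsorted_segment_sum(tf.ones_like(bins), bins, num_bins)  # counts per bin
with tf.Session() as sess:
    print(sess.run(hist))  # [1 1 1 2 0]
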
Example 42
  def __init__(self, requests, expert_capacity):
    """Create a TruncatingDispatcher.

    Args:
      requests: a boolean `Tensor` of shape `[batch, length, num_experts]`.
        Alternatively, a float or int Tensor containing zeros and ones.
      expert_capacity: a Scalar - maximum number of examples per expert per
        batch element.

    Returns:
      a TruncatingDispatcher
    """
    self._requests = tf.to_float(requests)
    self._expert_capacity = expert_capacity
    expert_capacity_f = tf.to_float(expert_capacity)
    self._batch, self._length, self._num_experts = tf.unstack(
        tf.shape(self._requests), num=3)

    # [batch, length, num_experts]
    position_in_expert = tf.cumsum(self._requests, axis=1, exclusive=True)
    # [batch, length, num_experts]
    self._gates = self._requests * tf.to_float(
        tf.less(position_in_expert, expert_capacity_f))
    batch_index = tf.reshape(
        tf.to_float(tf.range(self._batch)), [self._batch, 1, 1])
    length_index = tf.reshape(
        tf.to_float(tf.range(self._length)), [1, self._length, 1])
    expert_index = tf.reshape(
        tf.to_float(tf.range(self._num_experts)), [1, 1, self._num_experts])
    # position in a Tensor with shape [batch * num_experts * expert_capacity]
    flat_position = (
        position_in_expert +
        batch_index * (tf.to_float(self._num_experts) * expert_capacity_f) +
        expert_index * expert_capacity_f)
    # Tensor of shape [batch * num_experts * expert_capacity].
    # each element is an integer in [0, length)
    self._indices = tf.unsorted_segment_sum(
        data=tf.reshape((length_index + 1.0) * self._gates, [-1]),
        segment_ids=tf.to_int32(tf.reshape(flat_position, [-1])),
        num_segments=self._batch * self._num_experts * expert_capacity)
    self._indices = tf.reshape(
        self._indices,
        [self._batch, self._num_experts, expert_capacity])
    # Tensors of shape [batch, num_experts, expert_capacity].
    # each element is 0.0 or 1.0
    self._nonpadding = tf.minimum(self._indices, 1.0)
    # each element is an integer in [0, length)
    self._indices = tf.nn.relu(self._indices - 1.0)
    # self._flat_indices is [batch, num_experts, expert_capacity], with values
    # in [0, batch * length)
    self._flat_indices = tf.to_int32(
        self._indices +
        (tf.reshape(tf.to_float(tf.range(self._batch)), [-1, 1, 1])
         * tf.to_float(self._length)))
    self._indices = tf.to_int32(self._indices)
Example 44
    def get_inertia(self, centroids):
        with tf.name_scope('inertia'):
            labels = self.get_labels(centroids)
            if self.beta is not None:
                X_ = tf.expand_dims(self.X, 2)  # B L 1 E
                centroids_ = tf.expand_dims(centroids, 1)  # B 1 C E
                dist = tf.reduce_sum(
                    tf.square(X_ - centroids_), -1
                ) * labels  # B C distance to clusters according to soft assignments
                dist = tf.reduce_sum(dist, 1)  # Sum over points
                density = tf.reduce_sum(
                    labels, 1)  # Compute the soft density of each cluster
                inertia = tf.divide(dist, density)
                inertia = tf.reduce_sum(inertia, -1)
            else:
                # centroids [b*nb_tries, C, E]
                centroids_flattened = tf.reshape(
                    centroids, [self.B * self.nb_clusters, self.E])

                # Add + [0,1,2]
                idx_flattened = tf.reshape(labels + self.shifting,
                                           [self.B * self.L])  # [b*nb_tries*L]
                X_flattened = tf.reshape(
                    self.X, [self.B * self.L, self.E])  # [b*nb_tries*L, E]

                dist = tf.reduce_sum(
                    tf.square(X_flattened -
                              tf.gather(centroids_flattened, idx_flattened)),
                    -1)

                total = tf.unsorted_segment_sum(dist, idx_flattened,
                                                self.B * self.nb_clusters)
                count = tf.unsorted_segment_sum(tf.ones_like(dist),
                                                idx_flattened,
                                                self.B * self.nb_clusters)

                # [self.B*self.nb_clusters]
                inertia = total / count
                inertia = tf.reshape(inertia, [self.B, self.nb_clusters])
                inertia = tf.reduce_sum(inertia, -1)

            return inertia
Example 45
def MoG_validation(K):
	MoG_valid = mog.MoG("data2D.npy")
	_, X_data, mu, _, sigma_2, log_pi, pi_np = MoG_valid.cluster(K, D, B, 1.0/3.0)
	# _, X_data, mu, _, sigma_2, log_pi, pi_np = MoG_valid.cluster(K, D, B)

	loss_valid = MoG_valid.cal_loss(MoG_valid.validation.astype(np.float32), mu, D, log_pi, sigma_2)
	min_idx = MoG_valid.cal_min_idx(X_data, mu, np.sqrt(sigma_2), pi_np, D)

	data = tf.ones(shape = [B,])
	division = tf.unsorted_segment_sum(data, min_idx, K, name=None)

	data_valid = tf.ones(shape = [(B - (1 - 1/3) * B), ])
	min_idx_valid = MoG_valid.cal_min_idx(MoG_valid.validation.astype(np.float32), mu, np.sqrt(sigma_2), pi_np, D)
	division_valid = tf.unsorted_segment_sum(data_valid, min_idx_valid, K, name = None)

	with tf.Session():
		print 'loss_validation:', loss_valid.eval()
		print 'Total Proportion:', division.eval()/10000

		plot.plot_cluster(min_idx.eval(), X_data, mu, K)
		plot.plot_valid_cluster(min_idx_valid.eval(), MoG_valid.validation, mu, K)
Esempio n. 46
0
 def testGradient(self):
     num_cols = 2
     indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
     num_segments = max(indices_flat) + 3
     for indices in indices_flat, indices_flat.reshape(5, 2):
         shape = indices.shape + (num_cols,)
         with self.test_session(use_gpu=self.use_gpu):
             tf_x, np_x = self._input(shape, dtype=tf.float64)
             s = tf.unsorted_segment_sum(data=tf_x, segment_ids=indices, num_segments=num_segments)
             jacob_t, jacob_n = tf.test.compute_gradient(
                 tf_x, shape, s, [num_segments, num_cols], x_init_value=np_x.astype(np.double), delta=1
             )
         self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
Esempio n. 47
0
def k_comparison(K):
    D = 2
    B = 10000

    KM = km.k_mean("data2D.npy")
    _, segment_ids, X_data, mu = KM.cluster(K, D, B)

    data = tf.ones(shape=[B])
    division = tf.unsorted_segment_sum(data, segment_ids, K, name=None)

    with tf.Session():
        print "K =", K, ":", division.eval() / 10000
        plot.plot_cluster(segment_ids, X_data, mu, K)
Esempio n. 48
0
def model(x, segmentinds, keep_prob, batchsize, neuronList, activationType,
          fplength, mask, name, dxdxik, tilederiv,element):
    """Generates a multilayer neural network with variable number
    of neurons, so that we have a template for each atom's NN."""

    nNeurons = neuronList[0]
    # Pass  the input tensors through the first soft-plus layer
    W_fc = weight_variable([fplength, nNeurons], name=name+element)
    b_fc = bias_variable([nNeurons], name=name)
    h_fc = activationType(tf.matmul(x, W_fc) + b_fc)
    #h_fc = tf.nn.dropout(activationType(tf.matmul(x, W_fc) + b_fc),keep_prob)

    if len(neuronList) > 1:
        for i in range(1, len(neuronList)):
            nNeurons = neuronList[i]
            nNeuronsOld = neuronList[i - 1]
            W_fc = weight_variable([nNeuronsOld, nNeurons], name=name)
            b_fc = bias_variable([nNeurons], name=name)
            h_fc = tf.nn.dropout(activationType(
                tf.matmul(h_fc, W_fc) + b_fc), keep_prob)

    W_fc_out = weight_variable([neuronList[-1], 1], name=name)
    b_fc_out = bias_variable([1], name=name)
    y_out = tf.matmul(h_fc, W_fc_out) + b_fc_out

    # Sum the predicted energy for each molecule
    reducedSum = tf.unsorted_segment_sum(y_out, segmentinds, batchsize)

    dEjdgj = tf.gradients(y_out, x)[0]
    dEjdgj1 = tf.expand_dims(dEjdgj, 1)
    dEjdgj2 = tf.expand_dims(dEjdgj1, 1)
    dEjdgjtile = tf.tile(dEjdgj2, tilederiv)
    dEdxik = tf.multiply(dxdxik, dEjdgjtile)
    dEdxikReduce = tf.reduce_sum(dEdxik, 3)
    dEdxik_reduced = tf.unsorted_segment_sum(
        dEdxikReduce, segmentinds, batchsize)
    return tf.multiply(reducedSum, mask), dEdxik_reduced
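A minimal sketch of the per-molecule pooling performed by tf.unsorted_segment_sum(y_out, segmentinds, batchsize) above, with invented per-atom energies and a TF 1.x session assumed:

import tensorflow as tf

# Five atoms belonging to two molecules (batchsize = 2).
atom_energies = tf.constant([[0.1], [0.2], [0.3], [0.4], [0.5]])  # per-atom outputs
segmentinds = tf.constant([0, 0, 1, 1, 1])                        # atom -> molecule id
molecule_energy = tf.unsorted_segment_sum(atom_energies, segmentinds, 2)
with tf.Session() as sess:
    print(sess.run(molecule_energy))  # approx. [[0.3], [1.2]]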
Esempio n. 49
0
def accumulate_sparse_gradients(grad):
  """Accumulates repeated indices of a sparse gradient update.

  Args:
    grad: a tf.IndexedSlices gradient

  Returns:
    grad_indices: unique indices
    grad_values: gradient values corresponding to the indices
  """

  grad_indices, grad_segments = tf.unique(grad.indices)
  grad_values = tf.unsorted_segment_sum(grad.values, grad_segments,
                                        tf.shape(grad_indices)[0])
  return grad_indices, grad_values
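A hypothetical usage sketch (TF 1.x assumed, toy values): collapsing a tf.IndexedSlices gradient in which row index 2 appears twice, assuming accumulate_sparse_gradients above is in scope.

import tensorflow as tf

grad = tf.IndexedSlices(
    values=tf.constant([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),
    indices=tf.constant([2, 5, 2]),
    dense_shape=tf.constant([10, 2]))
unique_idx, summed = accumulate_sparse_gradients(grad)
with tf.Session() as sess:
    idx, vals = sess.run([unique_idx, summed])
    print(idx)   # [2 5]
    print(vals)  # [[4. 4.]
                 #  [2. 2.]]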
Esempio n. 50
0
def _rowwise_unsorted_segment_sum(values, indices, n):
  """UnsortedSegmentSum on each row.

  Args:
    values: a `Tensor` with shape `[batch_size, k]`.
    indices: an integer `Tensor` with shape `[batch_size, k]`.
    n: an integer.
  Returns:
    A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
  """
  batch, k = tf.unstack(tf.shape(indices), num=2)
  indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
  ret_flat = tf.unsorted_segment_sum(
      tf.reshape(values, [-1]), indices_flat, batch * n)
  return tf.reshape(ret_flat, [batch, n])
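A small usage sketch with invented values (TF 1.x assumed, helper above in scope): each row of indices scatters its row of values into an independent length-n output row.

import tensorflow as tf

values = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # [batch=2, k=2]
indices = tf.constant([[0, 3], [1, 1]])         # per-row bin ids in [0, n)
out = _rowwise_unsorted_segment_sum(values, indices, 4)
with tf.Session() as sess:
    print(sess.run(out))
    # [[1. 0. 0. 2.]
    #  [0. 7. 0. 0.]]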
Esempio n. 51
0
  def grad_variance(self):
    grad_var_ops = []
    tensor_to_avg = []
    for t, g in zip(self._tvars, self._grads):
      if isinstance(g, ops.IndexedSlices):
        tensor_to_avg.append(
            tf.reshape(tf.unsorted_segment_sum(g.values, g.indices, g.dense_shape[0]),
                       shape=t.get_shape()))
      else:
        tensor_to_avg.append(g)
    avg_op = self._moving_averager.apply(tensor_to_avg)
    grad_var_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_avg = [self._moving_averager.average(val) for val in tensor_to_avg]
      self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]
    self._grad_var = self._grad_norm_squared_avg - tf.add_n(
        [tf.reduce_sum(val) for val in self._grad_avg_squared])
    return grad_var_ops
Esempio n. 52
0
def _deduplicate_indexed_slices(values, indices):
    """Sums `values` associated with any non-unique `indices`.
    Args:
      values: A `Tensor` with rank >= 1.
      indices: A one-dimensional integer `Tensor`, indexing into the first
        dimension of `values` (as in an IndexedSlices object).
    Returns:
      A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
      de-duplicated version of `indices` and `summed_values` contains the sum of
      `values` slices associated with each unique index.
    """
    unique_indices, new_index_positions = tf.unique(indices)
    summed_values = tf.unsorted_segment_sum(values,
                                            new_index_positions,
                                            tf.shape(unique_indices)[0])
    return (summed_values, unique_indices)
Esempio n. 53
0
  def combine(self, x):
    """Return the output from the experts.

    When one example goes to multiple experts, the outputs are summed.

    Args:
      x: a Tensor with shape [batch, num_experts, expert_capacity, depth]

    Returns:
      a `Tensor` with shape `[batch, length, depth]`.
    """
    depth = tf.shape(x)[-1]
    x *= tf.expand_dims(self._nonpadding, -1)
    ret = tf.unsorted_segment_sum(
        x, self._flat_indices, num_segments=self._batch * self._length)
    ret = tf.reshape(ret, [self._batch, self._length, depth])
    return ret
Esempio n. 54
0
def extract_fixed_feature_ids(comp, state, stride):
  """Extracts fixed feature IDs.

  Args:
    comp: Component whose fixed feature IDs we wish to extract.
    state: Live MasterState object for the component.
    stride: Tensor containing current batch * beam size.

  Returns:
    state handle: Updated state handle to be used after this call.
    ids: List of [stride * num_steps, 1] feature IDs per channel.  Missing IDs
         (e.g., due to batch padding) are set to -1.
  """
  num_channels = len(comp.spec.fixed_feature)
  if not num_channels:
    return state.handle, []

  for feature_spec in comp.spec.fixed_feature:
    check.Eq(feature_spec.size, 1, 'All features must have size=1')
    check.Lt(feature_spec.embedding_dim, 0, 'All features must be non-embedded')

  state.handle, indices, ids, _, num_steps = dragnn_ops.bulk_fixed_features(
      state.handle, component=comp.name, num_channels=num_channels)
  size = stride * num_steps

  fixed_ids = []
  for channel, feature_spec in enumerate(comp.spec.fixed_feature):
    tf.logging.info('[%s] Adding fixed feature IDs "%s"', comp.name,
                    feature_spec.name)

    # The +1 and -1 increments ensure that missing IDs default to -1.
    #
    # TODO(googleuser): This formula breaks if multiple IDs are extracted at some
    # step.  Try using tf.unique() to enforce the unique-IDS precondition.
    sums = tf.unsorted_segment_sum(ids[channel] + 1, indices[channel], size) - 1
    sums = tf.expand_dims(sums, axis=1)
    fixed_ids.append(network_units.NamedTensor(sums, feature_spec.name, dim=1))
  return state.handle, fixed_ids
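A toy sketch of the +1 / -1 default trick above (TF 1.x assumed, values invented): a step that extracts no ID receives a segment sum of 0, which the final decrement turns into -1.

import tensorflow as tf

size = 4                          # stride * num_steps
ids = tf.constant([7, 9, 5])      # extracted feature IDs
indices = tf.constant([0, 1, 3])  # steps they belong to; step 2 extracted nothing
sums = tf.unsorted_segment_sum(ids + 1, indices, size) - 1
with tf.Session() as sess:
    print(sess.run(sums))         # [ 7  9 -1  5]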
Esempio n. 55
0
  def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, weighted by the gates.

    The slice corresponding to a particular batch element `b` is computed
    as the sum over all experts `i` of the expert output, weighted by the
    corresponding gate values.  If `multiply_by_gates` is set to False, the
    gate values are ignored.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean

    Returns:
      a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
    """
    # see comments on convert_gradient_to_tensor
    stitched = convert_gradient_to_tensor(tf.concat(expert_out, 0))
    if multiply_by_gates:
      stitched *= tf.expand_dims(self._nonzero_gates, 1)
    combined = tf.unsorted_segment_sum(stitched, self._batch_index,
                                       tf.shape(self._gates)[0])
    return combined
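A minimal sketch of the gate-weighted recombination with invented tensors (TF 1.x assumed): two expert outputs routed to batch element 0 are scaled by their gates and summed.

import tensorflow as tf

stitched = tf.constant([[1.0, 1.0], [2.0, 2.0], [5.0, 5.0]])  # concatenated expert outputs
batch_index = tf.constant([0, 0, 1])                          # original batch positions
nonzero_gates = tf.constant([0.5, 0.5, 1.0])                  # gate value per routed example
combined = tf.unsorted_segment_sum(
    stitched * tf.expand_dims(nonzero_gates, 1), batch_index, 2)
with tf.Session() as sess:
    print(sess.run(combined))  # [[1.5 1.5]
                               #  [5.  5. ]]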
Esempio n. 56
0
def find_dup(a):
  """ Find the duplicated elements in 1-D a tensor.
  Args:
    a: 1-D tensor.
    
  Return:
    more_than_one_vals: duplicated value in a.
    indexes_in_a: duplicated value's index in a.
    dups_in_a: duplicated value with duplicate in a.
  """
  unique_a_vals, unique_idx = tf.unique(a)
  count_a_unique = tf.unsorted_segment_sum(tf.ones_like(a),
                                           unique_idx,
                                           tf.shape(a)[0])

  more_than_one = tf.greater(count_a_unique, 1)
  more_than_one_idx = tf.squeeze(tf.where(more_than_one))
  more_than_one_vals = tf.squeeze(tf.gather(unique_a_vals, more_than_one_idx))

  not_duplicated, _ = tf.setdiff1d(a, more_than_one_vals)
  dups_in_a, indexes_in_a = tf.setdiff1d(a, not_duplicated)

  return more_than_one_vals, indexes_in_a, dups_in_a
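A hypothetical usage of find_dup on a small tensor (TF 1.x assumed, function above in scope):

import tensorflow as tf

a = tf.constant([3, 7, 3, 9, 7, 3])
more_than_one_vals, indexes_in_a, dups_in_a = find_dup(a)
with tf.Session() as sess:
    vals, idx, dups = sess.run([more_than_one_vals, indexes_in_a, dups_in_a])
    print(vals)  # [3 7]          values that occur more than once
    print(idx)   # [0 1 2 4 5]    their positions in a
    print(dups)  # [3 7 3 7 3]    the duplicated entries themselves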
Esempio n. 57
0
def SampledSoftmaxLoss(features, sampler, num_classes, target_classes,
                       target_params, sampled_classes, sampled_params):
  """Loss for training softmax classifiers on large label vocabulary.

  This function assumes that we have already chosen the sampled classes and
  fetched the parameters for the target classes and the sampled classes.

  Args:
    features: a Tensor with shape [batch_size, hidden_size]
    sampler: a candidate sampler object
    num_classes: an integer
    target_classes: an integer Tensor with shape [batch_size]
    target_params: a Tensor with shape [batch_size, hidden_size]
      The parameters corresponding to the target classes.
    sampled_classes: an integer tensor with shape [num_sampled_classes]
    sampled_params: a Tensor with shape [num_sampled_classes, hidden_size]
      The parameters corresponding to the sampled classes.

  Returns:
    a Tensor with shape [batch_size]
  """
  sampled_logits = (tf.matmul(features, sampled_params, transpose_b=True) -
                    sampler.log_expected_count(sampled_classes))
  target_logits = (tf.reduce_sum(target_params * features, 1) -
                   sampler.log_expected_count(target_classes))
  sampled_log_denominator = tf.reduce_logsumexp(
      sampled_logits, [1], name='SampledLogDenominator')
  sampled_classes_mask = tf.unsorted_segment_sum(
      tf.fill(tf.shape(sampled_classes), float('-inf')), sampled_classes,
      num_classes)
  target_log_denominator = (
      target_logits + tf.gather(sampled_classes_mask, target_classes))
  combined_log_denominator = tf.reduce_logsumexp(
      tf.stack([sampled_log_denominator, target_log_denominator]), [0])
  loss = combined_log_denominator - target_logits
  return loss
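A toy sketch of the -inf mask built above (TF 1.x assumed, sizes invented): sampled class ids map to -inf, so a target class that was also sampled adds -inf to target_log_denominator and its duplicate term drops out of the combined logsumexp, avoiding double counting.

import tensorflow as tf

num_classes = 5
sampled_classes = tf.constant([1, 3])
mask = tf.unsorted_segment_sum(
    tf.fill(tf.shape(sampled_classes), float('-inf')), sampled_classes, num_classes)
with tf.Session() as sess:
    print(sess.run(mask))  # [  0. -inf   0. -inf   0.]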
Esempio n. 58
0
def bucket_mean(data, bucket_ids, num_buckets):
    total = tf.unsorted_segment_sum(data, bucket_ids, num_buckets)
    count = tf.unsorted_segment_sum(tf.ones_like(data), bucket_ids, num_buckets)
    return total / count
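A hypothetical usage of bucket_mean (TF 1.x assumed, function above in scope):

import tensorflow as tf

data = tf.constant([1.0, 2.0, 3.0, 4.0])
bucket_ids = tf.constant([0, 1, 0, 1])
with tf.Session() as sess:
    print(sess.run(bucket_mean(data, bucket_ids, 2)))  # [2. 3.]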
Esempio n. 59
0
def discriminative_loss_single(
        prediction,
        correct_label,
        feature_dim,
        label_shape,
        delta_v,
        delta_d,
        param_var,
        param_dist,
        param_reg):
    """
    Instance segmentation loss described in Eq. (1) of the paper.
    :param prediction: inference of network
    :param correct_label: instance label
    :param feature_dim: feature dimension of prediction
    :param label_shape: shape of label
    :param delta_v: cut off variance distance
    :param delta_d: cut off cluster distance
    :param param_var: weight for intra cluster variance
    :param param_dist: weight for inter cluster distances
    :param param_reg: weight regularization
    """

    # Flatten the pixels into a single dimension
    correct_label = tf.reshape(
        correct_label, [
            label_shape[1] * label_shape[0]])
    reshaped_pred = tf.reshape(
        prediction, [
            label_shape[1] * label_shape[0], feature_dim])

    # Count the number of instances
    unique_labels, unique_id, counts = tf.unique_with_counts(correct_label)
    counts = tf.cast(counts, tf.float32)
    num_instances = tf.size(unique_labels)

    # Compute the mean pixel-embedding vector of each instance
    segmented_sum = tf.unsorted_segment_sum(
        reshaped_pred, unique_id, num_instances)
    mu = tf.div(segmented_sum, tf.reshape(counts, (-1, 1)))
    mu_expand = tf.gather(mu, unique_id)

    # Compute the variance term of the loss (l_var)
    distance = tf.norm(tf.subtract(mu_expand, reshaped_pred), axis=1)
    distance = tf.subtract(distance, delta_v)
    distance = tf.clip_by_value(distance, 0., distance)
    distance = tf.square(distance)

    l_var = tf.unsorted_segment_sum(distance, unique_id, num_instances)
    l_var = tf.div(l_var, counts)
    l_var = tf.reduce_sum(l_var)
    l_var = tf.divide(l_var, tf.cast(num_instances, tf.float32))

    # Compute the distance term of the loss (l_dist)
    mu_interleaved_rep = tf.tile(mu, [num_instances, 1])
    mu_band_rep = tf.tile(mu, [1, num_instances])
    mu_band_rep = tf.reshape(
        mu_band_rep,
        (num_instances *
         num_instances,
         feature_dim))

    mu_diff = tf.subtract(mu_band_rep, mu_interleaved_rep)

    # Remove the zero vectors (each centroid paired with itself)
    intermediate_tensor = tf.reduce_sum(tf.abs(mu_diff), axis=1)
    zero_vector = tf.zeros(1, dtype=tf.float32)
    bool_mask = tf.not_equal(intermediate_tensor, zero_vector)
    mu_diff_bool = tf.boolean_mask(mu_diff, bool_mask)

    mu_norm = tf.norm(mu_diff_bool, axis=1)
    mu_norm = tf.subtract(2. * delta_d, mu_norm)
    mu_norm = tf.clip_by_value(mu_norm, 0., mu_norm)
    mu_norm = tf.square(mu_norm)

    l_dist = tf.reduce_mean(mu_norm)

    # Compute the regularization term from the original Discriminative Loss paper
    l_reg = tf.reduce_mean(tf.norm(mu, axis=1))

    # Combine the terms with the weights given in the original Discriminative Loss paper
    param_scale = 1.
    l_var = param_var * l_var
    l_dist = param_dist * l_dist
    l_reg = param_reg * l_reg

    loss = param_scale * (l_var + l_dist + l_reg)

    return loss, l_var, l_dist, l_reg
Esempio n. 60
0
    def adloss(self, x, xt, y, yt):
        with tf.variable_scope('reuse_inference') as scope:
            scope.reuse_variables()
            self.inference(x, training=True)
            source_flattened = self.flattened
            source_fc1 = self.fc1
            source_fc2 = self.fc2
            source_feature = self.feature
            scope.reuse_variables()
            self.inference(xt, training=True)
            target_flattened = self.flattened
            target_fc1 = self.fc1
            target_fc2 = self.fc2
            target_feature = self.feature
            target_pred = self.output
        with tf.variable_scope('reuse') as scope:
            source_logits, _ = D(source_feature)
            scope.reuse_variables()
            target_logits, _ = D(target_feature)

        self.target_pred = target_pred
        self.source_feature = source_feature
        self.target_feature = target_feature
        self.concat_feature = tf.concat([source_feature, target_feature], 0)
        self.last_feature = tf.concat([source_fc1, target_fc1], 0)
        source_result = tf.argmax(y, 1)
        target_result = tf.argmax(target_pred, 1)

        # Use the ground-truth yt to test:
        # target_result = tf.argmax(yt, 1)

        # Use tf.ones to avoid division by zero.
        ones = tf.ones_like(source_feature)
        current_source_count = tf.unsorted_segment_sum(ones, source_result, self.num_classes)
        current_target_count = tf.unsorted_segment_sum(ones, target_result, self.num_classes)

        current_positive_source_count = tf.maximum(current_source_count, tf.ones_like(current_source_count))
        current_positive_target_count = tf.maximum(current_target_count, tf.ones_like(current_target_count))

        current_source_centroid = tf.divide(
            tf.unsorted_segment_sum(data=source_feature, segment_ids=source_result,
                                    num_segments=self.num_classes),
            current_positive_source_count)
        current_target_centroid = tf.divide(
            tf.unsorted_segment_sum(data=target_feature, segment_ids=target_result,
                                    num_segments=self.num_classes),
            current_positive_target_count)
        self.current_target_centroid = current_target_centroid

        source_decay = tf.constant(.3)
        target_decay = tf.constant(.3)

        self.source_decay = source_decay
        self.target_decay = target_decay

        source_centroid = source_decay * current_source_centroid + (1. - source_decay) * self.source_moving_centroid
        target_centroid = target_decay * current_target_centroid + (1. - target_decay) * self.target_moving_centroid

        self.Entropyloss = tf.constant(0.)
        self.Semanticloss = protoloss(source_centroid, target_centroid)

        # Compare individual-sample alignment with the centroid alignment method:
        # self.Semanticloss = supervised_semantic_loss(source_feature, target_feature,
        #                                              source_result, target_result)

        D_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=target_logits, labels=tf.ones_like(target_logits)))
        D_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=source_logits, labels=tf.zeros_like(source_logits)))
        self.D_loss = D_real_loss + D_fake_loss

        self.G_loss = -self.D_loss
        tf.summary.scalar('JSD', self.G_loss / 2 + math.log(2))

        # The domain adversarial loss is scaled by 0.1 following RevGrad.
        self.G_loss = 0.1 * self.G_loss
        self.D_loss = 0.1 * self.D_loss
        return self.G_loss, self.D_loss, source_centroid, target_centroid
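A minimal sketch of the class-centroid computation with the ones/tf.maximum guard used above, on invented features where class 2 receives no samples (TF 1.x assumed):

import tensorflow as tf

features = tf.constant([[1.0, 1.0], [3.0, 3.0], [2.0, 0.0]])
labels = tf.constant([0, 0, 1])
num_classes = 3
counts = tf.unsorted_segment_sum(tf.ones_like(features), labels, num_classes)
counts = tf.maximum(counts, tf.ones_like(counts))  # clamp empty classes to 1 to avoid 0/0
centroids = tf.unsorted_segment_sum(features, labels, num_classes) / counts
with tf.Session() as sess:
    print(sess.run(centroids))
    # [[2. 2.]
    #  [2. 0.]
    #  [0. 0.]]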