import numpy as np
import tensorflow as tf

import nn_Ops
# Metric-learning losses shipped with TF 1.x contrib; the remaining losses
# (contrastive_loss_v2, triplet_loss, new_npair_loss) are project-local
# implementations defined elsewhere in this repository.
from tensorflow.contrib.losses.python.metric_learning import (
    lifted_struct_loss, npairs_loss, triplet_semihard_loss)

# Module-level flags object, as used throughout the repo.
FLAGS = tf.app.flags.FLAGS


def Loss(embedding, label, _lossType="Softmax", loss_l2_reg=FLAGS.loss_l2_reg):
    """Dispatches to the metric-learning loss selected by `_lossType`.

    The batch is assumed to be stacked as [anchors; positives] along axis 0.
    """
    # Split the batch into its anchor and positive halves.
    embedding_split = tf.split(embedding, 2, axis=0)
    label_split = tf.split(label, 2, axis=0)
    embedding_anchor = embedding_split[0]
    embedding_positive = embedding_split[1]
    label_positive = label_split[1]
    _Loss = 0

    if _lossType == "Softmax":
        print("Use Softmax")
        # Plain softmax classification over the whole batch; `label` must be
        # one-hot here, unlike the sparse labels used by the other branches.
        W_fc2 = nn_Ops.weight_variable([FLAGS.gap_dim, 10])
        b_fc2 = nn_Ops.bias_variable([10])
        y_conv = tf.matmul(embedding, W_fc2) + b_fc2
        _Loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=y_conv))

    elif _lossType == "Contrastive_Loss":
        print("Use Contrastive_Loss_v2")
        # Halve each half again so the loss sees two (anchor, positive) pairs.
        embedding_anchor = tf.split(embedding_anchor, 2, axis=0)
        embedding_positive = tf.split(embedding_positive, 2, axis=0)
        _Loss = contrastive_loss_v2(
            embedding_anchor[0], embedding_anchor[1],
            embedding_positive[0], embedding_positive[1], alpha=1.0)

    elif _lossType == "Triplet_Semihard":
        print("Use Triplet_Semihard")
        _Loss = triplet_semihard_loss(label, embedding)

    elif _lossType == "LiftedStructLoss":
        print("Use LiftedStructLoss")
        _Loss = lifted_struct_loss(label, embedding)

    elif _lossType == "NpairLoss":
        print("Use NpairLoss")
        # N-pair loss only needs the positive half's labels: anchors and
        # positives at the same row index share a class.
        _Loss = npairs_loss(label_positive, embedding_anchor,
                            embedding_positive, reg_lambda=loss_l2_reg)

    elif _lossType == "Triplet":
        print("Use Triplet Loss")
        # Triplet batches are stacked as [anchors; positives; negatives].
        embedding3 = tf.split(embedding, 3, axis=0)
        anchor, positive, negative = embedding3
        _Loss = triplet_loss(anchor, positive, negative)

    elif _lossType == "New_npairLoss":
        print("Use new NpairLoss")
        _Loss = new_npair_loss(
            labels=label,
            embedding_anchor=embedding_anchor,
            embedding_positive=embedding_positive,
            reg_lambda=loss_l2_reg, equal_shape=True,
            half_batch_size=int(FLAGS.batch_size / 2))

    return _Loss
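
# Usage sketch, not from the original file: builds the N-pair branch over a
# placeholder graph. Assumes TF 1.x graph mode, an even batch stacked as
# [anchors; positives], and illustrative sizes (batch 128, embedding dim 1024).
def _example_npair_graph():
    embedding = tf.placeholder(tf.float32, [128, 1024], name="embedding")
    label = tf.placeholder(tf.int64, [128], name="label")
    loss = Loss(embedding, label, _lossType="NpairLoss", loss_l2_reg=5e-3)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
    return loss, train_step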
def cross_entropy(embedding, label, size=1024):
    """Softmax classifier head on top of `embedding`, using sparse labels."""
    with tf.variable_scope("Softmax_classifier"):
        W_fc = nn_Ops.weight_variable([size, FLAGS.num_class], "softmax_w",
                                      wd=False)
        b_fc = nn_Ops.bias_variable([FLAGS.num_class], "softmax_b")
        Logits = tf.matmul(embedding, W_fc) + b_fc
        cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,
                                                           logits=Logits))
    return cross_entropy, W_fc, b_fc
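
# Usage sketch, not from the original file: attaches the classifier head to an
# embedding tensor. Assumes FLAGS.num_class is defined and `label` holds sparse
# integer class ids in [0, FLAGS.num_class); the optimizer choice is illustrative.
def _example_classifier_head(embedding, label):
    # embedding: [batch, 1024] float32; label: [batch] int32/int64.
    xent, W_fc, b_fc = cross_entropy(embedding, label, size=1024)
    train_step = tf.train.MomentumOptimizer(1e-3, 0.9).minimize(xent)
    return xent, train_step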
def Binary_softmax(anc_emb, pos_emb, label, cycle, weight, emb_size):
    """Binary verification loss: classifies concatenated (anchor, positive)
    pairs as same/different class, rotating `pos_emb` to manufacture negatives.

    Assumes a batch size of 64 and that 2 * emb_size == 2048.
    """
    cross_entropy = tf.constant(0., tf.float32)
    with tf.variable_scope("Softmax_classifier"):
        W_fc = nn_Ops.weight_variable([2048, 2], "softmax_w", wd=False)
        b_fc = nn_Ops.bias_variable([2], "softmax_b")
        for i in range(min(cycle, 64)):
            # Rotate the positive embeddings (and their labels) by i rows so
            # each anchor is paired with a shifted, usually mismatched, sample.
            pos_f = tf.slice(input_=pos_emb, begin=[0, 0], size=[i, emb_size])
            label_f = tf.slice(input_=label, begin=[0], size=[i])
            pos_b = tf.slice(input_=pos_emb, begin=[i, 0],
                             size=[64 - i, emb_size])
            label_b = tf.slice(input_=label, begin=[i], size=[64 - i])
            pos_temp = tf.concat([pos_b, pos_f], axis=0)
            label_temp = tf.concat([label_b, label_f], axis=0)

            logits = tf.matmul(tf.concat([anc_emb, pos_temp], axis=1),
                               W_fc) + b_fc
            # 1 where the rotated pair still shares a class label, else 0.
            label_binary = tf.cast(tf.equal(label, label_temp), tf.int32)
            # Up-weight the mismatched (negative) pairs by `weight`.
            weight_m = (tf.cast(tf.logical_not(tf.equal(label, label_temp)),
                                tf.float32) * weight
                        + tf.cast(label_binary, tf.float32))
            cross_entropy += tf.reduce_mean(
                tf.multiply(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits=logits, labels=label_binary),
                    weight_m)) / np.float32(cycle)
    return cross_entropy, W_fc, b_fc
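
# Usage sketch, not from the original file: the rotation trick above hard-codes
# 64 anchor/positive rows, so the shapes below mirror that; `cycle` and `weight`
# values here are illustrative. `cycle` sets how many rotations contribute and
# `weight` scales the loss on mismatched pairs.
def _example_binary_verification(anc_emb, pos_emb, label):
    # anc_emb, pos_emb: [64, 1024] float32; label: [64] int32.
    loss, W_fc, b_fc = Binary_softmax(anc_emb, pos_emb, label,
                                      cycle=16, weight=2.0, emb_size=1024)
    return loss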