def choice_label_by_max(self, logits):
    """Pick the column whose row-0 softmax probability most exceeds row-1's.

    The last axis of ``logits`` is assumed to hold 2*k values laid out
    row-major as a [2, k] matrix: the first k entries form row 0 and the
    last k form row 1 (presumably "positive" vs "negative" scores per
    choice — TODO confirm with the caller).

    Args:
        logits: 1-D np.ndarray of even length 2*k.

    Returns:
        A single-element list ``[label]`` where ``label`` is the index of
        the column maximizing ``softmax(col)[0] - softmax(col)[1]``.
    """
    num_choices = int(logits.shape[-1] / 2)
    # BUGFIX: np.reshape returns a *view* for a contiguous input, so the
    # in-place column writes below used to clobber the caller's `logits`.
    # Copy first so the input array is left untouched.
    probs = np.reshape(logits, [2, num_choices]).copy()
    for i in range(num_choices):
        probs[:, i] = softmax(probs[:, i])
    label = np.argmax(probs[0, :] - probs[1, :])
    return [label]
def _make_single_loss_weight(self, loss_weight_list):
    """Turn per-class counts into a softmax-normalized loss-weight vector.

    Each class gets a raw weight log(total / count) — rarer classes get
    larger weights — and the raw weights are then softmax-normalized.
    The ``max(count, 1)`` guard avoids division by zero for empty classes.

    Args:
        loss_weight_list: per-class counts (or raw weights).

    Returns:
        A float32 ``tf.constant`` of softmax-normalized weights.
    """
    total = sum(loss_weight_list)
    raw_weights = [math.log(total / max(count, 1)) for count in loss_weight_list]
    normalized = softmax(raw_weights)
    return tf.constant(normalized, dtype=tf.float32)
def _make_double_loss_weight(self, loss_weight_list):
    """Build a [num_classes, 2] loss-weight matrix.

    For every class the complement count ``total - count`` is paired with
    the count itself; each pair of log-inverse-frequency weights
    ``[log(total/count), log(total/complement)]`` is softmax-normalized
    independently, so each row sums to 1. ``max(., 1)`` guards against
    division by zero.

    Args:
        loss_weight_list: per-class counts (or raw weights).

    Returns:
        A float32 ``tf.constant`` of shape [len(loss_weight_list), 2].
    """
    total = sum(loss_weight_list)
    complements = [total - count for count in loss_weight_list]
    rows = []
    for count, complement in zip(loss_weight_list, complements):
        pair = softmax([math.log(total / max(count, 1)),
                        math.log(total / max(complement, 1))])
        rows.append(pair.tolist())
    return tf.constant(rows, dtype=tf.float32)
def _random_pick(self, word_prob): words, probs = zip(*word_prob) probs = softmax(np.array(probs)).tolist() x = uniform(0, 1) cum_prob = 0. result = None for item, prob in zip(words, probs): cum_prob += prob if x < cum_prob: result = item break return result
def predict(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Run the model on a batch and return (predicted classes, scores).

    Args:
        x: input batch; an empty batch short-circuits without touching
           the session.

    Returns:
        Tuple of (per-row argmax class ids, ``softmax`` of the raw
        outputs). NOTE(review): ``softmax(logits)`` is called without an
        explicit axis while ``argmax`` uses ``axis=1`` — presumably the
        project's ``softmax`` helper normalizes per row; verify.
    """
    if not len(x):
        # Empty batch: empty class ids and empty probabilities.
        return np.asarray([], dtype=np.int32), np.asarray([], dtype=np.float32)
    logits = self.sess.run(self._output, feed_dict={self._inputs: x})
    predicted = np.argmax(logits, axis=1)
    return predicted, softmax(logits)