def _nll(self, labels, logits):
        """Negative log likelihood: the reconstruction term in the ELBO.
        For multiclass classification this is the softmax cross entropy.

        Returns:
            The mean cross entropy TensorFlow op.
        """
        return tf.reduce_mean(
            softmax_cross_entropy_with_logits(labels=labels, logits=logits))

    def __init__(self,
                 data,
                 mode,
                 lstm_units,
                 hidden_units,
                 output_units,
                 init,
                 beta=0.0001,
                 keep_prob=0.7):
        self.data = data
        self.mode = mode
        self.lstm_units = lstm_units
        # a list of hidden layer sizes: lstm -> attention -> fc -> output
        self.hidden_units = hidden_units
        self.output_units = output_units
        self.init = init
        self.beta = beta
        self.keep_prob = keep_prob
        self.regularizer = layers.l2_regularizer(scale=self.beta)

        with tf.name_scope('RNN'):
            lstm_outputs = self._rnn(data.input)

        with tf.name_scope('Attention'):
            attention_output = self._attention(lstm_outputs)

        pred = self._full_connected(attention_output)
        self.pred = nn.softmax(pred)
        self.pred_label = tf.argmax(pred, 1)

        if self.mode != 'test':
            with tf.name_scope('Label'):
                label = tf.identity(data.label, 'label')

            with tf.name_scope('Loss'):
                normal_loss = tf.reduce_mean(
                        nn.softmax_cross_entropy_with_logits(logits=pred,
                                                             labels=label))
                # it's a list
                reg_losses = tf.get_collection(GraphKeys.REGULARIZATION_LOSSES)
                loss = tf.add(normal_loss,
                              tf.add_n(reg_losses),
                              name='loss')
            self.loss = loss
            tf.summary.scalar('loss', self.loss)

            with tf.name_scope('Acc'):
                correct_prediction = tf.equal(tf.argmax(pred, 1),
                                                tf.argmax(label, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                    tf.float32),
                                            name='acc')
            self.accuracy = accuracy
            tf.summary.scalar('accuracy', self.accuracy)
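For reference, tf.nn.softmax_cross_entropy_with_logits expects unnormalized logits and one-hot (or soft) label distributions, and returns a per-example loss equal to -sum(labels * log(softmax(logits))). A minimal standalone sketch of that equivalence (not from the model above):

import tensorflow as tf

labels = tf.constant([[0., 1., 0.]])
logits = tf.constant([[2.0, 1.0, 0.1]])

op_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
manual = -tf.reduce_sum(labels * tf.math.log(tf.nn.softmax(logits)), axis=-1)
# op_loss and manual agree up to floating-point error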
Example #3
    def call(self, y_true, y_pred):
        y_true, y_pred = self.process_y(y_true, y_pred)
        self.assert_inputs(y_true, y_pred)
        dtype = y_pred.dtype

        # Masks
        class_mask = tf.cast((y_true >= 1), dtype)
        labels, _ = tf.linalg.normalize(class_mask, ord=1, axis=1)

        # Similarities
        sims = y_pred / self.temp

        return nn.softmax_cross_entropy_with_logits(labels=labels, logits=sims)
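The tf.linalg.normalize call with ord=1 turns the multi-hot class mask into a row-stochastic label distribution, which is the form softmax_cross_entropy_with_logits expects. A small standalone illustration (shapes assumed, not from the source class):

import tensorflow as tf

y_true = tf.constant([[1, 2, 0],
                      [1, 0, 0]])
class_mask = tf.cast(y_true >= 1, tf.float32)      # [[1, 1, 0], [1, 0, 0]]
labels, _ = tf.linalg.normalize(class_mask, ord=1, axis=1)
# labels -> [[0.5, 0.5, 0.0], [1.0, 0.0, 0.0]]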
Example #4
    def call(self, y_true, y_pred):
        y_true, y_pred = self.process_y(y_true, y_pred)
        self.assert_inputs(y_true, y_pred)
        dtype = y_pred.dtype

        # Masks
        inst_mask = tf.cast((y_true == 2), dtype)

        # Similarities
        sims = y_pred
        inst_loss = nn.softmax_cross_entropy_with_logits(
            labels=inst_mask, logits=sims / self.temp)
        return inst_loss
Example #5
def loss(itr, tra_img, tra_lab, model):

    tra_pre = model.call(tra_img)
    loss = softmax_cross_entropy_with_logits(labels=tra_lab, logits=tra_pre)
    loss = tf.reduce_mean(loss)
    tra_pre_arg = tf.argmax(tra_pre, 1).numpy()
    tra_tar = np.argmax(tra_lab, 1)
    tra_acc = np.sum(tra_pre_arg == tra_tar) / len(tra_tar)
    # Print loss and accuracy every (5000 // 500) = 10 iterations
    if (itr + 1) % (5000 // 500) == 0:
        print("step {}:\tloss = {}\taccuracy = {}".format(
            itr + 1, loss.numpy(), tra_acc))
    return loss
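Because this loss function runs eagerly (note the .numpy() calls), a training step would typically wrap it in a tf.GradientTape. A hedged sketch, where model, optimizer, itr, tra_img, and tra_lab are assumed from the surrounding script:

import tensorflow as tf

with tf.GradientTape() as tape:
    step_loss = loss(itr, tra_img, tra_lab, model)
grads = tape.gradient(step_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))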
Example #6
    tra_loss_list = []
    tra_acc_list = []
    val_loss_list = []
    val_acc_list = []
    train_num = tra_img.shape[0]
    tra_loss = 0
    tra_acc = 0
    # Split the training set into mini-batches
    for batch in range(train_num // batch_size):
        batch_img = tra_img[batch * batch_size:(batch + 1) *
                            batch_size, :, :, :]
        batch_lab = tra_lab[batch * batch_size:(batch + 1) * batch_size]

        batch_pre = model.call(batch_img)
        _tra_loss = softmax_cross_entropy_with_logits(labels=batch_lab,
                                                      logits=batch_pre)
        tra_loss = tf.reduce_mean(_tra_loss).numpy() + tra_loss

        tra_pre_arg = tf.argmax(batch_pre, 1).numpy()
        tra_tar = np.argmax(batch_lab, 1)
        tra_acc = np.sum(tra_pre_arg == tra_tar) + tra_acc

    tra_loss_list.append(tra_loss / (train_num // batch_size))
    tra_acc_list.append(tra_acc / train_num)

    val_pre = model.call(val_img[:, :, :, :])
    val_loss = softmax_cross_entropy_with_logits(labels=val_lab,
                                                 logits=val_pre)
    val_loss = tf.reduce_mean(val_loss).numpy()
    val_pre_arg = tf.argmax(val_pre, 1).numpy()
    val_tar = np.argmax(val_lab, 1)
    val_acc = np.sum(val_pre_arg == val_tar) / len(val_tar)

    val_loss_list.append(val_loss)
    val_acc_list.append(val_acc)
Example #7
    def execute(self, guess, y):
        return reduce_mean(
            softmax_cross_entropy_with_logits(labels=y, logits=guess))
Example #8
def loss_with_action_masking(y_true, y_pred):
    labels = y_true
    # Where the target marks an action as invalid (0), replace the predicted
    # logit with a small constant before taking the cross entropy.
    logits = tf.where(y_true == 0., 1e-6, y_pred)
    return softmax_cross_entropy_with_logits(labels=labels, logits=logits)
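A more common action-masking convention (a sketch, not from this source) drives invalid-action logits toward a large negative value so the softmax assigns them essentially zero probability:

import tensorflow as tf

def loss_with_large_negative_masking(y_true, y_pred):
    # Invalid actions (target 0) get a very negative logit instead of a
    # small positive one, so they vanish from the softmax.
    logits = tf.where(y_true == 0., -1e9, y_pred)
    return tf.nn.softmax_cross_entropy_with_logits(labels=y_true,
                                                   logits=logits)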
Example #9
def getLoss(logits, labels):
    return nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
Example #10
    def implement(self, x, labels):
        return reduce_mean(
            nn.softmax_cross_entropy_with_logits(labels=labels, logits=x))
Example #11
def loss(y_true, y_pred):
    # y_true / y_pred are pairs: index 0 is the classification part
    # (one-hot targets vs. logits), index 1 the regression part.
    total = mean_squared_error(y_true[1], y_pred[1])
    total += softmax_cross_entropy_with_logits(labels=y_true[0],
                                               logits=y_pred[0])
    return total
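A quick way to exercise the combined loss (hypothetical tensors; the (classification, regression) pair layout is inferred from the indexing above, and loss() plus its imports come from the example itself):

import tensorflow as tf

cls_targets = tf.constant([[0., 1.]])   # one-hot classification target
cls_logits = tf.constant([[0.2, 1.5]])
reg_targets = tf.constant([[0.7]])      # regression target / prediction
reg_preds = tf.constant([[0.5]])

total = loss((cls_targets, reg_targets), (cls_logits, reg_preds))
# total is the per-example MSE plus cross entropy, shape (1,)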