Example #1
def test_CE_loss(sess, CE_arrays):
    y, y_hat = CE_arrays
    y = tf.convert_to_tensor(y, dtype=tf.float64)
    y_hat = tf.convert_to_tensor(y_hat, dtype=tf.float64)
    # smoke test: the op just has to run without raising
    sess.run(cross_entropy_loss(y, y_hat))
    assert 1
    print("CE_loss ran to completion")
Example #2
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be a very
                    short function.
        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        loss = cross_entropy_loss(y=self.labels_placeholder, yhat=pred)
        return loss
Example #3
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be a very
                    short function.
        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        loss = cross_entropy_loss(self.labels_placeholder, pred)
        return loss
Example #4
  def add_loss_op(self, pred):
    """Adds cross_entropy_loss ops to the computational graph.

    Hint: Use the cross_entropy_loss function we defined. This should be a very
          short function.
    Args:
      pred: A tensor of shape (batch_size, n_classes)
    Returns:
      loss: A 0-d tensor (scalar)
    """
    ### YOUR CODE HERE
    return cross_entropy_loss(self.labels_placeholder, pred)  # pred is the add_model output on the input placeholder
Example #5
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be a very
                    short function.
        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE
        loss = cross_entropy_loss(self.labels, pred)
        ### END YOUR CODE
        return loss
Example #6
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be a very
                    short function.
        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE
        #labels = tf.get_variable("labels",shape=(self.config.batch_size,self.config.n_classes))
        loss = cross_entropy_loss(self.labels_placeholder,pred)
        ### END YOUR CODE
        return loss
Example #7
  def add_loss_op(self, pred):
    """Adds cross_entropy_loss ops to the computational graph.

    Hint: Use the cross_entropy_loss function we defined. This should be a very
          short function.
    Args:
      pred: A tensor of shape (batch_size, n_classes)
    Returns:
      loss: A 0-d tensor (scalar)
    """
    ### YOUR CODE HERE
    # print self.input_labels.shape, pred.get_shape()
    loss = cross_entropy_loss(self.labels_placeholder, pred)
    ### END YOUR CODE
    return loss
Example #8
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

    Hint: Use the cross_entropy_loss function we defined. This should be a very
          short function.
    Args:
      pred: A tensor of shape (batch_size, n_classes)
    Returns:
      loss: A 0-d tensor (scalar)
    """
        ### YOUR CODE HERE
        with tf.variable_scope("softmax_layer"):
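            # note: this variant ignores the `pred` argument and recomputes
            # the model output from the input placeholder via add_model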
            loss = cross_entropy_loss(self.labels_placeholder,
                                      self.add_model(self.input_placeholder))
        ### END YOUR CODE
        return loss
Example #9
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

    Hint: Use the cross_entropy_loss function we defined. This should be a very
          short function.
    Args:
      pred: A tensor of shape (batch_size, n_classes)
    Returns:
      loss: A 0-d tensor (scalar)
    """
        ### YOUR CODE HERE
        # pass the predicted distribution `pred` and the true labels from
        # labels_placeholder to cross_entropy_loss, which returns a single
        # scalar measuring how far the predictions are from the targets
        loss = cross_entropy_loss(self.labels_placeholder, pred)
        ### END YOUR CODE
        return loss
Example #10
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be a very
                    short function.
        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE
        """
        因为我们已经在q1_softmax.py中定义并实现了cross_entropy_loss()函数,所以这里可以直接调用
        self.labels_placeholder 是"喂"进来的真实标记
        pred    是我们预测的
        """
        loss = cross_entropy_loss(self.labels_placeholder, pred)
        ### END YOUR CODE
        return loss
Example #11
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be a very
                    short function.
        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE
        y_true = self.labels_placeholder
        # print "y_true shape"
        # print y_true.shape
        # print "pred shape"
        # print pred.shape
        loss = cross_entropy_loss(y_true, pred)
        ### END YOUR CODE
        return loss
Example #12
    def fit(self, inputs, labels):
        optimizer = optim.SGD(self.parameters(), lr=self.config.lr)
        self.train()
        losses = []
        inputs = torch.from_numpy(inputs)
        labels = torch.from_numpy(labels)
        for epoch in range(self.config.n_epochs):
            start_time = time.time()
            optimizer.zero_grad()

            pred = self(inputs)
            loss = cross_entropy_loss(labels, pred)
            loss.backward()
            optimizer.step()
            average_loss = np.mean(loss.detach().numpy())
            duration = time.time() - start_time
            print('Epoch {:}: loss = {:.2f} ({:.3f} sec)'.format(
                epoch, average_loss, duration))
            losses.append(average_loss)
        return losses
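Example #12 is a PyTorch port, so it needs a cross_entropy_loss with the same (labels, pred) argument order. A minimal sketch under the same assumptions as the TensorFlow helper above (one-hot labels, softmax probabilities; the original PyTorch helper is not shown):

import torch

def cross_entropy_loss(labels, pred):
    # -sum(y * log(yhat)) over the whole batch, mirroring the TensorFlow
    # version; labels are cast to float so the elementwise product is defined
    return -(labels.float() * torch.log(pred)).sum()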
Example #13
    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

    Hint: Use the cross_entropy_loss function we defined. This should be a very
          short function.
    Args:
      pred: A tensor of shape (batch_size, n_classes)
    Returns:
      loss: A 0-d tensor (scalar)
    """
        ### YOUR CODE HERE
        # an equivalent manual implementation, kept for reference:
        #   y = tf.cast(self.label_placeholder, tf.float32)
        #   loss = tf.reduce_sum(-tf.multiply(y, tf.log(pred)))
        y = self.label_placeholder
        loss = cross_entropy_loss(y, pred)
        ### END YOUR CODE
        return loss
Example #14
        ### END YOUR CODE
        return pred

    def add_loss_op(self, pred):
        """Adds cross_entropy_loss ops to the computational graph.

        Hint: Use the cross_entropy_loss function we defined. This should be a very
                    short function.
        Args:
            pred: A tensor of shape (batch_size, n_classes)
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE
        loss = cross_entropy_loss(self.labels_placeholder, pred)
        ### END YOUR CODE
        return loss

    def add_training_op(self, loss):
        """Sets up the training Ops.

        Creates an optimizer and applies the gradients to all trainable variables.
        The Op returned by this function is what must be passed to the
        `sess.run()` call to cause the model to train. See

        https://www.tensorflow.org/api_docs/python/tf/train/Optimizer
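Example #14's excerpt cuts off inside add_training_op's docstring; the body it describes, sketched minimally (the optimizer choice and the self.config.lr field are assumptions, not the assignment's confirmed code):

    def add_training_op(self, loss):
        # create a gradient-descent optimizer and an Op that applies the
        # gradients of `loss` to all trainable variables in one step
        optimizer = tf.train.GradientDescentOptimizer(self.config.lr)  # assumed config field
        return optimizer.minimize(loss)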
Example #15
def test_CE_loss_validation(sess, CE_arrays):
    y, y_hat = CE_arrays
    y = tf.convert_to_tensor(y, dtype=tf.float64)
    y_hat = tf.convert_to_tensor(y_hat, dtype=tf.float64)
    value = sess.run(cross_entropy_loss(y, y_hat))
    assert rel_error(value, -3 * np.log(0.5)) <= 1e-7
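The assertion pins the loss at -3 * np.log(0.5); a sketch of a rel_error helper and a CE_arrays fixture consistent with that value (both are assumptions, since the listing does not include them):

import numpy as np
import pytest

def rel_error(x, y):
    # one common relative-error definition: max |x - y| / (|x| + |y|)
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))

@pytest.fixture
def CE_arrays():
    # three one-hot rows scored against uniform 0.5 predictions give
    # CE = -3 * log(0.5), matching the assertion above
    y = np.array([[0, 1], [1, 0], [1, 0]])
    y_hat = np.full((3, 2), 0.5)
    return y, y_hat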
Example #16
    def compute_loss(self, pred):
        y = dy.inputTensor(self.labels)
        loss = cross_entropy_loss(y, pred)
        return loss
Example #17
    def compute_loss(self, pred):
        y = dy.inputTensor(np.transpose(self.labels), batched=True)
        losses = cross_entropy_loss(y, pred)
        loss = dy.sum_batches(losses) / self.config.batch_size
        return loss
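Examples #16 and #17 are DyNet variants, so cross_entropy_loss there must return a (possibly batched) DyNet expression. A minimal sketch with the same (y, pred) contract (an assumption mirroring the TensorFlow helper; the DyNet original is not shown):

import dynet as dy

def cross_entropy_loss(y, pred):
    # -sum(y * log(pred)) per example; with batched=True inputs DyNet
    # keeps the batch dimension, so callers can dy.sum_batches() as above
    return -dy.sum_elems(dy.cmult(y, dy.log(pred)))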
Example #18
	def add_loss_op(self, pred):
		loss = cross_entropy_loss(self.labels_placeholder, pred)
		return loss
Example #19
    def add_loss_op(self, pred):

        ### YOUR CODE HERE
        loss = cross_entropy_loss(self.labels_placeholder, pred)
        ### END YOUR CODE
        return loss
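Across all of these variants the calling pattern is the same; a hedged sketch of how the scaffold wires the pieces together, using the method names seen above (add_model from Example #8, add_training_op from Example #14):

    # inside the model's graph-building step (scaffold sketch, not verbatim):
    self.pred = self.add_model(self.input_placeholder)
    self.loss = self.add_loss_op(self.pred)
    self.train_op = self.add_training_op(self.loss)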