Example #1
def forward(self, bottom, top):
    """Computes the softmax forward pass."""
    # Assumes `numpy as np` and the framework's numerically robust
    # `logexp` exp/log helpers are imported at module level.
    # Get the input features and the output buffer.
    pred = bottom[0].data()
    prob = top[0].init_data(pred.shape, pred.dtype, setdata=False)
    prob[:] = pred
    # Normalize by subtracting the per-row max to avoid overflow in exp().
    prob -= prob.max(axis=1)[:, np.newaxis]
    logexp.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
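
For reference, a minimal standalone sketch of the same numerically stable softmax, using plain NumPy in place of the framework's buffers and its `logexp` helper (the `softmax` name and the sample logits below are illustrative, not part of the original code):

import numpy as np

def softmax(pred):
    """Row-wise softmax with the max-subtraction trick for stability."""
    pred = np.asarray(pred, dtype=float)
    prob = pred - pred.max(axis=1, keepdims=True)  # largest entry becomes 0
    np.exp(prob, out=prob)
    prob /= prob.sum(axis=1, keepdims=True)
    return prob

# Logits this large would overflow a naive exp(); the shifted version is fine.
logits = np.array([[1000.0, 1001.0, 1002.0],
                   [-5.0, 0.0, 5.0]])
p = softmax(logits)
assert np.allclose(p.sum(axis=1), 1.0)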
Example #2
def forward(self, bottom, top):
    """Computes the softmax loss forward pass."""
    # Assumes `numpy as np` and the framework's `logexp` helpers are
    # imported at module level.
    pred = bottom[0].data()
    prob = self._prob.init_data(pred.shape, pred.dtype, setdata=False)
    prob[:] = pred
    # Softmax, with the per-row max subtracted for numerical stability.
    prob -= prob.max(axis=1)[:, np.newaxis]
    logexp.exp(prob, out=prob)
    prob /= prob.sum(axis=1)[:, np.newaxis]
    # The gradient w.r.t. the input is (prob - label).
    diff = bottom[0].init_diff(setzero=False)
    diff[:] = prob
    logexp.log(prob, out=prob)
    label = bottom[1].data()
    if label.ndim == 1:
        # The labels are given as a sparse vector of class indices.
        diff[np.arange(diff.shape[0]), label] -= 1.
        self._loss = -prob[np.arange(diff.shape[0]), label].sum()
    else:
        # The labels are given as a dense matrix.
        diff -= label
        self._loss = -np.dot(prob.flat, label.flat)
    # Finally, scale down by the number of data points.
    diff *= self.spec['weight'] / diff.shape[0]
    self._loss *= self.spec['weight'] / diff.shape[0]
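
The layer above fuses the softmax, the cross-entropy loss, and its gradient (prob - label) into one pass. Below is a self-contained NumPy sketch of the same math (`softmax_loss`, `weight`, and the test inputs are illustrative stand-ins for the layer's buffers and `self.spec`); it also checks that sparse index labels and the equivalent dense one-hot matrix agree:

import numpy as np

def softmax_loss(pred, label, weight=1.0):
    """Mean cross-entropy loss and its gradient w.r.t. the logits.

    `label` is either a 1-D vector of class indices (sparse) or a
    2-D matrix of per-class targets (dense), as in the layer above.
    """
    pred = np.asarray(pred, dtype=float)
    prob = pred - pred.max(axis=1, keepdims=True)
    np.exp(prob, out=prob)
    prob /= prob.sum(axis=1, keepdims=True)
    diff = prob.copy()                # gradient: prob, then subtract label
    logprob = np.log(prob)
    num = pred.shape[0]
    if label.ndim == 1:
        diff[np.arange(num), label] -= 1.0
        loss = -logprob[np.arange(num), label].sum()
    else:
        diff -= label
        loss = -np.dot(logprob.flat, label.flat)
    scale = weight / num
    return loss * scale, diff * scale

logits = np.array([[2.0, 1.0, 0.0],
                   [0.0, 0.0, 5.0]])
sparse = np.array([0, 2])
dense = np.eye(3)[sparse]             # the equivalent one-hot encoding
loss_s, grad_s = softmax_loss(logits, sparse)
loss_d, grad_d = softmax_loss(logits, dense)
assert np.isclose(loss_s, loss_d) and np.allclose(grad_s, grad_d)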
Example #3
def forward(self, bottom, top):
    """Computes the logistic loss forward pass."""
    # Assumes `numpy as np`, `numexpr`, and the framework's `logexp`
    # helpers are imported at module level.
    pred = bottom[0].data()
    label = bottom[1].data()[:, np.newaxis]
    # Sigmoid: prob = exp(pred) / (1 + exp(pred)).
    prob = logexp.exp(pred)
    numexpr.evaluate("prob / (1. + prob)", out=prob)
    diff = bottom[0].init_diff(setzero=False)
    numexpr.evaluate("label - prob", out=diff)
    self._loss = np.dot(label.flat, logexp.log(prob).flat) + \
                 np.dot((1. - label).flat, logexp.log(1. - prob).flat)
    # Finally, scale down by the number of data points. Also, since we are
    # computing the loss (to be minimized), flip the sign.
    diff *= -self.spec['weight'] / diff.shape[0]
    self._loss *= -self.spec['weight'] / diff.shape[0]
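
A standalone sketch of the same logistic (binary cross-entropy) loss, with a clipped NumPy sigmoid standing in for `logexp` and `numexpr` (`logistic_loss` and the sample inputs are illustrative, not part of the original layer):

import numpy as np

def logistic_loss(pred, label, weight=1.0):
    """Mean binary cross-entropy loss and its gradient w.r.t. the logits."""
    pred = np.asarray(pred, dtype=float)
    label = np.asarray(label, dtype=float).reshape(pred.shape)
    z = np.clip(pred, -30.0, 30.0)    # keep exp() finite, as logexp's clipping does
    prob = 1.0 / (1.0 + np.exp(-z))   # sigmoid
    loss = np.dot(label.flat, np.log(prob).flat) + \
           np.dot((1.0 - label).flat, np.log(1.0 - prob).flat)
    # Flip the sign (we minimize the loss) and average over the batch.
    scale = -weight / pred.shape[0]
    return loss * scale, (label - prob) * scale

logits = np.array([[2.0], [-1.0], [0.5]])
labels = np.array([1.0, 0.0, 1.0])
loss, grad = logistic_loss(logits, labels)
assert loss > 0 and grad.shape == logits.shape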