def test_square():
    # INT_OVERFLOW is defined in the surrounding test module (a dimension
    # large enough to exercise large-tensor support).
    inp = np.ones((INT_OVERFLOW, 2))
    inp[-1, -1] = 3
    inp.attach_grad()
    with mx.autograd.record():
        out = np.square(inp)
    out.backward()
    assert out.shape == inp.shape
    assert out[-1, -1] == 9
    assert inp.grad.shape == inp.shape
    # d/dx x**2 = 2x, so the gradient at the entry set to 3 is 6.
    assert inp.grad[-1, -1] == 6
def evaluator(network, inter_matrix, test_data, ctx):
    scores = []
    for values in inter_matrix:
        feat = gluon.utils.split_and_load(values, ctx, even_split=False)
        scores.extend([network(i).asnumpy() for i in feat])
    recons = np.array([item for sublist in scores for item in sublist])
    # Calculate the test RMSE; np.sign(test_data) masks out unobserved
    # (zero-valued) entries so only rated items contribute.
    rmse = np.sqrt(np.sum(np.square(test_data - np.sign(test_data) * recons))
                   / np.sum(np.sign(test_data)))
    return float(rmse)
def forward(self, length_predictions, labels):
    """
    Returns MSE loss.

    :param length_predictions: Length predictions. Shape: (batch_size,).
    :param labels: Targets. Shape: (batch_size,).
    :return: MSE loss of length predictions of the batch.
    """
    # (batch_size,)
    loss = (self.weight / 2) * np.square(length_predictions - labels)
    # (1,)
    loss = np.sum(loss)
    num_samples = np.sum(np.ones_like(length_predictions))
    return loss, num_samples
def mse_loss(x1, x2):
    # Mean squared error; the None guard is purely defensive, since
    # .mean() always returns an ndarray here.
    loss = np.square(x1 - x2).mean()
    if loss is None:
        return 0
    else:
        return loss
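# A minimal usage sketch for mse_loss above, assuming MXNet's NumPy-compatible
# interface (from mxnet import np); the input values are illustrative only.
a = np.array([1.0, 2.0, 3.0])
b = np.array([1.5, 2.0, 2.5])
print(mse_loss(a, b))  # (0.5**2 + 0.0**2 + 0.5**2) / 3 ≈ 0.1667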
from math import sqrt
from typing import List

def global_norm(ndarrays: List[np.ndarray]) -> float:
    # Accumulate in a list, as item() is blocking and this way we can run
    # the norm calculation in parallel.
    norms = [np.square(np.linalg.norm(arr))
             for arr in ndarrays if arr is not None]
    return sqrt(sum(norm.item() for norm in norms))
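# A quick sanity check of global_norm, again with illustrative values; None
# entries are skipped by the filter in the list comprehension above.
print(global_norm([np.array([3.0, 4.0]), np.array([12.0]), None]))
# sqrt(3**2 + 4**2 + 12**2) = sqrt(169) = 13.0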
def style_loss(Y_hat, gram_Y):
    return np.square(gram(Y_hat) - gram_Y).mean()
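# style_loss above depends on a gram helper defined elsewhere in its source.
# This is a sketch of one common definition (the normalized Gram matrix used
# in d2l.ai's style-transfer chapter), not necessarily the original code:
def gram(X):
    # Flatten each of the num_channels feature maps into one row.
    num_channels, n = X.shape[1], X.size // X.shape[1]
    X = X.reshape((num_channels, n))
    # Channel-by-channel inner products, normalized by the element count.
    return np.dot(X, X.T) / (num_channels * n)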
def content_loss(Y_hat, Y):
    # np.square takes a single array (its second positional argument is
    # `out`), so the element-wise difference must be formed first.
    return np.square(Y_hat - Y).mean()
def style_loss(y_hat, gram_y):
    return np.square(StyleTransferGF.gram(y_hat) - gram_y).mean()
def content_loss(y_hat, y):
    # As above, square the difference rather than passing two arrays
    # to np.square.
    return np.square(y_hat - y).mean()
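# Sanity check for content_loss above, with made-up 2x2 feature maps:
# a single entry differs by 1, so the mean over four elements is 0.25.
y_hat = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([[1.0, 2.0], [3.0, 5.0]])
print(content_loss(y_hat, y))  # 0.25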