from phi import math, struct  # backend-agnostic math ops and struct handling (assumed import path)


def l_n_loss(tensor, n, batch_norm=True):
    # Structs are flattened and the loss is summed over all contained tensors.
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(l_n_loss(component, n, batch_norm) for component in all_tensors)
    # L_n loss: sum(x^n) / n, optionally normalized by the batch size.
    total_loss = math.sum(tensor ** n) / n
    if batch_norm:
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))
    else:
        return total_loss
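
# A minimal sketch (not part of the original module) of what l_n_loss computes for a
# plain tensor, written in NumPy for illustration; the helper name is hypothetical.
def _example_l_n_loss():
    import numpy as np
    x = np.array([[1.0, 2.0],
                  [3.0, 4.0]])                     # batch of 2 samples
    # With n=2 and batch_norm=True, the loss is sum(x**2) / n divided by the batch size:
    expected = np.sum(x ** 2) / 2 / x.shape[0]     # (1 + 4 + 9 + 16) / 2 / 2 = 7.5
    return expected
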
def l1_loss(tensor, batch_norm=True, reduce_batches=True):
    # Structs are flattened and the loss is summed over all contained tensors.
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(l1_loss(component, batch_norm, reduce_batches) for component in all_tensors)
    if reduce_batches:
        # Sum |x| over all dimensions, including the batch dimension.
        total_loss = math.sum(math.abs(tensor))
    else:
        # Keep the batch dimension: sum |x| over the non-batch dimensions only.
        total_loss = math.sum(math.abs(tensor), axis=list(range(1, len(tensor.shape))))
    if batch_norm and reduce_batches:
        # Normalize the fully-reduced loss by the batch size.
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))
    else:
        return total_loss
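
# A minimal sketch (not part of the original module) of the reduce_batches behaviour,
# written in NumPy for illustration; the helper name is hypothetical.
def _example_l1_loss():
    import numpy as np
    x = np.array([[1.0, -2.0],
                  [3.0, -4.0]])                    # batch of 2 samples
    # reduce_batches=True (default): a single scalar, normalized by the batch size.
    reduced = np.sum(np.abs(x)) / x.shape[0]       # (1 + 2 + 3 + 4) / 2 = 5.0
    # reduce_batches=False: one L1 sum per sample, no batch normalization.
    per_sample = np.sum(np.abs(x), axis=1)         # [3.0, 7.0]
    return reduced, per_sample
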