def update_target_network_L1(self):
    # Momentum update of the key (target) tower: move each key parameter
    # towards its query counterpart by a fixed step of (1 - m) along the
    # sign of their difference (an L1-style update instead of the usual EMA).
    for param_q, param_k in zip(self.towers[0].parameters(),
                                self.towers[1].parameters()):
        paddle.assign(
            param_k - (1 - self.m) * paddle.sign(param_k - param_q), param_k)
        param_k.stop_gradient = True
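# For contrast with the sign-based rule above: the conventional momentum
# (EMA) target update interpolates the parameters directly, while the L1
# variant steps by a fixed (1 - m) along the sign of the gap. A minimal
# standalone sketch on plain tensors (the tower structure and self.m from
# update_target_network_L1 are assumed, not reproduced):
import paddle

m = 0.999
param_q = paddle.to_tensor([0.5, -1.0, 2.0])
param_k = paddle.to_tensor([0.4, -0.8, 2.5])

# Standard EMA update: each entry moves proportionally to its gap.
ema = m * param_k + (1 - m) * param_q

# L1 / sign variant: each entry moves by exactly (1 - m) towards param_q.
l1 = param_k - (1 - m) * paddle.sign(param_k - param_q)

print(ema.numpy(), l1.numpy())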
def mu_law_encode(x: Tensor, mu: int = 256, quantized: bool = True) -> Tensor:
    """Mu-law encoding. Compute the mu-law encoding of the input signal.

    When quantized is True, the result is converted to integers in the range
    [0, mu-1]. Otherwise, the resulting signal is in the range [-1, 1].

    Parameters:
        x(Tensor): the input tensor of arbitrary shape to be encoded.
        mu(int): the maximum value (depth) of the encoded signal. The signal
            will be clipped to the range [0, mu-1].
        quantized(bool): indicate whether the signal will be quantized to
            integers.

    Examples:
        .. code-block:: python

            import paddle
            import paddleaudio.functional as F
            F.mu_law_encode(paddle.randn((2, 8)))
            >> Tensor(shape=[2, 8], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
                [[0, 5, 30, 255, 255, 255, 12, 13],
                 [0, 241, 8, 243, 7, 35, 84, 228]])

    Reference:
        https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
    """
    mu = mu - 1
    y = paddle.sign(x) * paddle.log1p(mu * paddle.abs(x)) / math.log1p(mu)
    if quantized:
        y = (y + 1) / 2 * mu + 0.5  # convert from [-1, 1] to [0, mu-1]
        y = paddle.clip(y, min=0, max=mu).astype('int32')
    return y
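# The companding step above is the mu-law formula
#     y = sign(x) * log1p(mu * |x|) / log1p(mu),
# followed by an optional quantization to integer codes. A numpy-only check
# of both branches (values in the comments are approximate):
import numpy as np

mu = 255  # mu - 1 for the default depth of 256
x = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])

# Non-quantized branch: maps [-1, 1] onto [-1, 1], compressing large amplitudes.
y = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
print(y)  # [-1.0, -0.876, 0.0, 0.876, 1.0]

# Quantized branch: integer codes in [0, mu].
codes = np.clip((y + 1) / 2 * mu + 0.5, 0, mu).astype('int32')
print(codes)  # [0, 16, 128, 239, 255]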
def test_dygraph(self):
    with fluid.dygraph.guard():
        np_x = np.array([-1., 0., -0., 1.2, 1.5], dtype='float64')
        x = paddle.to_tensor(np_x)
        z = paddle.sign(x)
        np_z = z.numpy()
        z_expected = np.sign(np_x)
        self.assertEqual((np_z == z_expected).all(), True)
def test_static(self):
    with program_guard(Program(), Program()):
        # The input type of sign_op must be Variable or numpy.ndarray.
        input1 = 12
        self.assertRaises(TypeError, paddle.tensor.math.sign, input1)
        # The input dtype of sign_op must be float16, float32, float64.
        input2 = fluid.layers.data(
            name='input2', shape=[12, 10], dtype="int32")
        input3 = fluid.layers.data(
            name='input3', shape=[12, 10], dtype="int64")
        self.assertRaises(TypeError, paddle.tensor.math.sign, input2)
        self.assertRaises(TypeError, paddle.tensor.math.sign, input3)
        input4 = fluid.layers.data(name='input4', shape=[4], dtype="float16")
        paddle.sign(input4)
def mu_law_decode(x: Tensor, mu: int = 256, quantized: bool = True) -> Tensor:
    """Mu-law decoding. Compute the mu-law decoding given an input code.

    Parameters:
        x(Tensor): the input tensor of arbitrary shape to be decoded.
        mu(int): the maximum value of the encoded signal, which should be the
            same as that used in mu_law_encode().
        quantized(bool): whether the signal has been quantized to integers.
            The value should be the same as that used in mu_law_encode().

    Shape:
        - input: any shape
        - output: same as input

    Notes:
        This function assumes that the input x is in the range [0, mu-1] when
        quantized is True and in [-1, 1] otherwise.

    Examples:
        .. code-block:: python

            import paddle
            import paddleaudio.functional as F
            F.mu_law_decode(paddle.randint(0, 255, shape=(2, 8)))
            >> Tensor(shape=[2, 8], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
                [[ 0.00796641, -0.28048742, -0.13789690,  0.67482352, -0.05550348, -0.00377374,  0.64593655,  0.03134083],
                 [ 0.45497340, -0.29312974,  0.29312995, -0.70499402,  0.51892924, -0.15078513,  0.07322186,  0.70499456]])

    Reference:
        https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
    """
    if mu < 1:
        raise ParameterError('mu is typically set as 2**k-1, k=1, 2, 3,...')

    mu = mu - 1
    if quantized:  # undo the quantization
        x = x * 2 / mu - 1
    x = paddle.sign(x) / mu * ((1 + mu)**paddle.abs(x) - 1)
    return x
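# Round-tripping a signal through the two functions shows the effect of the
# quantization. A minimal sketch, assuming paddleaudio.functional is
# available as F (as in the docstring examples above):
import paddle
import paddleaudio.functional as F

x = paddle.uniform([4], min=-1.0, max=1.0)
codes = F.mu_law_encode(x, mu=256)
x_hat = F.mu_law_decode(codes, mu=256)

# Mu-law allocates codes logarithmically, so small amplitudes are
# reconstructed more accurately than large ones.
print(paddle.abs(x - x_hat).max())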
def orthogonal_(tensor, gain=1):
    r"""Fills the input `Tensor` with a (semi) orthogonal matrix, as described
    in `Exact solutions to the nonlinear dynamics of learning in deep linear
    neural networks` - Saxe, A. et al. (2013). The input tensor must have at
    least 2 dimensions, and for tensors with more than 2 dimensions the
    trailing dimensions are flattened.

    Args:
        tensor: an n-dimensional `paddle.Tensor`, where :math:`n \geq 2`
        gain: optional scaling factor

    Examples:
        >>> w = paddle.empty([3, 5])
        >>> orthogonal_(w)
    """
    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")
    if paddle.fluid.data_feeder.convert_dtype(tensor.dtype) != "float32":
        raise ValueError("Only tensors in float32 dtype are supported")
    rows = tensor.shape[0]
    cols = tensor.numel() // rows
    flattened = np.random.randn(rows, cols).astype("float32")

    if rows < cols:
        flattened = flattened.T

    # Compute the qr factorization
    q, r = np.linalg.qr(flattened)
    # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
    d = np.diag(r, 0)
    ph = paddle.sign(paddle.to_tensor(d))
    q = paddle.to_tensor(q) * ph

    if rows < cols:
        q = q.t()  # q.t() is not in-place, so the result must be reassigned

    with paddle.no_grad():
        tensor.set_value(paddle.reshape(q * gain, tensor.shape))
    return tensor
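# Sanity check for the initializer above: for a tall matrix the columns
# should come out orthonormal, i.e. w^T w == I. A minimal sketch assuming
# orthogonal_ is in scope:
import numpy as np
import paddle

w = paddle.empty([5, 3])  # rows >= cols, so columns can be orthonormal
orthogonal_(w)

gram = paddle.matmul(w, w, transpose_x=True).numpy()
print(np.allclose(gram, np.eye(3), atol=1e-5))  # expected: True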
def step(self):
    c = self.c
    mu = self.mu
    lr = self.lr
    # Accumulated L1 penalty: the increment is the change of
    # c * lr^(0.5 + mu) * t^mu between consecutive iterations.
    l1_diff = c * math.pow(lr, 0.5 + mu) * (
        math.pow(self.iterations + 1., mu) - math.pow(self.iterations + 0., mu))
    self.l1_accumulation += l1_diff
    # 1 on the very first iteration, 0 afterwards.
    first_iter = max(1 - self.iterations, 0)

    updates = []
    grads = [x.grad for x in self.params]
    for p, g, a in zip(self.params, grads, self.accumulators):
        # Accumulate the gradient steps; on the first iteration the
        # accumulator is seeded with the initial parameter value.
        new_a = a + first_iter * p - self.lr * g
        updates.append((a, new_a))
        # Soft-thresholding: shrink the accumulator towards zero by the
        # accumulated L1 penalty to obtain the new (sparse) parameter.
        new_a_l1 = paddle.abs(new_a) - self.l1_accumulation
        new_p = paddle.sign(new_a) * paddle.clip(new_a_l1, min=0)
        updates.append((p, new_p))

    for raw_value, new_value in updates:
        raw_value.set_value(new_value)
    self.iterations += 1
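# The parameter update at the end of the loop is the soft-thresholding
# (shrinkage) operator S_lam(a) = sign(a) * max(|a| - lam, 0), which is what
# drives parameters exactly to zero. A standalone illustration (the helper
# name soft_threshold is ours, not from the optimizer above):
import paddle

def soft_threshold(a, lam):
    # Shrinks every entry towards zero by lam; entries with |a| < lam become 0.
    return paddle.sign(a) * paddle.clip(paddle.abs(a) - lam, min=0)

a = paddle.to_tensor([-2.0, -0.3, 0.0, 0.3, 2.0])
print(soft_threshold(a, 0.5).numpy())  # [-1.5, 0.0, 0.0, 0.0, 1.5]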
def cal_feature(engine, name='gallery'):
    all_feas = None
    all_image_id = None
    all_unique_id = None
    has_unique_id = False

    if name == 'gallery':
        dataloader = engine.gallery_dataloader
    elif name == 'query':
        dataloader = engine.query_dataloader
    elif name == 'gallery_query':
        dataloader = engine.gallery_query_dataloader
    else:
        raise RuntimeError(
            "Only support gallery, query or gallery_query dataset")

    max_iter = len(dataloader) - 1 if platform.system() == "Windows" else len(
        dataloader)
    for idx, batch in enumerate(dataloader):  # load is very time-consuming
        if idx >= max_iter:
            break
        if idx % engine.config["Global"]["print_batch_step"] == 0:
            logger.info(
                f"{name} feature calculation process: [{idx}/{len(dataloader)}]"
            )
        if engine.use_dali:
            batch = [
                paddle.to_tensor(batch[0]['data']),
                paddle.to_tensor(batch[0]['label'])
            ]
        batch = [paddle.to_tensor(x) for x in batch]
        batch[1] = batch[1].reshape([-1, 1]).astype("int64")
        if len(batch) == 3:
            has_unique_id = True
            batch[2] = batch[2].reshape([-1, 1]).astype("int64")
        out = engine.model(batch[0], batch[1])
        batch_feas = out["features"]

        # do norm
        if engine.config["Global"].get("feature_normalize", True):
            feas_norm = paddle.sqrt(
                paddle.sum(paddle.square(batch_feas), axis=1, keepdim=True))
            batch_feas = paddle.divide(batch_feas, feas_norm)

        # do binarize
        if engine.config["Global"].get("feature_binarize") == "round":
            batch_feas = paddle.round(batch_feas).astype("float32") * 2.0 - 1.0
        if engine.config["Global"].get("feature_binarize") == "sign":
            batch_feas = paddle.sign(batch_feas).astype("float32")

        if all_feas is None:
            all_feas = batch_feas
            if has_unique_id:
                all_unique_id = batch[2]
            all_image_id = batch[1]
        else:
            all_feas = paddle.concat([all_feas, batch_feas])
            all_image_id = paddle.concat([all_image_id, batch[1]])
            if has_unique_id:
                all_unique_id = paddle.concat([all_unique_id, batch[2]])

    if engine.use_dali:
        dataloader.reset()

    if paddle.distributed.get_world_size() > 1:
        feat_list = []
        img_id_list = []
        unique_id_list = []
        paddle.distributed.all_gather(feat_list, all_feas)
        paddle.distributed.all_gather(img_id_list, all_image_id)
        all_feas = paddle.concat(feat_list, axis=0)
        all_image_id = paddle.concat(img_id_list, axis=0)
        if has_unique_id:
            paddle.distributed.all_gather(unique_id_list, all_unique_id)
            all_unique_id = paddle.concat(unique_id_list, axis=0)

    logger.info("Build {} done, all feat shape: {}, begin to eval..".format(
        name, all_feas.shape))
    return all_feas, all_image_id, all_unique_id
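# Note the subtle difference between the two binarization modes above:
# "round" maps to {-1, +1} (it assumes features in [0, 1] -- an inference
# from the formula, not stated in the source), while "sign" yields
# {-1, 0, +1} because paddle.sign returns 0 at exactly zero:
import paddle

feas01 = paddle.to_tensor([0.1, 0.4, 0.6, 0.9])
print((paddle.round(feas01) * 2.0 - 1.0).numpy())  # [-1., -1., 1., 1.]

feas = paddle.to_tensor([-0.7, 0.0, 0.2])
print(paddle.sign(feas).numpy())  # [-1., 0., 1.]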
'''Example usage of paddle.sign (scripts/op2func/sign.py).'''
import paddle
import numpy as np

np_x = np.array([-1., 0., -0., 1.2, 1.5], dtype='float64')
x = paddle.to_tensor(np_x)
z = paddle.sign(x)
print(z)
def forward(self, inputs):
    """forward"""
    x = paddle.sign(inputs)
    return x