def test_sin_(self): device = self.get_device() cpu_sin_pi_ = torch.Tensor([np.pi]) torch.sin_(cpu_sin_pi_) ort_sin_pi_ = torch.Tensor([np.pi]).to(device) torch.sin_(ort_sin_pi_) cpu_sin_pi = torch.sin(torch.Tensor([np.pi])) ort_sin_pi = torch.sin(torch.Tensor([np.pi]).to(device)) assert torch.allclose(cpu_sin_pi, ort_sin_pi.cpu()) assert torch.allclose(cpu_sin_pi_, ort_sin_pi_.cpu()) assert torch.allclose(ort_sin_pi.cpu(), ort_sin_pi_.cpu())
# version with an appended underscore (``_``) that will alter a tensor in
# place.
#
# For example:
#

# Four sample angles: 0, pi/4, pi/2, 3*pi/4.
a = torch.tensor([i * math.pi / 4 for i in range(4)])
print('a:')
print(a)
print(torch.sin(a))  # this operation creates a new tensor in memory
print(a)             # a has not changed

b = torch.tensor([i * math.pi / 4 for i in range(4)])
print('\nb:')
print(b)
print(torch.sin_(b))  # note the underscore: computed in place
print(b)              # b has changed

#######################################################################
# For arithmetic operations, there are functions that behave similarly:
#

a = torch.ones(2, 2)
b = torch.rand(2, 2)

print('Before:')
print(a)
print(b)

print('\nAfter adding:')
print(a.add_(b))  # add_ mutates a and returns it
print(a)
[[-1, -1, -1], [-1, 10, -1], [-1, -1, -1]], device = device, dtype=torch.float) print(fenced_ten.numel()) print(fenced_ten.size()) print(fenced_ten.cpu()) print(fenced_ten.relu().cpu()) a = torch.ones(3, 3).to(device) b = torch.ones(3, 3) c = a + b d = torch.sin (c) e = torch.tan (c) torch.sin_(c) print ("sin-in-place:") print(c.cpu()) print ("sin explicit:") print (d.cpu ()) a = torch.tensor([[10, 10]], dtype=torch.float).to(device) b = torch.tensor([[3.3, 3.3]]).to(device) c = torch.fmod(a, b) print(c.cpu()) a = torch.tensor([[5, 3, -5]], dtype=torch.float).to(device) b = torch.hardshrink(a, 3) #should be [5, 0, -5] c = torch.nn.functional.softshrink(a, 3) #should be [2, 0, -2] print(b.cpu())