def test_info_smoke(self):
    # Smoke test for info functions / methods / attributes on named tensors.
    tensor = torch.empty(1, 1, names=('N', 'D'))
    tensor.device
    tensor.dtype
    tensor.get_device()
    tensor.is_complex()
    tensor.is_floating_point()
    tensor.is_nonzero()
    torch.is_same_size(tensor, tensor)
    torch.is_signed(tensor)
    tensor.layout
    tensor.numel()
    tensor.dim()
    tensor.element_size()
    tensor.is_contiguous()
    tensor.is_cuda
    tensor.is_leaf
    tensor.is_pinned()
    tensor.is_shared()
    tensor.is_sparse
    tensor.ndimension()
    tensor.nelement()
    tensor.shape
    tensor.size()
    tensor.storage()
    tensor.storage_offset()
    tensor.storage_type()
    tensor.stride()
    tensor.data
    tensor.data_ptr()
    tensor.ndim
    tensor.item()
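
# A minimal sketch of what torch.is_same_size reports, assuming only that the
# comparison is by shape (it returns a plain Python bool and does not compare
# dtype or device). The tensors below are illustrative, not from the test above.
import torch

a = torch.empty(2, 3)
b = torch.empty(2, 3)
c = torch.empty(3, 2)

print(torch.is_same_size(a, b))  # True: shapes match
print(torch.is_same_size(a, c))  # False: shapes differ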
def _make_grads(outputs: Sequence[torch.Tensor], grads: Sequence[_OptionalTensor],
                is_grads_batched: bool) -> Tuple[_OptionalTensor, ...]:
    new_grads: List[_OptionalTensor] = []
    for out, grad in zip(outputs, grads):
        if isinstance(grad, torch.Tensor):
            first_grad = grad if not is_grads_batched else grad[0]
            if not torch.is_same_size(out, first_grad):
                out_shape, grad_shape = _calculate_shape(
                    out, first_grad, is_grads_batched)
                if is_grads_batched:
                    raise RuntimeError(
                        "If `is_grads_batched=True`, we interpret the first "
                        "dimension of each grad_output as the batch dimension. "
                        "The sizes of the remaining dimensions are expected to match "
                        "the shape of corresponding output, but a mismatch "
                        "was detected: grad_output["
                        + str(grads.index(grad)) + "] has a shape of "
                        + str(grad_shape) + " and output["
                        + str(outputs.index(out)) + "] has a shape of "
                        + str(out_shape) + ". "
                        "If you only want some tensors in `grad_output` to be considered "
                        "batched, consider using vmap.")
                else:
                    raise RuntimeError(
                        "Mismatch in shape: grad_output["
                        + str(grads.index(grad)) + "] has a shape of "
                        + str(grad_shape) + " and output["
                        + str(outputs.index(out)) + "] has a shape of "
                        + str(out_shape) + ".")
            if out.dtype.is_complex != grad.dtype.is_complex:
                raise RuntimeError(
                    "For complex Tensors, both grad_output and output"
                    " are required to have the same dtype."
                    " Mismatch in dtype: grad_output["
                    + str(grads.index(grad)) + "] has a dtype of "
                    + str(grad.dtype) + " and output["
                    + str(outputs.index(out)) + "] has a dtype of "
                    + str(out.dtype) + ".")
            new_grads.append(grad)
        elif grad is None:
            if out.requires_grad:
                if out.numel() != 1:
                    raise RuntimeError(
                        "grad can be implicitly created only for scalar outputs"
                    )
                new_grads.append(
                    torch.ones_like(out, memory_format=torch.preserve_format))
            else:
                new_grads.append(None)
        else:
            raise TypeError(
                "gradients can be either Tensors or None, but got "
                + type(grad).__name__)
    return tuple(new_grads)
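
# Hedged usage sketch: the checks in _make_grads surface through the public
# torch.autograd API. A scalar output gets an implicit ones-like gradient; a
# non-scalar output without explicit grad_outputs hits the
# "grad can be implicitly created only for scalar outputs" error, and an
# explicit gradient must pass the torch.is_same_size check against the output.
import torch

x = torch.randn(3, requires_grad=True)

scalar_out = x.sum()
scalar_out.backward()  # OK: implicitly uses torch.ones_like(scalar_out)

y = x * 2              # non-scalar output
try:
    y.backward()       # no grad_outputs supplied
except RuntimeError as e:
    print(e)           # grad can be implicitly created only for scalar outputs

y.backward(torch.ones_like(y))  # explicit gradient of matching size is accepted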
def test():
    # Assumes ConvNet, MyDataset, device, BATCH_SIZE and test_seismic are
    # defined elsewhere, plus: import numpy as np; import scipy.io as scipio;
    # from torch.utils.data import DataLoader
    model = ConvNet().to(device)
    model.eval()  # inference mode
    test_dataset = MyDataset(test_seismic, test_seismic)
    # shuffle must be off: predictions are written back by batch index below
    test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                                 num_workers=3, shuffle=False, drop_last=False)
    # Preallocate an output buffer with the same shape as the seismic input.
    # (torch.is_same_size compares two tensors and returns a bool, so it
    # cannot be used to size a tensor; 'float' is not a valid dtype either.)
    test_impedence = torch.zeros(test_seismic.shape, dtype=torch.float)
    with torch.no_grad():
        for itr, (test_dt, test_label) in enumerate(test_dataloader):
            test_dt, test_label = test_dt.to(device), test_label.to(device)
            test_dt = test_dt.float()
            output = model(test_dt)
            # assumes each batch fills one row of the output buffer
            test_impedence[itr, :] = output.cpu()
    pathtxt = './output/Predict_Impedence.txt'
    np.savetxt(pathtxt, test_impedence.numpy())
    pathmat = './output/Predict_Impedence.mat'
    scipio.savemat(pathmat, {'Predict_Impedence': test_impedence.numpy()})
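
# Hedged follow-up: reading the saved predictions back. scipy.io.loadmat
# returns a dict keyed by the variable name passed to savemat; the path and
# key below mirror the illustrative ones used above.
import numpy as np
import scipy.io

mat = scipy.io.loadmat('./output/Predict_Impedence.mat')
pred = mat['Predict_Impedence']                    # ndarray of predictions
txt = np.loadtxt('./output/Predict_Impedence.txt')
assert pred.shape == txt.shape                     # both formats hold the same array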