def test_fuzz(self):
    """Fuzz `self.axpby` over a grid of tensor sizes, appliers, repeat counts,
    dtypes, and in-place modes.

    Skips combinations where the op would run in place but the output dtype
    differs from y's dtype (in-place writes into y, so dtypes must match).
    """
    import itertools

    input_size_pairs = (
        (7777 * 77, 555 * 555),
        (777, 555),
        (555, 2048 * 32 + 1),  # just over one chunk boundary
        (2048 * 32 + 1, 555),
        (555, 2048 * 32),      # exactly one chunk
        (2048 * 32, 555),
        (33333, 555),
        (555, 33333),
    )
    appliers = (
        MultiTensorApply(2048 * 32),
        MultiTensorApply(333),
        MultiTensorApply(33333),
    )
    repeat_tensors = (1, 55)
    dtypes = (torch.float32, torch.float16)

    # itertools.product iterates in the same nested order as the original
    # seven-deep for-loop pyramid, just without the indentation.
    for ((sizea, sizeb), applier, repeat,
         x_type, y_type, out_type, inplace) in itertools.product(
            input_size_pairs, appliers, repeat_tensors,
            dtypes, dtypes, dtypes, (True, False)):
        # In-place requires the output dtype to match y's dtype.
        if inplace and y_type is not out_type:
            continue
        self.axpby(sizea, sizeb, applier, repeat,
                   x_type, y_type, out_type, inplace=inplace)
def test_fuzz_nhwc(self):
    """Fuzz `self.axpby` with NHWC (channels-last) 4-D shapes over a grid of
    sizes, appliers, repeat counts, dtypes, and in-place modes.

    Mirrors `test_fuzz` but passes 4-tuple shapes and `nhwc=True`. Skips
    in-place combinations whose output dtype differs from y's dtype.
    """
    import itertools

    input_size_pairs = (
        ((7, 77, 7, 77), (5, 55, 5, 55)),
        ((1, 1, 777, 1), (1, 1, 555, 1)),
        ((5, 47, 5, 55), (1, 1, 1, 2048 * 32 + 1)),  # just over one chunk
        ((1, 1, 1, 2048 * 32 + 1), (55, 47, 5, 55)),
        ((555, 1, 1, 1), (32, 8, 32, 8)),            # exactly one chunk
        ((32, 8, 32, 8), (55, 47, 5, 55)),
        ((1, 1, 33333, 1), (55, 47, 55, 5)),
        ((55, 47, 55, 5), (1, 1, 33333, 1)),
    )
    appliers = (
        MultiTensorApply(2048 * 32),
        MultiTensorApply(333),
        MultiTensorApply(33333),
    )
    repeat_tensors = (1, 55)
    dtypes = (torch.float32, torch.float16)

    # Same nested iteration order as the original loop pyramid.
    for ((sizea, sizeb), applier, repeat,
         x_type, y_type, out_type, inplace) in itertools.product(
            input_size_pairs, appliers, repeat_tensors,
            dtypes, dtypes, dtypes, (True, False)):
        # In-place requires the output dtype to match y's dtype.
        if inplace and y_type is not out_type:
            continue
        self.axpby(sizea, sizeb, applier, repeat,
                   x_type, y_type, out_type, inplace=inplace, nhwc=True)
def test_fuzz(self):
    """Fuzz `self.downscale` and `self.find_inf` over a grid of tensor sizes,
    appliers, repeat counts, dtypes, and in-place modes.

    For each valid combination it runs one downscale pass, then three
    `find_inf` probes: a NaN planted at (0, 0), an inf at the last element of
    the last tensor, and an inf at the midpoint of a middle tensor. Skips
    in-place combinations whose output dtype differs from the input dtype.
    """
    import itertools

    input_size_pairs = (
        (7777 * 77, 555 * 555),
        (777, 555),
        (555, 2048 * 32 + 1),  # just over one chunk boundary
        (2048 * 32 + 1, 555),
        (555, 2048 * 32),      # exactly one chunk
        (2048 * 32, 555),
        (33333, 555),
        (555, 33333),
    )
    appliers = (
        MultiTensorApply(2048 * 32),
        MultiTensorApply(333),
        MultiTensorApply(33333),
    )
    repeat_tensors = (1, 55)
    dtypes = (torch.float32, torch.float16)

    # Same nested iteration order as the original loop pyramid.
    for ((sizea, sizeb), applier, repeat,
         in_type, out_type, inplace) in itertools.product(
            input_size_pairs, appliers, repeat_tensors,
            dtypes, dtypes, (True, False)):
        # In-place requires the output dtype to match the input dtype.
        if inplace and out_type is not in_type:
            continue
        self.downscale(sizea, sizeb, applier, repeat,
                       in_type, out_type, inplace=inplace)
        # NaN at the very first element of the first tensor.
        self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
                      0, 0, float('nan'), inplace=inplace)
        # Inf at the last element of the last tensor.
        self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
                      2 * repeat - 1, sizeb - 1, float('inf'), inplace=inplace)
        # Inf at the midpoint of a middle tensor.
        self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
                      2 * (repeat // 2), sizea // 2, float('inf'),
                      inplace=inplace)
def test_fuzz(self):
    """Fuzz `self.l2norm` over a grid of tensor sizes, appliers, repeat
    counts, input dtypes, and per-tensor vs. global norm modes.
    """
    import itertools

    input_size_pairs = (
        (7777 * 77, 555 * 555),
        (777, 555),
        (555, 2048 * 32 + 1),  # just over one chunk boundary
        (2048 * 32 + 1, 555),
        (555, 2048 * 32),      # exactly one chunk
        (2048 * 32, 555),
        (33333, 555),
        (555, 33333),
    )
    appliers = (
        MultiTensorApply(2048 * 32),
        MultiTensorApply(333),
        MultiTensorApply(33333),
    )
    repeat_tensors = (1, 55)

    # Same nested iteration order as the original loop pyramid.
    for ((sizea, sizeb), applier, repeat,
         in_type, per_tensor) in itertools.product(
            input_size_pairs, appliers, repeat_tensors,
            (torch.float32, torch.float16), (False, True)):
        self.l2norm(sizea, sizeb, applier, repeat, in_type, per_tensor)