def test_random_grayscale_return_transform(self):
    """The returned transform is the identity whether or not grayscale is applied."""
    params_applied = {'batch_prob': torch.tensor([True])}
    params_skipped = {'batch_prob': torch.tensor([False])}
    img = torch.rand(3, 5, 5)  # C x H x W
    identity = torch.eye(3).unsqueeze(0)  # 1 x 3 x 3
    for params in (params_applied, params_skipped):
        transform = F.apply_grayscale(img, params, return_transform=True)[1]
        assert_allclose(transform, identity)
def test_opencv_false_batch(self, device):
    """When batch_prob is all False, the batch must pass through unchanged."""
    batch_size = 4
    grayscale_params = {'batch_prob': torch.tensor([False] * batch_size)}
    # Single 3 x 5 x 5 RGB sample, tiled along the batch dimension below.
    sample = torch.tensor(
        [
            [
                [0.3944633, 0.8597369, 0.1670904, 0.2825457, 0.0953912],
                [0.1251704, 0.8020709, 0.8933256, 0.9170977, 0.1497008],
                [0.2711633, 0.1111478, 0.0783281, 0.2771807, 0.5487481],
                [0.0086008, 0.8288748, 0.9647092, 0.8922020, 0.7614344],
                [0.2898048, 0.1282895, 0.7621747, 0.5657831, 0.9918593],
            ],
            [
                [0.5414237, 0.9962701, 0.8947155, 0.5900949, 0.9483274],
                [0.0468036, 0.3933847, 0.8046577, 0.3640994, 0.0632100],
                [0.6171775, 0.8624780, 0.4126036, 0.7600935, 0.7279997],
                [0.4237089, 0.5365476, 0.5591233, 0.1523191, 0.1382165],
                [0.8932794, 0.8517839, 0.7152701, 0.8983801, 0.5905426],
            ],
            [
                [0.2869580, 0.4700376, 0.2743714, 0.8135023, 0.2229074],
                [0.9306560, 0.3734594, 0.4566821, 0.7599275, 0.7557513],
                [0.7415742, 0.6115875, 0.3317572, 0.0379378, 0.1315770],
                [0.8692724, 0.0809556, 0.7767404, 0.8742208, 0.1522012],
                [0.7708948, 0.4509611, 0.0481175, 0.2358997, 0.6900532],
            ],
        ],
        device=device,
    )
    batch = sample.unsqueeze(0).repeat(batch_size, 1, 1, 1)
    # Identity behavior: output equals input.
    assert_allclose(F.apply_grayscale(batch, grayscale_params), batch)
def test_opencv_true_batch(self, device):
    """When batch_prob is all True, every sample matches OpenCV's RGB->GRAY result.

    Fix: the original defined ``batch_size`` but never built ``grayscale_params``
    and called ``F.apply_grayscale(data)`` without the params argument, unlike the
    sibling ``test_opencv_false_batch``. The params dict is now constructed with
    ``batch_prob`` all True (as the test name implies) and passed explicitly.
    """
    batch_size = 4
    grayscale_params = {'batch_prob': torch.tensor([True] * batch_size)}
    data = torch.tensor(
        [
            [
                [0.3944633, 0.8597369, 0.1670904, 0.2825457, 0.0953912],
                [0.1251704, 0.8020709, 0.8933256, 0.9170977, 0.1497008],
                [0.2711633, 0.1111478, 0.0783281, 0.2771807, 0.5487481],
                [0.0086008, 0.8288748, 0.9647092, 0.8922020, 0.7614344],
                [0.2898048, 0.1282895, 0.7621747, 0.5657831, 0.9918593],
            ],
            [
                [0.5414237, 0.9962701, 0.8947155, 0.5900949, 0.9483274],
                [0.0468036, 0.3933847, 0.8046577, 0.3640994, 0.0632100],
                [0.6171775, 0.8624780, 0.4126036, 0.7600935, 0.7279997],
                [0.4237089, 0.5365476, 0.5591233, 0.1523191, 0.1382165],
                [0.8932794, 0.8517839, 0.7152701, 0.8983801, 0.5905426],
            ],
            [
                [0.2869580, 0.4700376, 0.2743714, 0.8135023, 0.2229074],
                [0.9306560, 0.3734594, 0.4566821, 0.7599275, 0.7557513],
                [0.7415742, 0.6115875, 0.3317572, 0.0379378, 0.1315770],
                [0.8692724, 0.0809556, 0.7767404, 0.8742208, 0.1522012],
                [0.7708948, 0.4509611, 0.0481175, 0.2358997, 0.6900532],
            ],
        ]
    )
    data = data.to(device)
    data = data.unsqueeze(0).repeat(batch_size, 1, 1, 1)
    # Output data generated with OpenCV 4.1.1: cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
    # The single-channel gray result is replicated across all three channels.
    expected = torch.tensor(
        [
            [
                [0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
                [0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
                [0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
                [0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
                [0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805],
            ],
            [
                [0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
                [0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
                [0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
                [0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
                [0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805],
            ],
            [
                [0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
                [0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
                [0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
                [0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
                [0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805],
            ],
        ]
    )
    expected = expected.to(device)
    expected = expected.unsqueeze(0).repeat(batch_size, 1, 1, 1)
    assert_allclose(F.apply_grayscale(data, grayscale_params), expected)