def test_crop_grayscale(height=375, width=375):
    """
    Test that CenterCrop works with pad and grayscale images.
    """
    # The trailing lambda channel-swaps the py-transform output (CHW float)
    # back to HWC uint8 so the subsequent C transform can consume it.
    py_ops = [
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(),
        (lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
    ]
    composed = mindspore.dataset.transforms.py_transforms.Compose(py_ops)

    dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    dataset = dataset.map(operations=composed, input_columns=["image"])

    # If input is grayscale, the output dimensions should be single channel
    crop_gray = vision.CenterCrop([height, width])
    dataset = dataset.map(operations=crop_gray, input_columns=["image"])

    for row in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        cropped = row["image"]
        # Check that the image stayed grayscale: rank 3 with a single channel
        assert cropped.ndim == 3 and cropped.shape[2] == 1
def util_test_random_color_adjust_error(brightness=(1, 1), contrast=(1, 1), saturation=(1, 1), hue=(0, 0)):
    """
    Util function that tests the error message in case of grayscale images.
    """
    # Build a grayscale (single-channel, HWC uint8) pipeline; the lambda
    # converts the py-transform CHW float output to HWC uint8 for C ops.
    py_ops = [
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(),
        (lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
    ]
    composed = mindspore.dataset.transforms.py_transforms.Compose(py_ops)
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(operations=composed, input_columns=["image"])

    # If input is grayscale, the output dimensions should be single channel,
    # so the color-adjust op below must fail when the pipeline is iterated.
    random_adjust_op = c_vision.RandomColorAdjust(brightness=brightness, contrast=contrast,
                                                  saturation=saturation, hue=hue)
    with pytest.raises(RuntimeError) as info:
        data1 = data1.map(operations=random_adjust_op, input_columns=["image"])
        dataset_shape_1 = []
        for item1 in data1.create_dict_iterator(num_epochs=1):
            c_image = item1["image"]
            dataset_shape_1.append(c_image.shape)
    error_msg = "image shape is not <H,W,C>"
    assert error_msg in str(info.value)
def test_random_grayscale_input_grayscale_images():
    """
    Test RandomGrayscale Op: valid parameter with grayscale images as input, expect to pass.
    """
    logger.info("test_random_grayscale_input_grayscale_images")
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset: single-channel grayscale input through RandomGrayscale
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform1 = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.Grayscale(1),  # Note: the input image is a grayscale image with 1 channel
        py_vision.RandomGrayscale(0.5),
        py_vision.ToTensor()
    ])
    data1 = data1.map(operations=transform1, input_columns=["image"])

    # Second dataset: plain decode, kept as the 3-channel reference
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform2 = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.ToTensor()
    ])
    data2 = data2.map(operations=transform2, input_columns=["image"])

    image_gray = []
    image = []
    iter1 = data1.create_dict_iterator(num_epochs=1, output_numpy=True)
    iter2 = data2.create_dict_iterator(num_epochs=1, output_numpy=True)
    for item1, item2 in zip(iter1, iter2):
        # Convert CHW float tensors back to HWC uint8 for shape checks
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_gray.append(image1)
        image.append(image2)

        # Grayscale pipeline must remain 1-channel; reference stays 3-channel
        assert len(image1.shape) == 3
        assert image1.shape[2] == 1
        assert len(image2.shape) == 3
        assert image2.shape[2] == 3

    # Restore config
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_pad_grayscale():
    """
    Tests that Pad works for grayscale images.

    Pads a 1-channel (py Grayscale) pipeline and a 3-channel (C Decode)
    pipeline with the same Pad op and checks that the padded heights and
    widths agree; the channel counts intentionally differ (1 vs 3).
    """
    # Note: image.transpose performs channel swap to allow py transforms to
    # work with c transforms (CHW float -> HWC uint8)
    transforms = [
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(),
        (lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
    ]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(operations=transform, input_columns=["image"])

    # If input is grayscale, the output dimensions should be single channel
    pad_gray = c_vision.Pad(100, fill_value=(20, 20, 20))
    data1 = data1.map(operations=pad_gray, input_columns=["image"])
    dataset_shape_1 = []
    for item1 in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
        c_image = item1["image"]
        dataset_shape_1.append(c_image.shape)

    # Dataset for comparison: same Pad op on the plain 3-channel decode
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    # we use the same padding logic
    ctrans = [decode_op, pad_gray]
    dataset_shape_2 = []
    data2 = data2.map(operations=ctrans, input_columns=["image"])
    for item2 in data2.create_dict_iterator(num_epochs=1, output_numpy=True):
        c_image = item2["image"]
        dataset_shape_2.append(c_image.shape)

    for shape1, shape2 in zip(dataset_shape_1, dataset_shape_2):
        # Validate that the first two dimensions (H, W) are the same.
        # The third dimension is deliberately excluded: it is 1 after
        # py_vision.Grayscale but 3 after the plain C decode.
        # Fix: the original sliced [0:1], which compared only the height
        # despite the comment claiming "the first two dimensions".
        assert shape1[0:2] == shape2[0:2]
def util_test_normalize_grayscale(num_output_channels, mean, std):
    """
    Utility function for testing Normalize. Input arguments are given by other tests.

    Builds a decode -> grayscale -> tensor -> normalize pipeline over the
    TFRecord image dataset and returns the mapped dataset.
    """
    pipeline = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.Grayscale(num_output_channels),
        py_vision.ToTensor(),
        py_vision.Normalize(mean, std)
    ])

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    return data.map(operations=pipeline, input_columns=["image"])