Example #1
    def test_gray(self):
        """Unit tests for grayscale transform"""

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Test set: convert an image to grayscale with the desired number of output channels
        # Case 1: RGB -> 1 channel grayscale
        trans1 = transform.Gray(num_output_channels=1)
        gray_pil_1 = trans1(x_pil)
        gray_np_1 = np.array(gray_pil_1)
        # self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_1.shape[1:], tuple(x_shape[0:2]),
                         'should be 1 channel')
        np.testing.assert_allclose(gray_np / 255, gray_np_1[0], atol=0.01)

        # Case 2: RGB -> 3 channel grayscale
        trans2 = transform.Gray(num_output_channels=3)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        # self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape),
                         'should be 3 channel')
        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        np.testing.assert_allclose(gray_np / 255, gray_np_2[:, :, 0], atol=0.01)

        # Case 3: 1 channel grayscale -> 1 channel grayscale
        trans3 = transform.Gray(num_output_channels=1)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        # self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape[1:], tuple(x_shape[0:2]),
                         'should be 1 channel')
        np.testing.assert_allclose(gray_np / 255, gray_np_3[0], atol=0.01)

        # Case 4: 1 channel grayscale -> 3 channel grayscale
        trans4 = transform.Gray(num_output_channels=3)
        gray_pil_4 = trans4(x_pil_2)
        gray_np_4 = np.array(gray_pil_4)
        # self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_4.shape, tuple(x_shape),
                         'should be 3 channel')
        np.testing.assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
        np.testing.assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
        np.testing.assert_allclose(gray_np / 255,
                                   gray_np_4[:, :, 0],
                                   atol=0.01)
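The unit test above is the clearest reference for what transform.Gray returns. As a quick standalone sanity check, the same calls can be run outside the test harness; the sketch below assumes only numpy, Pillow, and an installed jittor, and it prints the result shapes instead of asserting a particular layout.

import numpy as np
from PIL import Image
import jittor.transform as transform

# Same 2x2 RGB test data as in the unit test above.
x_np = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1],
                dtype=np.uint8).reshape(2, 2, 3)
x_pil = Image.fromarray(x_np, mode='RGB')

# RGB -> single-channel grayscale
gray_1 = transform.Gray(num_output_channels=1)(x_pil)
print(np.array(gray_1).shape)

# RGB -> grayscale replicated across three channels
gray_3 = transform.Gray(num_output_channels=3)(x_pil)
print(np.array(gray_3).shape)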
Example #2
    def __init__(self, root, lmdir, maskdir, cmaskdir, mode="test", load_h=512, load_w=512):
        super().__init__()
        # Image pipeline: resize, then normalize RGB channels with mean/std 0.5
        transform_ = [
            transform.Resize((load_h, load_w), Image.BICUBIC),
            transform.ImageNormalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ]
        self.transform = transform.Compose(transform_)
        # Mask pipeline: resize, then convert to single-channel grayscale
        transform_mask_ = [
            transform.Resize((load_h, load_w), Image.BICUBIC),
            transform.Gray(),
        ]
        self.transform_mask = transform.Compose(transform_mask_)

        self.files_A = sorted(glob.glob(root + "/*.*"))

        self.total_len = len(self.files_A)
        self.batch_size = None
        self.shuffle = False
        self.drop_last = False
        self.num_workers = None
        self.buffer_size = 512*1024*1024

        self.lmdir = lmdir
        self.maskdir = maskdir
        self.cmaskdir = cmaskdir
        self.load_h = load_h
Example #3
def get_transform(params, gray=False, mask=False):
    transform_ = []
    # resize
    transform_.append(
        transform.Resize((params['load_h'], params['load_w']), Image.BICUBIC))
    # flip
    if params['flip']:
        transform_.append(transform.Lambda(lambda img: transform.hflip(img)))
    # grayscale conversion
    if gray:
        transform_.append(transform.Gray())
    # normalization
    if mask:
        # masks: mean 0 / std 1 leaves the pixel values unchanged
        transform_.append(transform.ImageNormalize([0.], [1.]))
    elif gray:
        # grayscale images: single-channel normalization with mean/std 0.5
        transform_.append(transform.ImageNormalize([0.5], [0.5]))
    else:
        # RGB images: per-channel normalization with mean/std 0.5
        transform_.append(
            transform.ImageNormalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]))
    return transform.Compose(transform_)
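A hypothetical usage sketch for get_transform above: the params dict carries only the keys the function reads (load_h, load_w, flip), and the dummy Image.new input is there just to show how a composed pipeline is applied.

from PIL import Image

params = {'load_h': 256, 'load_w': 256, 'flip': False}   # hypothetical values

color_tf = get_transform(params)              # RGB: resize + 3-channel normalize
gray_tf = get_transform(params, gray=True)    # adds transform.Gray() before a 1-channel normalize
mask_tf = get_transform(params, mask=True)    # masks: mean 0 / std 1 normalization

img = Image.new('RGB', (512, 512))            # dummy input image
out = color_tf(img)                           # normalized array ready for a network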
Example #4
    def test_not_pil_image(self):
        """The transforms should also accept non-PIL inputs such as jt.Var arrays."""
        img = jt.random((30, 40, 3))
        result = transform.Compose([
            transform.RandomAffine(20),
            transform.ToTensor(),
        ])(img)

        img = jt.random((30, 40, 3))
        result = transform.Compose([
            transform.ToPILImage(),
            transform.Gray(),
            transform.Resize(20),
            transform.ToTensor(),
        ])(img)
Example #5

# Loss function: squared error
# Usage: adversarial_loss(network output A, target label B)
# Result: (A - B)^2
adversarial_loss = nn.MSELoss()

generator = Generator()
discriminator = Discriminator()

# Load the MNIST dataset
from jittor.dataset.mnist import MNIST
import jittor.transform as transform
transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=opt.batch_size,
                                                  shuffle=True)

optimizer_G = nn.Adam(generator.parameters(),
                      lr=opt.lr,
                      betas=(opt.b1, opt.b2))
optimizer_D = nn.Adam(discriminator.parameters(),
                      lr=opt.lr,
                      betas=(opt.b1, opt.b2))

from PIL import Image
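To confirm that the Gray + ImageNormalize pipeline above yields single-channel batches, one batch can be pulled from the dataloader; this sketch assumes opt.img_size and opt.batch_size were parsed earlier in the script, as in the original example.

# Sanity check: inspect the shape of one batch.
for imgs, labels in dataloader:
    # expected roughly (batch_size, 1, img_size, img_size) after Gray + ImageNormalize
    print(imgs.shape, labels.shape)
    break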