Example #1
def visualizeAnomalyImage(img, img_, z, z_):
    """
        Visualize the testing result and render the level of abnormality
        The anomaly score map is also concatenated at the end

        Arg:    img     (torch.Tensor)  - The input image
                img_    (torch.Tensor)  - The reconstructed image
                z       (torch.Tensor)  - The latent representation (tensor shape)
                z_      (torch.Tensor)  - The reconstructed latent representation (tensor shape)
    """
    s_map = visualizeAnomalyMap(img, z, z_)

    # Form the shown image
    img = sunnerTransforms.asImg(img)[0]
    img_ = sunnerTransforms.asImg(img_)[0]
    result = np.hstack((img, img_, s_map * 255.0, img * s_map))
    result = result.astype(np.uint8)

    # --- Plot with matplotlib
    plt.imshow(img, interpolation='nearest')
    plt.imshow(s_map[:, :, 0],
               cmap=plt.cm.viridis,
               alpha=0.5,
               interpolation='nearest')
    plt.show()
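
Both visualizeAnomalyImage above and visualizeEncoderDecoder in Example #3 call a helper named visualizeAnomalyMap that is not shown in these snippets. The following is only a minimal sketch of what such a helper could look like, assuming z and z_ are spatial feature maps of shape [B, C, h, w] and the score is the channel-averaged squared latent error upsampled to the image resolution; the actual implementation may differ.

import numpy as np
import torch
import torch.nn.functional as F

def visualizeAnomalyMap(img, z, z_):
    # Hypothetical sketch: channel-averaged squared error between the latent tensors
    score = torch.mean((z - z_) ** 2, dim=1, keepdim=True)                # [B, 1, h, w]
    # Upsample to the input resolution and normalize to [0, 1]
    score = F.interpolate(score, size=img.shape[2:], mode='bilinear',
                          align_corners=False)                            # [B, 1, H, W]
    score = (score - score.min()) / (score.max() - score.min() + 1e-8)
    # Return an HWC numpy map with the score repeated over 3 channels,
    # so it can be stacked next to the RGB images above
    s_map = score[0].permute(1, 2, 0).detach().cpu().numpy()
    return np.repeat(s_map, 3, axis=2)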
def main():
    # Create the fundamental data loader
    loader = sunnerData.DataLoader(sunnerData.ImageDataset(
        root=[['/home/sunner/Music/waiting_for_you_dataset/wait'],
              ['/home/sunner/Music/waiting_for_you_dataset/real_world']],
        transforms=transforms.Compose([
            sunnertransforms.Resize((160, 320)),
            sunnertransforms.ToTensor(),
            sunnertransforms.ToFloat(),
            sunnertransforms.Normalize(mean=[0.5, 0.5, 0.5],
                                       std=[0.5, 0.5, 0.5]),
        ])),
                                   batch_size=32,
                                   shuffle=False,
                                   num_workers=2)

    # Use the wrapper to limit the number of iterations (a sketch of this wrapper follows the example)
    loader = sunnerData.IterationLoader(loader, max_iter=1)

    # Show!
    for batch_tensor, _ in loader:
        batch_img = sunnertransforms.asImg(batch_tensor, size=(160, 320))
        cv2.imshow('show_window', batch_img[0][:, :, ::-1])
        cv2.waitKey(0)

        # Or show multiple images at once in a grid
        sunnertransforms.show(batch_tensor[:10], row=2, column=5)
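
IterationLoader above restricts how many batches are drawn from the underlying loader (max_iter=1 here). Below is a minimal sketch of such a wrapper under that assumption; the library's actual class may differ.

class IterationLoader:
    # Hypothetical sketch of a wrapper that yields at most max_iter batches
    def __init__(self, loader, max_iter=1):
        self.loader = loader
        self.max_iter = max_iter

    def __iter__(self):
        for idx, batch in enumerate(self.loader):
            if idx >= self.max_iter:
                break
            yield batch

    def __len__(self):
        return min(self.max_iter, len(self.loader))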
Example #3
def visualizeEncoderDecoder(img, img_, z, z_):
    """
        Visualize the rendered result of the adversarial auto-encoder
        The anomaly score map is also concatenated at the end

        Arg:    img     (torch.Tensor)  - The input image
                img_    (torch.Tensor)  - The reconstructed image
                z       (torch.Tensor)  - The latent representation (tensor shape)
                z_      (torch.Tensor)  - The reconstructed latent representation (tensor shape)
    """
    s_map = visualizeAnomalyMap(img, z, z_)
    img = sunnerTransforms.asImg(img)[0, :, :, ::-1]
    img_ = sunnerTransforms.asImg(img_)[0, :, :, ::-1]
    result = np.hstack((img, img_, s_map * 255.0))
    result = result.astype(np.uint8)
    cv2.imshow('training visualization', result)
    cv2.waitKey(10)
Example #4
def main():
    # Define the loader to generate the pallete object
    loader = sunnerData.DataLoader(
        sunnerData.ImageDataset(
            root=[tag_folder],
            transform=transforms.Compose([
                sunnertransforms.ToTensor(),
            ]),
            save_file=False  # Don't save the record file, be careful!
        ),
        batch_size=2,
        shuffle=False,
        num_workers=2)
    pallete = sunnertransforms.getCategoricalMapping(loader,
                                                     path='pallete.json')[0]
    del loader

    # Define the actual loader
    loader = sunnerData.DataLoader(sunnerData.ImageDataset(
        root=[img_folder, tag_folder],
        transform=transforms.Compose([
            sunnertransforms.Resize((512, 1024)),
            sunnertransforms.ToTensor(),
            sunnertransforms.Transpose(sunnertransforms.BHWC2BCHW),
            sunnertransforms.Normalize(),
        ])),
                                   batch_size=32,
                                   shuffle=False,
                                   num_workers=2)

    # Define the reverse operator
    goto_op = sunnertransforms.CategoricalTranspose(
        pallete=pallete, direction=sunnertransforms.COLOR2ONEHOT)
    back_op = sunnertransforms.CategoricalTranspose(
        pallete=pallete, direction=sunnertransforms.ONEHOT2COLOR)

    # Show!
    for _, batch_index in loader:
        batch_img = back_op(goto_op(batch_index))
        batch_img = sunnertransforms.asImg(batch_img, size=(512, 1024))
        cv2.imshow('show_window', batch_img[0][:, :, ::-1])
        cv2.waitKey(0)
        break
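
The goto_op/back_op pair above converts a color label image into a one-hot tensor and back. Purely as an illustration of that round trip (not the library's implementation), here is a plain NumPy sketch using a made-up two-color palette; the real mapping is read from pallete.json.

import numpy as np

palette = [(0, 0, 0), (255, 0, 0)]   # hypothetical two-color palette

def color_to_onehot(img, palette):
    # [H, W, 3] uint8 label image -> [H, W, len(palette)] one-hot map
    onehot = np.zeros(img.shape[:2] + (len(palette),), dtype=np.float32)
    for idx, color in enumerate(palette):
        onehot[..., idx] = np.all(img == np.asarray(color), axis=-1)
    return onehot

def onehot_to_color(onehot, palette):
    # [H, W, C] one-hot map -> [H, W, 3] uint8 label image
    indices = np.argmax(onehot, axis=-1)
    return np.asarray(palette, dtype=np.uint8)[indices]

label = np.zeros((4, 4, 3), dtype=np.uint8)
label[1:3, 1:3] = (255, 0, 0)
restored = onehot_to_color(color_to_onehot(label, palette), palette)
assert np.array_equal(restored, label)   # the round trip preserves the label image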
Example #5
def main():
    # Define the loader to generate the pallete object
    loader = sunnerData.DataLoader(sunnerData.ImageDataset(
        root=[tag_folder],
        transforms=transforms.Compose([
            sunnertransforms.ToTensor(),
            sunnertransforms.ToFloat(),
            # Remember to transfer back to [0, 255] before generating the palette
            sunnertransforms.UnNormalize(mean=[0, 0, 0], std=[255, 255, 255]),
            # Remember to transfer back to BHWC before generating the palette
            sunnertransforms.Transpose(sunnertransforms.BCHW2BHWC)
        ])),
                                   batch_size=2,
                                   shuffle=False,
                                   num_workers=2)
    pallete = sunnertransforms.getCategoricalMapping(loader, path='pallete.json')[0]
    del loader

    # Define the actual loader
    loader = sunnerData.DataLoader(sunnerData.ImageDataset(
        root=[img_folder, tag_folder],
        transforms=transforms.Compose([
            sunnertransforms.Resize((512, 1024)),
            sunnertransforms.ToTensor(),
            sunnertransforms.ToFloat(),
            sunnertransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])),
                                   batch_size=32,
                                   shuffle=False,
                                   num_workers=2)

    # Define the reverse operator
    goto_op = sunnertransforms.CategoricalTranspose(
        pallete=pallete, direction=sunnertransforms.COLOR2ONEHOT)
    back_op = sunnertransforms.CategoricalTranspose(
        pallete=pallete, direction=sunnertransforms.ONEHOT2COLOR)

    # Show!
    for _, batch_index in loader:
        batch_img = back_op(goto_op(batch_index))
        batch_img = sunnertransforms.asImg(batch_img, size=(512, 1024))
        cv2.imshow('show_window', batch_img[0][:, :, ::-1])
        cv2.waitKey(0)
        break