Example #1
        edg_de_list = edg_decoder(en_list[::-1])
        cor_de_list = cor_decoder(en_list[-1:] + edg_de_list[:-1])

        edg_tensor = torch.sigmoid(edg_de_list[-1])
        cor_tensor = torch.sigmoid(cor_de_list[-1])

        # Recover the effect from augmentation
        edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type)
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

        # Merge all results from augmentation
        edg_img = edg_img.transpose([0, 2, 3, 1]).mean(0)
        cor_img = cor_img.transpose([0, 2, 3, 1]).mean(0)

    # Generate boundary image
    bon_img = draw_boundary(cor_img[..., 0], i_img * 255)

    # Composite output image with rgb image
    edg_img = args.alpha * edg_img + (1 - args.alpha) * i_img
    cor_img = args.alpha * cor_img + (1 - args.alpha) * i_img

    # All in one image
    all_in_one = 0.3 * edg_img + 0.3 * cor_img + 0.4 * i_img
    all_in_one = draw_boundary(cor_img[..., 0], all_in_one * 255)

    # Dump result
    basename = os.path.splitext(os.path.basename(i_path))[0]
    edg_path = os.path.join(args.output_dir, '%s_edg.png' % basename)
    cor_path = os.path.join(args.output_dir, '%s_cor.png' % basename)
    bon_path = os.path.join(args.output_dir, '%s_bon.png' % basename)
    all_in_one_path = os.path.join(args.output_dir, '%s_all.png' % basename)
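Example #1 is cut off after building the output paths (Example #2 below shows the matching save calls). Note also that its final draw_boundary call re-uses cor_img after it has already been alpha-blended with the RGB image; Example #3 avoids this by copying the raw corner map into cormap before blending. Every example relies on an augment_undo helper and then averages the test-time-augmented batch: .transpose([0, 2, 3, 1]) reorders NCHW to NHWC before .mean(0) collapses the augmentation axis. The helper itself is not shown here; the following is only a minimal sketch of what such an augment/undo pair could look like for a panorama (horizontal flip plus circular rolls), with the function names and the aug_type encoding assumed rather than taken from the repository.

import numpy as np

def augment(x_img, flip=True, rotate=(0.25, 0.5, 0.75)):
    # x_img: (1, C, H, W) panorama. Build an augmented batch plus a record
    # of which transform produced each entry (hypothetical encoding).
    aug_type = ['']
    x_imgs = [x_img]
    if flip:
        aug_type.append('flip')
        x_imgs.append(np.flip(x_img, axis=-1))
    for p in rotate:
        shift = int(round(p * x_img.shape[-1]))
        aug_type.append('rotate %d' % shift)
        x_imgs.append(np.roll(x_img, shift, axis=-1))
    return np.concatenate(x_imgs, 0), aug_type

def augment_undo(x_imgs, aug_type):
    # Map each augmented prediction back to the original frame so that the
    # subsequent .mean(0) averages aligned maps.
    undone = []
    for x, aug in zip(x_imgs, aug_type):
        if aug == 'flip':
            undone.append(np.flip(x, axis=-1))
        elif aug.startswith('rotate'):
            shift = int(aug.split()[-1])
            undone.append(np.roll(x, -shift, axis=-1))
        else:
            undone.append(x)
    return np.array(undone)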
Example #2
        en_list = encoder(x)
        edg_de_list = edg_decoder(en_list[::-1])
        cor_de_list = cor_decoder(en_list[-1:] + edg_de_list[:-1])

        edg_tensor = torch.sigmoid(edg_de_list[-1])
        cor_tensor = torch.sigmoid(cor_de_list[-1])

        # Recover the effect from augmentation
        edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type)
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

        # Merge all results from augmentation
        edg_img = edg_img.transpose([0, 2, 3, 1]).mean(0)
        cor_img = cor_img.transpose([0, 2, 3, 1]).mean(0)

    # Generate boundary image
    bon_img = draw_boundary(edg_img * 255, cor_img[..., 0] * 255, i_img * 255)

    # Composite output image with rgb image
    edg_img = args.alpha * edg_img + (1 - args.alpha) * i_img
    cor_img = args.alpha * cor_img + (1 - args.alpha) * i_img

    # Dump result
    basename = os.path.splitext(os.path.basename(i_path))[0]
    edg_path = os.path.join(args.output_dir, '%s_edg.png' % basename)
    cor_path = os.path.join(args.output_dir, '%s_cor.png' % basename)
    bon_path = os.path.join(args.output_dir, '%s_bon.png' % basename)
    Image.fromarray((edg_img * 255).astype(np.uint8)).save(edg_path)
    Image.fromarray((cor_img * 255).astype(np.uint8)).save(cor_path)
    Image.fromarray(bon_img).save(bon_path)
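draw_boundary is called with different argument lists across these examples (Example #1 passes the corner map and the RGB image, Example #2 additionally passes the edge map), so the snippets presumably come from different revisions of that helper. Its implementation is not shown; purely to illustrate the idea, here is a hypothetical overlay function that paints high-probability pixels onto the RGB panorama and returns the uint8 image that Image.fromarray(bon_img) expects. It is not the repository's draw_boundary.

import numpy as np

def draw_boundary_sketch(cor_prob, rgb255, thresh=0.5, color=(0, 255, 0)):
    # cor_prob : (H, W) corner/boundary probability map in [0, 1]
    # rgb255   : (H, W, 3) image already scaled to [0, 255]
    out = np.asarray(rgb255, np.float32).copy()
    out[cor_prob > thresh] = color           # paint confident pixels
    return out.astype(np.uint8)

# e.g. bon_img = draw_boundary_sketch(cor_img[..., 0], i_img * 255)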
Example #3
        edg_tensor = torch.sigmoid(edg_de_list[-1])
        cor_tensor = torch.sigmoid(cor_de_list[-1])

        # Recover the effect from augmentation
        edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type)
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

        # Merge all results from augmentation
        edg_img = edg_img.transpose([0, 2, 3, 1]).mean(0)
        cor_img = cor_img.transpose([0, 2, 3, 1]).mean(0)

    cormap = cor_img[..., 0].copy()

    # Generate boundary image
    bon_img = draw_boundary(cormap.copy(), i_img * 255)

    # Composite output image with rgb image
    edg_img = args.alpha * edg_img + (1 - args.alpha) * i_img
    cor_img = args.alpha * cor_img + (1 - args.alpha) * i_img

    # All in one image
    all_in_one = 0.3 * edg_img + 0.3 * cor_img + 0.4 * i_img
    all_in_one = draw_boundary(cormap.copy(), all_in_one * 255)

    # Dump result
    basename = os.path.splitext(os.path.basename(i_path))[0]
    edg_path = os.path.join(args.output_dir, '%s_edg.png' % basename)
    cor_path = os.path.join(args.output_dir, '%s_cor.png' % basename)
    bon_path = os.path.join(args.output_dir, '%s_bon.png' % basename)
    all_in_one_path = os.path.join(args.output_dir, '%s_all.png' % basename)
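Example #3, like #1, is truncated before the images are written out. The compositing itself is simple: each probability map is alpha-blended over the input panorama, and the all-in-one view is a fixed weighted sum (0.3 + 0.3 + 0.4 = 1). Below is a self-contained sketch with dummy arrays, saved the same way Example #2 saves its outputs; the shapes, the alpha value, and the file names are placeholders.

import numpy as np
from PIL import Image

h, w = 512, 1024
alpha = 0.5
i_img = np.random.rand(h, w, 3).astype(np.float32)    # stand-in RGB panorama in [0, 1]
edg_img = np.random.rand(h, w, 3).astype(np.float32)  # stand-in edge probability map
cor_img = np.random.rand(h, w, 3).astype(np.float32)  # stand-in corner probability map

# Blend each map over the panorama, then build the weighted all-in-one view.
edg_vis = alpha * edg_img + (1 - alpha) * i_img
cor_vis = alpha * cor_img + (1 - alpha) * i_img
all_in_one = 0.3 * edg_vis + 0.3 * cor_vis + 0.4 * i_img

# Write 8-bit PNGs, as in Example #2's final lines.
Image.fromarray((edg_vis * 255).astype(np.uint8)).save('demo_edg.png')
Image.fromarray((cor_vis * 255).astype(np.uint8)).save('demo_cor.png')
Image.fromarray((all_in_one * 255).astype(np.uint8)).save('demo_all.png')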
Example #4
        en_list = encoder(x)
        edg_de_list = edg_decoder(en_list[::-1])
        cor_de_list = cor_decoder(en_list[-1:] + edg_de_list[:-1])

        edg_tensor = torch.sigmoid(edg_de_list[-1])
        cor_tensor = torch.sigmoid(cor_de_list[-1])

        # Recover the effect from augmentation
        edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type)
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

        # Merge all results from augmentation
        edg_img = edg_img.transpose([0, 2, 3, 1]).mean(0)
        cor_img = cor_img.transpose([0, 2, 3, 1]).mean(0)

    # Generate boundary image
    bon_img = draw_boundary(cor_img[..., 0], i_img * 255)

    # Composite output image with rgb image
    edg_img = args.alpha * edg_img + (1 - args.alpha) * i_img
    cor_img = args.alpha * cor_img + (1 - args.alpha) * i_img

    # Dump result
    basename = os.path.splitext(os.path.basename(i_path))[0]
    edg_path = os.path.join(args.output_dir, '%s_edg.png' % basename)
    cor_path = os.path.join(args.output_dir, '%s_cor.png' % basename)
    bon_path = os.path.join(args.output_dir, '%s_bon.png' % basename)
    Image.fromarray((edg_img * 255).astype(np.uint8)).save(edg_path)
    Image.fromarray((cor_img * 255).astype(np.uint8)).save(cor_path)
    Image.fromarray(bon_img).save(bon_path)
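The list slicing that wires the two decoders together, edg_decoder(en_list[::-1]) and cor_decoder(en_list[-1:] + edg_de_list[:-1]), is easy to misread. A toy illustration with plain Python lists, no real tensors or networks (the feature names are placeholders):

# Encoder features, shallow -> deep.
en_list = ['e1', 'e2', 'e3', 'e4']

# The edge decoder consumes them deepest-first.
edg_in = en_list[::-1]                       # ['e4', 'e3', 'e2', 'e1']

# Pretend the edge decoder returns one intermediate map per stage.
edg_de_list = ['d1', 'd2', 'd3', 'd4']

# The corner decoder then sees the deepest encoder feature followed by every
# edge-decoder output except the final prediction.
cor_in = en_list[-1:] + edg_de_list[:-1]     # ['e4', 'd1', 'd2', 'd3']
print(edg_in, cor_in)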