Пример #1
0
    def __init__(self, parent=None):
        """Initialize the main window.

        Wires the Qt UI, redirects stdout/stderr into the GUI, and loads
        the pretrained UNet segmentation model onto GPU/CPU.

        Args:
            parent: optional parent widget forwarded to the Qt base class.
        """
        super(MainWindow, self).__init__(parent=parent)
        self.setupUi(self)
        self.setWindowIcon(QIcon('Software GUI/beauty.ico'))
        # Route print()/tracebacks into the GUI via EmittingStr's textWritten signal.
        sys.stdout = EmittingStr(textWritten=self.outputWritten)
        sys.stderr = EmittingStr(textWritten=self.outputWritten)
        # Button wiring.
        self.pushButton.clicked.connect(self.load_source)
        self.pushButton_2.clicked.connect(self.load_target)
        self.pushButton_3.clicked.connect(self.play_input_video)
        self.pushButton_4.clicked.connect(self.play_result_video)
        self.pushButton_5.clicked.connect(self.save_result)
        self.pushButton_skyrpl.clicked.connect(self.skyrpl)

        # Earlier checkpoint candidates kept for reference:
        # self.checkpoint_load = 'test6_lovasz_1e-2/checkpoint_19_epoch.pkl'
        # self.checkpoint_load = 'test6_lovasz_1e-2/bestdice_min_38.57%_checkpoint_55_epoch.pkl'
        # self.checkpoint_load = 'test4_lovasz_1e-2/bestdice_min_47.90%_checkpoint_35_epoch.pkl'
        self.checkpoint_load = 'tools/checkpoint_199_epoch.pkl'
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.net = UNet(
            in_channels=3, out_channels=1,
            init_features=32)  # init_features is 64 in the standard UNet
        self.net.to(self.device)
        self.net.eval()
        if self.checkpoint_load is not None:
            checkpoint = torch.load(self.checkpoint_load)
            self.net.load_state_dict(checkpoint['model_state_dict'])
            print(
                '\nWelcome to use Magic Sky Software. \nPytorch model loads checkpoint from %s'
                % self.checkpoint_load)
        else:
            # Unreachable with the hard-coded path above; kept as a guard.
            raise Exception("\nPlease specify the checkpoint")
        set_seed()  # set random seed
Пример #2
0
# -*- coding: utf-8 -*-
"""
# @file name  : bn_and_initialize.py
# @author     : TingsongYu https://github.com/TingsongYu
# @date       : 2019-11-03
# @brief      : pytorch中常见的 normalization layers
"""
import torch
import numpy as np
import torch.nn as nn
from tools.common_tools import set_seed


set_seed(1)  # set random seed

# ======================================== nn.layer norm
# Layer Normalization demo
flag = 1
# flag = 0
if flag:
    batch_size = 8
    num_features = 6

    features_shape = (3, 4)

    # Build a toy activation tensor: channel c holds the constant value c+1.
    feature_map = torch.ones(features_shape)  # 2D
    feature_maps = torch.stack([feature_map * (i + 1) for i in range(num_features)], dim=0)  # 3D
    feature_maps_bs = torch.stack([feature_maps for i in range(batch_size)], dim=0)  # 4D

    # feature_maps_bs shape is [8, 6, 3, 4],  B * C * H * W
    # ln = nn.LayerNorm(feature_maps_bs.size()[1:], elementwise_affine=True) # normalized_shape usually drops the leading batch-size dimension
Пример #3
0
import torch.optim as optim
import torchvision.models as models

import sys
# Make the repo root (two levels up) importable so `tools.*` resolves.
hello_pytorch_DIR = os.path.abspath(
    os.path.dirname(__file__) + os.path.sep + ".." + os.path.sep + "..")
sys.path.append(hello_pytorch_DIR)

from tools.common_tools import set_seed
from tools.my_dataset import PortraitDataset
from tools.unet import UNet

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

set_seed()  # set random seed


def compute_dice(y_pred, y_true):
    """Compute the Dice coefficient between two binary masks.

    Args:
        y_pred: array-like with values in [0, 1]; rounded to {0, 1}.
        y_true: array-like with values in [0, 1], same shape as y_pred.

    Returns:
        float in [0, 1]: 2 * |intersection| / (|y_pred| + |y_true|).
        When both masks are empty the denominator is zero; the original
        code emitted a divide-by-zero RuntimeWarning and returned NaN —
        this version returns 1.0 (two empty masks agree perfectly).
    """
    y_pred, y_true = np.asarray(y_pred), np.asarray(y_true)
    y_pred, y_true = np.round(y_pred).astype(int), np.round(y_true).astype(int)
    denom = np.sum(y_pred) + np.sum(y_true)
    if denom == 0:
        return 1.0  # both masks empty: treat as a perfect match
    return np.sum(y_pred[y_true == 1]) * 2.0 / denom


def get_img_name(img_dir, format="jpg"):
Пример #4
0
# -*- coding: utf-8 -*-
import os
import sys

sys.path.append('.')
import cv2
import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from glob import glob
from matplotlib import pyplot as plt
from tools.common_tools import set_seed

set_seed()  # fix the random seed (tools.common_tools helper) for reproducibility

class SkyDataset(Dataset):

    def __init__(self, data_dir, transform=None, in_size=224):
        """

        Args:
            data_dir:
            transform:
            in_size:
        """
        super(SkyDataset, self).__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.label_path_list = list()
Пример #5
0
# -*- coding: utf-8 -*-
"""
# @file name  : nn_layers_convolution.py
# @author     : tingsongyu
# @date       : 2019-09-23 10:08:00
# @brief      : 学习卷积层
"""
import os
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from matplotlib import pyplot as plt
from tools.common_tools import transform_invert, set_seed

set_seed(0)  # set random seed

# ================================= load img ==================================
path_img = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lena.png")
img = Image.open(path_img).convert('RGB')  # pixel values 0~255

# convert to tensor
img_transform = transforms.Compose([transforms.ToTensor()])
img_tensor = img_transform(img)
img_tensor.unsqueeze_(dim=0)  # C*H*W to B*C*H*W (add a batch dimension)

# ================================= create convolution layer ==================================

# ================ 2d
flag = 1
# flag = 0
if flag:
Пример #6
0
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def remove_module(state_dict_g):
    """Strip the 'module.' prefix (added by nn.DataParallel) from state-dict keys.

    Args:
        state_dict_g: mapping of parameter names to tensors.

    Returns:
        collections.OrderedDict with the same values in the same order,
        with any leading 'module.' removed from each key.
    """
    from collections import OrderedDict

    prefix = 'module.'
    return OrderedDict(
        (key[len(prefix):] if key.startswith(prefix) else key, value)
        for key, value in state_dict_g.items())


set_seed(1998)  # set random seed

# config — names follow the DCGAN tutorial convention (nc = image channels,
# nz = latent vector size, ngf/ndf = generator/discriminator feature widths);
# verify against the model definitions, which are outside this chunk.
path_checkpoint = os.path.join(BASE_DIR, "checkpoint_14_epoch.pkl")
image_size = 64
num_img = 64
nc = 3
nz = 100
ngf = 128
ndf = 128
d_transforms = transforms.Compose([
    transforms.Resize(image_size),
    transforms.CenterCrop(image_size),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
Пример #7
0
def unet_infer(demo_path_img, demo, save_result):
    """Run UNet sky segmentation on the test set or on one demo image.

    Args:
        demo_path_img: path to a single image; only used when demo is True.
        demo: True -> segment the single image at demo_path_img;
              False -> iterate the test set, plotting and scoring each sample.
        save_result: True -> write prediction images to disk.

    Returns:
        In demo mode: (original_img, mask_pred_gray) — the untouched input
        as an ndarray and the binarized mask resized to the input size.
        In test-set mode: None (results are plotted/saved as side effects).
    """
    # demo = True
    # demo_path_img = 'd:/MyLearning/DIP/Final_Project/Unet/Demo/1.jpg'
    # save_result = True

    testset_path = os.path.join("dataset/testset")
    checkpoint_load = 'tools/checkpoint_199_epoch.pkl'
    shuffle_dataset = True

    vis_num = 1000      # cap on how many test samples to visualize
    mask_thres = 0.5    # probability threshold for the binary mask
    ##########################################################

    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    set_seed()  # set random seed
    in_size = 224  # network input resolution

    if not demo:
        testset = SkyDataset(testset_path)
        valid_loader = DataLoader(testset,
                                  batch_size=1,
                                  drop_last=False,
                                  shuffle=False)
    else:
        # Load one image, remember its original size, resize for the net.
        img_pil = Image.open(demo_path_img).convert('RGB')
        original_img = np.array(img_pil)
        w, h = img_pil.size
        img_pil = img_pil.resize((in_size, in_size), Image.BILINEAR)

        img_hwc = np.array(img_pil)
        img_chw = img_hwc.transpose((2, 0, 1))  # HWC -> CHW
        img_chw = torch.from_numpy(img_chw).float()

    net = UNet(in_channels=3, out_channels=1,
               init_features=32)  # init_features is 64 in the standard UNet
    net.to(device)
    if checkpoint_load is not None:
        path_checkpoint = checkpoint_load
        checkpoint = torch.load(path_checkpoint)

        net.load_state_dict(checkpoint['model_state_dict'])
        print('load checkpoint from %s' % path_checkpoint)
    else:
        # Unreachable with the hard-coded path above; kept as a guard.
        raise Exception("\nPlease specify the checkpoint")

    net.eval()
    with torch.no_grad():
        if not demo:
            for idx, (inputs, labels) in enumerate(valid_loader):
                if idx > vis_num:
                    break
                if torch.cuda.is_available():
                    inputs, labels = inputs.to(device), labels.to(device)
                outputs = net(inputs)

                # Raw probability map scaled to 0-255 grayscale.
                pred = (outputs.cpu().data.numpy() * 255).astype("uint8")
                pred_gray = pred.squeeze()

                # Thresholded binary mask.
                mask_pred = outputs.ge(mask_thres).cpu().data.numpy()
                mask_pred_gray = (mask_pred.squeeze() * 255).astype("uint8")

                print('idx>>%d, Dice>>%.4f' %
                      (idx, compute_dice(mask_pred,
                                         labels.cpu().numpy())))
                img_hwc = inputs.cpu().data.numpy()[0, :, :, :].transpose(
                    (1, 2, 0)).astype("uint8")
                img_label = (labels.cpu().data.numpy()[0, 0, :, :] *
                             255).astype("uint8")
                # 2x2 panel: input, ground truth, binary mask, raw mask.
                plt.subplot(221).imshow(img_hwc)
                plt.title('%d Original IMG' % idx)
                plt.subplot(222).imshow(img_label, cmap="gray")
                plt.title('%d Original Label' % idx)
                plt.subplot(223).imshow(mask_pred_gray, cmap="gray")
                plt.title('%d Binary Label' % idx)
                plt.subplot(224).imshow(pred_gray, cmap="gray")
                plt.title('%d Raw Label' % idx)
                plt.tight_layout()
                plt.savefig('results/%d_img' % idx)
                plt.show()
                plt.close()
                if save_result:
                    pred_gray_img = Image.fromarray(pred_gray)
                    pred_gray_img.save('results/%d_pred_gray_img.png' % idx)

                    img_hwc_img = Image.fromarray(img_hwc)
                    img_hwc_img.save('results/%d_img_hwc.png' % idx)
        else:
            inputs = img_chw.to(device).unsqueeze(0)
            outputs = net(inputs)

            pred = (outputs.cpu().data.numpy() * 255).astype("uint8")
            pred_gray = pred.squeeze()

            mask_pred = outputs.ge(mask_thres).cpu().data.numpy()
            mask_pred_gray = (mask_pred.squeeze() * 255).astype("uint8")

            img_hwc = inputs.cpu().data.numpy()[0, :, :, :].transpose(
                (1, 2, 0)).astype("uint8")

            if save_result:
                # NOTE(review): absolute d:/ output paths are machine-specific.
                pred_gray_img = Image.fromarray(pred_gray)
                pred_gray_img = pred_gray_img.resize((w, h), Image.BICUBIC)
                pred_gray_img.save(
                    'd:/MyLearning/DIP/Final_Project/Unet/results/1_pred_gray_img.png'
                )
                mask_pred_gray_img = Image.fromarray(mask_pred_gray)
                mask_pred_gray_img = mask_pred_gray_img.resize((w, h),
                                                               Image.BICUBIC)
                mask_pred_gray_img.save(
                    'd:/MyLearning/DIP/Final_Project/Unet/results/1_mask_pred_gray_img.png'
                )
                img_hwc_img = Image.open(demo_path_img).convert('RGB')
                img_hwc_img.save(
                    'd:/MyLearning/DIP/Final_Project/Unet/results/1_img_hwc_img.png'
                )
            # plt.subplot(131).imshow(img_hwc)
            # plt.subplot(132).imshow(mask_pred_gray, cmap="gray")
            # plt.subplot(133).imshow(pred_gray, cmap="gray")
            # plt.show()
            # plt.pause(0.5)
            # plt.close()

            # img_hwc = Image.fromarray(img_hwc)
            # img_hwc = img_hwc.resize((w, h), Image.BILINEAR)
            # img_hwc = np.array(img_hwc)
            # Resize the mask back to the original resolution.
            mask_pred_gray = Image.fromarray(mask_pred_gray)
            mask_pred_gray = mask_pred_gray.resize((w, h), Image.BILINEAR)
            mask_pred_gray = np.array(mask_pred_gray)

            return original_img, mask_pred_gray
Пример #8
0
# -*- coding: utf-8 -*-
"""
# @file name  : nn_layers_convolution.py
# @author     : tingsongyu
# @date       : 2019-09-23 10:08:00
# @brief      : 学习卷积层
"""
import os
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from matplotlib import pyplot as plt
from tools.common_tools import transform_invert, set_seed

set_seed(3)  # set random seed

# ================================= load img ==================================
path_img = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lena.png")
img = Image.open(path_img).convert('RGB')  # pixel values 0~255

# convert to tensor
img_transform = transforms.Compose([transforms.ToTensor()])
img_tensor = img_transform(img)
img_tensor.unsqueeze_(dim=0)  # C*H*W to B*C*H*W (add a batch dimension)

# ================================= create convolution layer ==================================

# ================ 2d
flag = 1
# flag = 0
if flag:
Пример #9
0
def video_infer(img_pil):
    """Segment the sky region of one video frame with the pretrained UNet.

    Args:
        img_pil: despite the name, an H*W*C image ndarray — it is indexed
            via .shape and resized with cv2, not PIL. TODO(review): rename.

    Returns:
        (original_img, mask_pred_gray): the untouched input frame and the
        thresholded mask resized back to the frame's resolution.
    """
    checkpoint_load = 'tools/checkpoint_199_epoch.pkl'

    vis_num = 1000      # unused here; kept from the batch-inference variant
    mask_thres = 0.5    # probability threshold for the binary mask
    ##########################################################

    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    set_seed()  # set random seed
    in_size = 224  # network input resolution
    # img_pil = demo_img.convert('RGB')
    original_img = np.array(img_pil)
    h, w, _ = img_pil.shape
    # img_pil = img_pil.resize((in_size, in_size), Image.BILINEAR)
    img_pil = cv2.resize(img_pil, (in_size, in_size),
                         interpolation=cv2.INTER_AREA)

    img_hwc = np.array(img_pil)
    img_chw = img_hwc.transpose((2, 0, 1))  # HWC -> CHW
    img_chw = torch.from_numpy(img_chw).float()

    net = UNet(in_channels=3, out_channels=1,
               init_features=32)  # init_features is 64 in the standard UNet
    net.to(device)
    if checkpoint_load is not None:
        path_checkpoint = checkpoint_load
        checkpoint = torch.load(path_checkpoint)

        net.load_state_dict(checkpoint['model_state_dict'])
        # print('load checkpoint from %s' % path_checkpoint)
    else:
        # Unreachable with the hard-coded path above; kept as a guard.
        raise Exception("\nPlease specify the checkpoint")

    net.eval()
    with torch.no_grad():

        inputs = img_chw.to(device).unsqueeze(0)
        outputs = net(inputs)

        # Raw probability map scaled to 0-255 grayscale.
        pred = (outputs.cpu().data.numpy() * 255).astype("uint8")
        pred_gray = pred.squeeze()

        # Thresholded binary mask.
        mask_pred = outputs.ge(mask_thres).cpu().data.numpy()
        mask_pred_gray = (mask_pred.squeeze() * 255).astype("uint8")

        img_hwc = inputs.cpu().data.numpy()[0, :, :, :].transpose(
            (1, 2, 0)).astype("uint8")

    # Resize the mask back to the original frame resolution.
    mask_pred_gray = Image.fromarray(mask_pred_gray)
    mask_pred_gray = mask_pred_gray.resize((w, h), Image.BILINEAR)
    mask_pred_gray = np.array(mask_pred_gray)

    return original_img, mask_pred_gray
# -*- coding: utf-8 -*-
"""
# @file name  : 3.nn_layers_convolution.py
# @author     : tingsongyu
# @date       : 2019-09-23 10:08:00
# @brief      : 学习卷积层
"""
import os
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from matplotlib import pyplot as plt
from tools.common_tools import transform_invert, set_seed

set_seed(2)  # set random seed

# ================================= load img ==================================
path_img = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lena.png")
img = Image.open(path_img).convert('RGB')  # pixel values 0~255

# convert to tensor
img_transform = transforms.Compose([transforms.ToTensor()])  # turn the RGB image into a tensor
img_tensor = img_transform(img)
img_tensor.unsqueeze_(dim=0)  # C*H*W to B*C*H*W (add a batch dimension)

# ================================= create convolution layer ==================================

# ================ 2d
# flag = 1
flag = 0
if flag:
Пример #11
0
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from tools.common_tools import transform_invert, set_seed
from matplotlib import pyplot as plt
set_seed(2)  # set random seed

# ================================= load img ==================================
path_img = "lena.png"
img = Image.open(path_img).convert('RGB')  # pixel values 0~255

# convert to tensor
img_transform = transforms.Compose([transforms.ToTensor()])
img_tensor = img_transform(img)
img_tensor.unsqueeze_(dim=0)  # C*H*W to B*C*H*W (add a batch dimension)

# ================ 3d kernel (1, 3, 3)
# flag = 1
flag = 0
if flag:
    conv_layer = nn.Conv3d(3, 1, (1, 3, 3), padding=(1, 0, 0), bias=False)
    nn.init.xavier_normal_(conv_layer.weight.data)

    # calculation
    img_tensor.unsqueeze_(dim=2)  # B*C*H*W to B*C*D*H*W (add a depth axis)
    img_conv = conv_layer(img_tensor)

    # ================================= visualization ==================================
    print("卷积前尺寸:{}\n卷积后尺寸:{}".format(img_tensor.shape, img_conv.shape))
    img_conv = transform_invert(img_conv.squeeze(), img_transform)
    img_raw = transform_invert(img_tensor.squeeze(), img_transform)
Пример #12
0
# -*- coding: utf-8 -*-
"""
# @file name  : nn_layers_convolution.py
# @copyright  : tingsongyu
# @author     : perry
# @date       : 2019-04-26
# @brief      : 学习卷积层
"""
import os
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from matplotlib import pyplot as plt
from tools.common_tools import transform_invert, set_seed

set_seed(4)  # set random seed (changing it re-randomizes the conv init weights, so the output image changes)

# ================================= load img ==================================
# read the RGB image
path_img = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lena.png")
img = Image.open(path_img).convert('RGB')  # pixel values 0~255

# convert to tensor
img_transform = transforms.Compose([transforms.ToTensor()])
img_tensor = img_transform(img)
img_tensor.unsqueeze_(dim=0)  # C*H*W to B*C*H*W (B = batch size)

# ================================= create convolution layer ==================================

# ================ 2d: create a plain 2-D convolution ========================================
# flag = 1
Пример #13
0
"""
模型的保存,使用保存整个模型、保存参数两种方式
"""
import torch
import torch.nn as nn
from tools.common_tools import set_seed
set_seed(2020)


class LeNet2(nn.Module):
    """LeNet-style CNN: two conv+pool stages followed by a 3-layer MLP head.

    Args:
        classes: number of output classes for the final linear layer.
    """

    def __init__(self, classes):
        super(LeNet2, self).__init__()
        # Convolutional feature extractor: 3 -> 6 -> 16 channels.
        self.features = nn.Sequential(
            nn.Conv2d(3, 6, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Fully-connected classifier over the flattened 16x5x5 feature map.
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, classes),
        )

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def initialize(self):
        # Fill every parameter with a fixed constant (used by the save/load demo).
        for param in self.parameters():
            param.data.fill_(20200309)


net = LeNet2(classes=2020)  # instantiate the demo network