def build_model(self):
    print('==> Build model and setup loss and optimizer')
    # build model
    self.vgg_model = VGGNet(pretrained=False, requires_grad=True)
    self.model = FCNs(pretrained_net=self.vgg_model, n_class=15)
    if Config.use_cuda:
        self.model = torch.nn.DataParallel(self.model)
        self.model = self.model.cuda()

    # size_average is deprecated; reduction='mean' is the equivalent setting
    self.loss_func = nn.MSELoss(reduction='mean')
    self.optimizer = optim.Adam(params=self.model.parameters(), lr=self.lr, betas=(0.5, 0.999))
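
Because the model above may be wrapped in torch.nn.DataParallel, its parameters get a "module." prefix. A minimal sketch of saving and restoring such a model so the checkpoint stays usable on a single GPU or CPU (the function names and path are illustrative, not part of the original code):

import torch

def save_checkpoint(model, path='checkpoints/fcn_model.pt'):
    # unwrap DataParallel if present so the saved keys carry no 'module.' prefix
    state = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
    torch.save(state, path)

def load_checkpoint(model, path='checkpoints/fcn_model.pt', device='cpu'):
    state = torch.load(path, map_location=device)
    target = model.module if hasattr(model, 'module') else model
    target.load_state_dict(state)
    return model
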
Code example #2
def detect():
    """
    Initial Model, Load parameters
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    vis = visdom.Visdom(env='detect')
    vgg_model = VGGNet(requires_grad=True, show_params=False)
    fcn_model = FCNs(pretrained_net=vgg_model, n_class=5)
    # torch.load restores the whole serialized model object (saved with torch.save(model, ...)),
    # so it replaces the freshly constructed fcn_model above
    fcn_model = torch.load('./checkpoints/fcn_model_100.pt')
    fcn_model.to(device).eval()
    """
    Initial test dataset
    """
    bag = BagDataset(transform)
    train_dataset, test_dataset = train_test_split(bag,
                                                   test_size=0.3,
                                                   random_state=42)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=16,
                                 shuffle=True,
                                 num_workers=16)

    with torch.no_grad():
        for index, (data, data_mask) in enumerate(test_dataloader):
            print("index: " + str(index))
            data = data.to(device)
            data_mask = data_mask.to(device)
            output = fcn_model(data)
            output = torch.sigmoid(
                output)  # output.shape is (batch_size, n_class, H, W)
            output_np = output.cpu().detach().numpy().copy(
            )  # output_np.shape = (batch_size, n_class, H, W)
            output_np = np.argmin(output_np, axis=1)
            data = data.cpu().detach().numpy().copy()
            print(output_np[0].shape)
            image = data[0]
            image = image_process_cv_to_vis(image)

            for idx in range(len(data)):
                data[idx] = image_process_cv_to_vis(data[idx])

            data_mask_np = data_mask.cpu().detach().numpy().copy(
            )  # data_mask_np.shape = (batch_size, n_class, H, W)
            data_mask_np = np.argmin(data_mask_np, axis=1)
            vis.images(output_np[:, None, :, :],
                       win='test_pred',
                       opts=dict(title='test prediction'))
            vis.images(data_mask_np[:, None, :, :],
                       win='test_label',
                       opts=dict(title='label'))
            vis.images(data, win='input', opts=dict(title='input'))
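
The detect() loop above converts the per-channel scores into a per-pixel class map with np.argmin over the channel axis, then adds a singleton channel so vis.images treats each map as a grayscale image. A minimal numpy sketch of that conversion (shapes are illustrative only):

import numpy as np

scores = np.random.rand(4, 2, 160, 160)     # (batch, n_class, H, W) network output
class_map = np.argmin(scores, axis=1)       # (batch, H, W); follows this repo's label encoding
                                            # (with one-hot masks, np.argmax gives the complement)
vis_ready = class_map[:, None, :, :]        # (batch, 1, H, W) single-channel images for vis.images
print(class_map.shape, vis_ready.shape)
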
Code example #3
def test():
    #vis = visdom.Visdom()
    print(model_path)
    os.environ["CUDA_VISIBLE_DEVICES"] = '2'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    vgg_model = VGGNet(pretrained=False,
                       requires_grad=False,
                       show_params=False)
    fcn_model = FCN8s(pretrained_net=vgg_model, n_class=2)
    if not torch.cuda.is_available():
        fcn_model.load_state_dict(torch.load(model_path, map_location='cpu'))
    else:
        fcn_model.load_state_dict(torch.load(model_path))
    #print(fcn_model)
    # fcn_model=torch.load(model_path)
    fcn_model = fcn_model.to(device)
    fcn_model.eval()
    miou = 0
    num = 0
    if os.path.exists(TEST_RESULT):
        shutil.rmtree(TEST_RESULT)

    os.mkdir(TEST_RESULT)

    for index, (bag, bag_msk) in enumerate(test_dataloader):
        with torch.no_grad():
            bag = bag.to(device)
            bag_msk = bag_msk.to(device)
            output = fcn_model(bag)
            output_np = output.cpu().detach().numpy().copy(
            )  # output_np.shape = (4, 2, 160, 160)
            output_np = np.argmin(output_np, axis=1)
            output_np = np.squeeze(output_np[0, ...])
            bag_msk_np = bag_msk.cpu().detach().numpy().copy(
            )  # bag_msk_np.shape = (4, 2, 160, 160)
            bag_msk_np = np.argmin(bag_msk_np, axis=1)
            bag_msk_np = np.squeeze(bag_msk_np[0, ...])
            cv2.imwrite("test_result/" + str(index) + "_test.jpg",
                        255 * output_np)
            cv2.imwrite("test_result/" + str(index) + "_gt.jpg",
                        255 * bag_msk_np)
            inter = np.sum(np.multiply(output_np, bag_msk_np))
            union = np.sum(output_np) + np.sum(bag_msk_np) - inter
            miou += inter / union
            num = index
    miou = miou / (num + 1)
    print("MIOU is {}".format(miou))
Code example #4
File: train.py  Project: rongmengling/unet_window
def train(epo_num=50, show_vgg_params=False):

    vis = visdom.Visdom()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    vgg_model = VGGNet(requires_grad=True, show_params=show_vgg_params)
    fcn_model = FCNs(pretrained_net=vgg_model, n_class=2)
    fcn_model = fcn_model.to(device)
    criterion = nn.BCELoss().to(device)
    optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.7)

    all_train_iter_loss = []
    all_test_iter_loss = []

    # start timing
    prev_time = datetime.now()
    for epo in range(epo_num):

        train_loss = 0
        fcn_model.train()
        for index, (bag, bag_msk) in enumerate(train_dataloader):
            # bag.shape is torch.Size([4, 3, 160, 160])
            # bag_msk.shape is torch.Size([4, 2, 160, 160])

            bag = bag.to(device)
            bag_msk = bag_msk.to(device)

            optimizer.zero_grad()
            output = fcn_model(bag)
            output = torch.sigmoid(
                output)  # output.shape is torch.Size([4, 2, 160, 160])
            # print('label shape:', bag_msk.shape)
            # print('pred shape:', output.shape)
            # print('label:', bag_msk)
            # print('pred:', output)

            loss = criterion(output, bag_msk)
            loss.backward()
            iter_loss = loss.item()
            all_train_iter_loss.append(iter_loss)
            train_loss += iter_loss
            optimizer.step()

            output_np = output.cpu().detach().numpy().copy(
            )  # output_np.shape = (4, 2, 160, 160)
            output_np = np.argmin(output_np, axis=1)
            bag_msk_np = bag_msk.cpu().detach().numpy().copy(
            )  # bag_msk_np.shape = (4, 2, 160, 160)
            bag_msk_np = np.argmin(bag_msk_np, axis=1)

            if np.mod(index, 15) == 0:
                print('epoch {}, {}/{},train loss is {}'.format(
                    epo, index, len(train_dataloader), iter_loss))
                vis.close()
                vis.images(output_np[:, None, :, :],
                           win='train_pred',
                           opts=dict(title='train prediction'))
                vis.images(bag_msk_np[:, None, :, :],
                           win='train_label',
                           opts=dict(title='label'))
                vis.line(all_train_iter_loss,
                         win='train_iter_loss',
                         opts=dict(title='train iter loss'))

            # plt.subplot(1, 2, 1)
            # plt.imshow(np.squeeze(bag_msk_np[0, ...]), 'gray')
            # plt.subplot(1, 2, 2)
            # plt.imshow(np.squeeze(output_np[0, ...]), 'gray')
            # plt.pause(0.5)

        test_loss = 0
        fcn_model.eval()
        with torch.no_grad():
            for index, (bag, bag_msk) in enumerate(test_dataloader):

                bag = bag.to(device)
                bag_msk = bag_msk.to(device)

                optimizer.zero_grad()
                output = fcn_model(bag)
                output = torch.sigmoid(
                    output)  # output.shape is torch.Size([4, 2, 160, 160])
                loss = criterion(output, bag_msk)
                iter_loss = loss.item()
                all_test_iter_loss.append(iter_loss)
                test_loss += iter_loss

                output_np = output.cpu().detach().numpy().copy(
                )  # output_np.shape = (4, 2, 160, 160)
                output_np = np.argmin(output_np, axis=1)
                bag_msk_np = bag_msk.cpu().detach().numpy().copy(
                )  # bag_msk_np.shape = (4, 2, 160, 160)
                bag_msk_np = np.argmin(bag_msk_np, axis=1)

                if np.mod(index, 15) == 0:
                    print(
                        r'Testing... Open http://localhost:8097/ to see test result.'
                    )
                    vis.close()
                    vis.images(output_np[:, None, :, :],
                               win='test_pred',
                               opts=dict(title='test prediction'))
                    vis.images(bag_msk_np[:, None, :, :],
                               win='test_label',
                               opts=dict(title='label'))
                    vis.line(all_test_iter_loss,
                             win='test_iter_loss',
                             opts=dict(title='test iter loss'))

                # plt.subplot(1, 2, 1)
                # plt.imshow(np.squeeze(bag_msk_np[0, ...]), 'gray')
                # plt.subplot(1, 2, 2)
                # plt.imshow(np.squeeze(output_np[0, ...]), 'gray')
                # plt.pause(0.5)

        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        prev_time = cur_time

        print('epoch train loss = %f, epoch test loss = %f, %s' %
              (train_loss / len(train_dataloader),
               test_loss / len(test_dataloader), time_str))

        if np.mod(epo, 5) == 0:
            torch.save(fcn_model, 'checkpoints/fcn_model_{}.pt'.format(epo))
            print('saving checkpoints/fcn_model_{}.pt'.format(epo))
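
torch.save(fcn_model, ...) above serializes the whole module rather than a state_dict, so reloading it needs the FCNs/VGGNet class definitions importable. A minimal sketch of restoring such a checkpoint (the path is illustrative):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# newer PyTorch versions may additionally require weights_only=False for whole-model checkpoints
fcn_model = torch.load('checkpoints/fcn_model_45.pt', map_location=device)
fcn_model.eval()
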
Code example #5
from torch.autograd import Variable
from torch.utils.data import DataLoader
from utiles.evalution_segmentaion import eval_semantic_segmentation
import torch.nn.functional as F
import torch as t
from predict import test_dataset
from FCN import FCN8s, VGGNet

BATCH_SIZE = 2
miou_list = [0]
test_data = DataLoader(test_dataset,
                       batch_size=BATCH_SIZE,
                       shuffle=True,
                       num_workers=0)

vgg_model = VGGNet(requires_grad=True)
net = FCN8s(pretrained_net=vgg_model, n_class=12)
net.eval()
net.cuda()
net.load_state_dict(
    t.load('D:/机器学习/cvpaper/03语义分割-fcn论文原文及代码附件(1)/code/logs/last.pth'))  # load the trained weights

train_acc = 0
train_miou = 0
train_class_acc = 0
train_mpa = 0
error = 0

for i, sample in enumerate(test_data):  # each sample bundles (data, label)
    # data = Variable(data).cuda()
    # label = Variable(label).cuda()
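
    # --- Illustrative sketch only: the original loop body is truncated here. ---
    # It assumes each sample is a dict with 'img' and 'label' keys and that
    # eval_semantic_segmentation returns the same keys as in code example #6.
    img = Variable(sample['img']).cuda()
    label = Variable(sample['label'].long()).cuda()

    with t.no_grad():
        out = F.log_softmax(net(img), dim=1)

    pre_label = [p for p in out.max(dim=1)[1].data.cpu().numpy()]
    true_label = [l for l in label.data.cpu().numpy()]

    metrics = eval_semantic_segmentation(pre_label, true_label)
    train_acc += metrics['mean_class_accuracy']
    train_miou += metrics['miou']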
Code example #6
def main():

    vgg_model = VGGNet(requires_grad=True)
    net = FCN8s(pretrained_net=vgg_model, n_class=12)
    net = net.cuda()
    criterion = nn.NLLLoss().cuda()
    optimizer = optim.Adam(net.parameters(), lr=1e-4)

    eval_miou_list = []
    best = [0]
    print('-----------------------train-----------------------')

    for epoch in range(30):
        if epoch % 10 == 0 and epoch != 0:
            for group in optimizer.param_groups:
                group['lr'] *= 0.5

        train_loss = 0
        train_acc = 0
        train_miou = 0
        train_class_acc = 0
        # global net  # (original author's note: added by me, possibly unnecessary)
        net = net.train()
        prec_time = datetime.now()
        for i, sample in enumerate(train_data):
            imgdata = Variable(sample['img'].cuda())
            imglabel = Variable(sample['label'].long().cuda())

            optimizer.zero_grad()
            out = net(imgdata)
            out = F.log_softmax(out, dim=1)

            loss = criterion(out, imglabel)

            loss.backward()
            optimizer.step()
            train_loss = loss.item() + train_loss

            pre_label = out.max(dim=1)[1].data.cpu().numpy()
            pre_label = [i for i in pre_label]

            true_label = imglabel.data.cpu().numpy()
            true_label = [i for i in true_label]

            eval_metrix = eval_semantic_segmentation(pre_label, true_label)
            train_acc = eval_metrix['mean_class_accuracy'] + train_acc
            train_miou = eval_metrix['miou'] + train_miou
            train_class_acc = train_class_acc + eval_metrix['class_accuracy']

        net = net.eval()
        eval_loss = 0
        eval_acc = 0
        eval_miou = 0
        eval_class_acc = 0

        for j, sample in enumerate(val_data):
            valImg = Variable(sample['img'].cuda())
            valLabel = Variable(sample['label'].long().cuda())

            out = net(valImg)
            out = F.log_softmax(out, dim=1)
            loss = criterion(out, valLabel)
            eval_loss = loss.item() + eval_loss
            pre_label = out.max(dim=1)[1].data.cpu().numpy()
            pre_label = [i for i in pre_label]

            true_label = valLabel.data.cpu().numpy()
            true_label = [i for i in true_label]

            eval_metrics = eval_semantic_segmentation(pre_label, true_label)
            eval_acc = eval_metrics['mean_class_accuracy'] + eval_acc
            eval_miou = eval_metrics['miou'] + eval_miou
            eval_class_acc = eval_metrics['class_accuracy'] + eval_class_acc

        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prec_time).seconds, 3600)
        m, s = divmod(remainder, 60)

        epoch_str = (
            'Epoch: {}, Train Loss: {:.5f}, Train Acc: {:.5f}, Train Mean IU: {:.5f}, Train_class_acc:{:} \
        Valid Loss: {:.5f}, Valid Acc: {:.5f}, Valid Mean IU: {:.5f} ,Valid Class Acc:{:}'
            .format(epoch, train_loss / len(train_data),
                    train_acc / len(train_data), train_miou / len(train_data),
                    train_class_acc / len(train_data),
                    eval_loss / len(val_data), eval_acc / len(val_data),
                    eval_miou / len(val_data), eval_class_acc / len(val_data)))
        time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
        print(epoch_str + time_str)

        if (max(best) <= eval_miou / len(val_data)):
            best.append(eval_miou / len(val_data))
            t.save(net.state_dict(),
                   'D:/机器学习/cvpaper/03语义分割-fcn论文原文及代码附件(1)/code/logs/last.pth'
                   )  # 'xxx.pth'  # save the best model so far
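
This example pairs F.log_softmax with nn.NLLLoss; that combination is numerically the same as applying nn.CrossEntropyLoss to the raw logits. A minimal sketch of the equivalence (random tensors, illustrative shapes):

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(2, 12, 8, 8)            # (batch, n_class, H, W)
target = torch.randint(0, 12, (2, 8, 8))     # per-pixel class indices

loss_a = nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
loss_b = nn.CrossEntropyLoss()(logits, target)
print(torch.allclose(loss_a, loss_b))        # True up to floating-point error
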
Code example #7
def train(
    epoch_num=100,
    lr=LR,
    show_vgg_params=False,
):

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    vgg_model = VGGNet(requires_grad=True, show_params=show_vgg_params)
    fcn_model = FCNs(pretrained_net=vgg_model, n_class=2)
    fcn_model = fcn_model.to(device)
    criterion = nn.BCELoss().to(device)  # loss function
    optimizer = optim.SGD(fcn_model.parameters(), lr=lr, momentum=0.7)  # optimizer

    all_train_iter_loss = []
    all_test_iter_loss = []
    previous_loss = 1e100

    prev_time = datetime.now()  # start timing
    for epoch in range(epoch_num):  # iterate over epochs

        train_loss = 0
        fcn_model.train()  # training mode
        for index, (bag, bag_msk) in enumerate(train_dataloader):
            # bag.shape is torch.Size([4, 3, 160, 160])
            # bag_msk.shape is torch.Size([4, 2, 160, 160])

            bag = bag.to(device)
            bag_msk = bag_msk.to(device)

            optimizer.zero_grad()
            output = fcn_model(bag)
            output = torch.sigmoid(
                output
            )  # output.shape is torch.Size([4, 2, 160, 160]); sigmoid activation
            # print(output)
            # print(bag_msk)
            loss = criterion(output, bag_msk)
            loss.backward()
            iter_loss = loss.item()
            all_train_iter_loss.append(iter_loss)
            train_loss += iter_loss
            optimizer.step()

            output_np = output.cpu().detach().numpy().copy(
            )  # output_np.shape = (4, 2, 160, 160)
            output_np = np.argmin(output_np, axis=1)
            bag_msk_np = bag_msk.cpu().detach().numpy().copy(
            )  # bag_msk_np.shape = (4, 2, 160, 160)
            bag_msk_np = np.argmin(bag_msk_np, axis=1)

        test_loss = 0
        fcn_model.eval()  # evaluation mode
        with torch.no_grad():
            for index, (bag, bag_msk) in enumerate(test_dataloader):
                bag = bag.to(device)
                bag_msk = bag_msk.to(device)

                optimizer.zero_grad()
                output = fcn_model(bag)
                output = torch.sigmoid(
                    output)  # output.shape is torch.Size([4, 2, 160, 160])
                loss = criterion(output, bag_msk)
                iter_loss = loss.item()
                all_test_iter_loss.append(iter_loss)
                test_loss += iter_loss

                output_np = output.cpu().detach().numpy().copy(
                )  # output_np.shape = (4, 2, 160, 160)
                output_np = np.argmin(output_np, axis=1)
                bag_msk_np = bag_msk.cpu().detach().numpy().copy(
                )  # bag_msk_np.shape = (4, 2, 160, 160)
                bag_msk_np = np.argmin(bag_msk_np, axis=1)

        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        prev_time = cur_time
        if test_loss > previous_loss and lr > 0.001:
            lr *= lr_decay
            # apply the decayed rate to the optimizer; updating the local variable alone has no effect
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        previous_loss = test_loss

        print('epoch train loss = %f, epoch test loss = %f, %s' %
              (train_loss / len(train_dataloader),
               test_loss / len(test_dataloader), time_str))

        if epoch == 0:
            torch.save(
                fcn_model,
                '/home/hxz/networks/BW_NET/model/fcn_model_{}.pt'.format(
                    epoch + 1))
            print('saving /home/hxz/networks/BW_NET/model/fcn_model_{}.pt'.format(epoch + 1))
        if np.mod(epoch + 1, 4) == 0:
            torch.save(
                fcn_model,
                '/home/hxz/networks/BW_NET/model/fcn_model_{}.pt'.format(
                    epoch + 1))
            print('saving /home/hxz/networks/BW_NET/model/fcn_model_{}.pt'.format(epoch + 1))
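
The manual "decay when the test loss stops improving" rule above is roughly what torch.optim.lr_scheduler.ReduceLROnPlateau provides. A minimal sketch of the scheduler-based alternative, reusing this example's fcn_model and test_loss (factor and patience values are illustrative):

import torch.optim as optim

optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.7)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                 factor=0.5, patience=2)

# call once per epoch, after test_loss has been accumulated
scheduler.step(test_loss)
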
Code example #8
File: train.py  Project: Mael-zys/FCN-and-FPN
def train(epo_num=50, show_vgg_params=False):

    #vis = visdom.Visdom()
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    vgg_model = VGGNet(requires_grad=True, show_params=show_vgg_params)
    fcn_model = FCNs(pretrained_net=vgg_model, n_class=2)
    if not torch.cuda.is_available():
        fcn_model.load_state_dict(torch.load(model_path, map_location='cpu'))
    else:
        fcn_model.load_state_dict(torch.load(model_path))
    fcn_model = fcn_model.to(device)
    criterion = nn.BCELoss().to(device)
    # criterion = nn.BCEWithLogitsLoss().to(device)
    optimizer = optim.SGD(fcn_model.parameters(), lr=1e-3, momentum=0.7)
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)
    all_train_iter_loss = []
    all_test_iter_loss = []

    # start timing
    prev_time = datetime.now()
    for epo in range(epo_num):

        train_loss = 0
        fcn_model.train()
        for index, (bag, bag_msk) in enumerate(train_dataloader):
            # bag.shape is torch.Size([4, 3, 160, 160])
            # bag_msk.shape is torch.Size([4, 2, 160, 160])

            bag = bag.to(device)
            bag_msk = bag_msk.to(device)

            optimizer.zero_grad()
            output = fcn_model(bag)
            output = torch.sigmoid(
                output)  # output.shape is torch.Size([4, 2, 160, 160])
            loss = criterion(output, bag_msk)
            loss.backward()
            iter_loss = loss.item()
            all_train_iter_loss.append(iter_loss)
            train_loss += iter_loss
            optimizer.step()

            output_np = output.cpu().detach().numpy().copy(
            )  # output_np.shape = (4, 2, 160, 160)
            output_np = np.argmin(output_np, axis=1)
            #print("size of output is {}".format(output_np.shape))
            bag_msk_np = bag_msk.cpu().detach().numpy().copy(
            )  # bag_msk_np.shape = (4, 2, 160, 160)
            bag_msk_np = np.argmin(bag_msk_np, axis=1)

            if np.mod(index, 50) == 0:
                print('epoch {}, {}/{},train loss is {}'.format(
                    epo, index, len(train_dataloader), iter_loss))
                # vis.close()
                # vis.images(output_np[:, None, :, :], win='train_pred', opts=dict(title='train prediction'))
                # vis.images(bag_msk_np[:, None, :, :], win='train_label', opts=dict(title='label'))
                # vis.line(all_train_iter_loss, win='train_iter_loss',opts=dict(title='train iter loss'))

                # plt.subplot(1, 2, 1)
                # plt.imshow(np.squeeze(bag_msk_np[0, ...]), 'gray')
                # plt.subplot(1, 2, 2)
                # plt.imshow(np.squeeze(output_np[0, ...]), 'gray')
                # plt.pause(0.5)
                # plt.savefig("Result/"+str(index)+"_train.png")
                cv2.imwrite("Result/" + str(index) + "_train.jpg",
                            255 * np.squeeze(output_np[0, ...]))

        test_loss = 0
        fcn_model.eval()
        num_test = 0
        with torch.no_grad():
            for index, (bag, bag_msk) in enumerate(test_dataloader):

                bag = bag.to(device)
                bag_msk = bag_msk.to(device)

                optimizer.zero_grad()
                output = fcn_model(bag)
                output = torch.sigmoid(
                    output)  # output.shape is torch.Size([4, 2, 160, 160])
                loss = criterion(output, bag_msk)
                iter_loss = loss.item()

                test_loss += iter_loss
                num_test = index + 1
                output_np = output.cpu().detach().numpy().copy(
                )  # output_np.shape = (4, 2, 160, 160)
                output_np = np.argmin(output_np, axis=1)
                bag_msk_np = bag_msk.cpu().detach().numpy().copy(
                )  # bag_msk_np.shape = (4, 2, 160, 160)
                bag_msk_np = np.argmin(bag_msk_np, axis=1)

                if np.mod(index, 10) == 0:
                    # plt.subplot(1, 2, 1)
                    # plt.imshow(np.squeeze(bag_msk_np[0, ...]), 'gray')
                    # plt.subplot(1, 2, 2)
                    # plt.imshow(np.squeeze(output_np[0, ...]), 'gray')
                    # plt.pause(0.5)
                    # plt.savefig("Result/"+str(index)+"_test.png")
                    cv2.imwrite("Result/" + str(index) + "_test.jpg",
                                255 * np.squeeze(output_np[0, ...]))
        all_test_iter_loss.append(test_loss / num_test)

        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        prev_time = cur_time

        print('epoch train loss = %f, epoch test loss = %f, %s' %
              (train_loss / len(train_dataloader),
               test_loss / len(test_dataloader), time_str))

        draw_loss_plot(all_train_iter_loss, all_test_iter_loss)

        # if np.mod(epo+1, 10) == 0:
        #torch.save(fcn_model, 'checkpoints/fcn_model_{}.pt'.format(epo+1))
        torch.save(fcn_model.state_dict(),
                   'model_test/fcn_0.001_{0}.model'.format(epo))
        #torch.save(fcn_model, 'model/fcn_{0}.model'.format(epo+1))
        print('saving model_test/fcn_0.001_{0}.model'.format(epo))
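
draw_loss_plot is not shown in this snippet. A hypothetical sketch of what such a helper could look like with matplotlib (not the project's actual implementation; the output path is illustrative):

import matplotlib.pyplot as plt

def draw_loss_plot(train_iter_loss, test_epoch_loss, out_path='Result/loss_curve.png'):
    plt.figure()
    plt.plot(train_iter_loss, label='train loss (per iteration)')
    plt.plot(test_epoch_loss, label='test loss (per epoch)')
    plt.xlabel('step')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig(out_path)
    plt.close()
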
Code example #9
File: train.py  Project: KinsoZHENG/pytorch_FCN
def train(epo_num=50, show_vgg_params=False):
    vis = visdom.Visdom()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    vgg_model = VGGNet(requires_grad=True, show_params=show_vgg_params)
    fcn_model = FCNs(pretrained_net=vgg_model, n_class=2)
    fcn_model = fcn_model.to(device)

    criterion = nn.BCELoss().to(device)
    optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.9)
    lr = 0.01
    # fcn_model, optimizer = amp.initialize(fcn_model, optimizer, opt_level='O1', verbosity=0)

    all_train_iter_loss = []
    all_test_iter_loss = []

    # start timing
    prev_time = datetime.now()
    for epo in range(epo_num + 1):
        # rebuild the dataset and re-split each epoch (random_state=1 keeps the split identical)
        data = customer_Dataset(transform)
        train_dataset, test_dataset = train_test_split(data,
                                                       test_size=0.1,
                                                       random_state=1)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=16,
                                      shuffle=True,
                                      num_workers=16)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=16,
                                     shuffle=True,
                                     num_workers=16)

        train_loss = 0  # reset epoch train loss
        fcn_model.train()
        if (epo % 25 == 0) and (epo != 0):
            lr = lr * 0.1
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        """
        Part of training
        """
        for index, (data, data_msk) in enumerate(train_dataloader):
            # data.shape is torch.Size([4, 3, 160, 160])
            # data_msk.shape is torch.Size([4, 2, 160, 160])

            data = data.to(device)
            data_msk = data_msk.to(device)

            # data_msk = data_msk[0].to(device)
            # data_msk = data_msk[1].to(device)
            # data_msk = data_msk[2].to(device)
            # data_msk = data_msk[3].to(device)
            # data_msk = data_msk[4].to(device)

            optimizer.zero_grad()
            output = fcn_model(data)
            output = torch.sigmoid(
                output)  # output.shape is torch.Size([4, 2, 160, 160])
            loss = criterion(output, data_msk)
            loss.backward()
            iter_loss = loss.item()
            all_train_iter_loss.append(iter_loss)
            train_loss += iter_loss
            optimizer.step()

            output_np = output.cpu().detach().numpy().copy(
            )  # output_np.shape = (4, 2, 160, 160)
            output_np = np.argmin(output_np, axis=1)
            data_msk_np = data_msk.cpu().detach().numpy().copy(
            )  # data_msk_np.shape = (4, 2, 160, 160)
            data_msk_np = np.argmin(data_msk_np, axis=1)
            data = data.cpu().detach().numpy().copy()

            if np.mod(index, 7) == 0:
                print(
                    '[train] epoch {}/{}, {}/{},\ttrain loss is {},\tlearning rate is {}'
                    .format(epo, epo_num, index, len(train_dataloader),
                            iter_loss, lr))
                # vis.close()
                for idx in range(len(data)):
                    data[idx] = image_process_cv_to_vis(data[idx])

                vis.images(data,
                           win='train_input_ori_image',
                           opts=dict(title='train_input_ori_image'))

                vis.images(data_msk_np[:, None, :, :],
                           win='train_label',
                           opts=dict(title='train_label'))

                vis.images(output_np[:, None, :, :],
                           win='train_pred',
                           opts=dict(title='train prediction'))

                vis.line(all_train_iter_loss,
                         win='train_iter_loss',
                         opts=dict(title='train iter loss'))

        # vis.line(train_loss, epo, win='train_epoch_loss', opts=dict(title='train_epoch_loss'))
        """
        Part of eval
        """
        test_loss = 0  # clear test_loss
        fcn_model.eval()
        with torch.no_grad():
            for index, (data, data_msk) in enumerate(test_dataloader):

                data = data.to(device)
                data_msk = data_msk.to(device)

                optimizer.zero_grad()
                output = fcn_model(data)
                output = torch.sigmoid(
                    output)  # output.shape is torch.Size([4, 2, 160, 160])
                loss = criterion(output, data_msk)
                iter_loss = loss.item()
                all_test_iter_loss.append(iter_loss)
                test_loss += iter_loss

                output_np = output.cpu().detach().numpy().copy(
                )  # output_np.shape = (4, 2, 160, 160)
                output_np = np.argmin(output_np, axis=1)
                data_msk_np = data_msk.cpu().detach().numpy().copy(
                )  # data_msk_np.shape = (4, 2, 160, 160)
                data_msk_np = np.argmin(data_msk_np, axis=1)
                data = data.cpu().detach().numpy().copy()

                if np.mod(index, 1) == 0:
                    print(
                        '[test]  epoch {}/{}, {}/{},\ttest  loss is {},\tlearning rate is {}'
                        .format(epo, epo_num, index, len(test_dataloader),
                                iter_loss, lr))
                    # vis.close()
                    for idx in range(len(data)):
                        data[idx] = image_process_cv_to_vis(data[idx])

                    vis.images(data,
                               win='test_input_ori_image',
                               opts=dict(title='test_input_ori_image'))

                    vis.images(data_msk_np[:, None, :, :],
                               win='test_label',
                               opts=dict(title='test_label'))

                    vis.images(output_np[:, None, :, :],
                               win='test_pred',
                               opts=dict(title='test prediction'))

                    vis.line(all_test_iter_loss,
                             win='test_iter_loss',
                             opts=dict(title='test iter loss'))

        # vis.line(test_loss, epo, win='test_loss_loss', opts=dict(title='test_loss_loss'))

        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        prev_time = cur_time

        print('epoch train loss = %f, epoch test loss = %f, %s\n\n' %
              (train_loss / len(train_dataloader),
               test_loss / len(test_dataloader), time_str))

        if np.mod(epo, 5) == 0:
            torch.save(fcn_model, 'checkpoints/fcn_model_{}.pt'.format(epo))
            print('saving checkpoints/fcn_model_{}.pt\n\n'.format(epo))
Code example #10
import torch
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt

from data_loader import test_dataloader
from multiprocessing import set_start_method
from FCN import FCN8s, VGGNet
from config import *
from torchvision import transforms

vgg_model = VGGNet(requires_grad=True, show_params=False)
fcn_model = FCN8s(vgg_model, 2)

# Load pytorch model
fcn_model.load_state_dict(torch.load('model'))
fcn_model.eval()

try:
    set_start_method('spawn')
except:
    pass

for idx, (img, label) in enumerate(test_dataloader):

    output = fcn_model(img)
    output_np = output.cpu().detach().numpy().copy()
    output_np = np.argmin(output_np, axis=1)

    # Origin image
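
    # --- Illustrative continuation: the original snippet is truncated here. ---
    # Undo the usual ImageNet normalization to display the input; the mean/std
    # values are an assumption about the transforms used by test_dataloader.
    img_np = img.cpu().detach().numpy().copy()[0]             # (3, H, W)
    img_np = np.transpose(img_np, (1, 2, 0))                  # (H, W, 3)
    img_np = img_np * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
    img_np = np.clip(img_np, 0, 1)

    plt.subplot(1, 2, 1)
    plt.imshow(img_np)
    plt.subplot(1, 2, 2)
    plt.imshow(np.squeeze(output_np[0, ...]), 'gray')
    plt.pause(0.5)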
Code example #11
def train(show_vgg_params=False):

    vis = visdom.Visdom(env='fcn')
    """
    torch.device: an object representing the device on which a torch.Tensor is or will be allocated
    """

    # Use cuda training if cuda is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    vgg_model = VGGNet(requires_grad=True, show_params=show_vgg_params)
    fcn_model = FCN8s(pretrained_net=vgg_model, n_class=2)
    # Copy data to GPU and run on GPU
    fcn_model = fcn_model.to(device)

    # Binary cross entropy loss function
    criterion = nn.BCELoss().to(device)
    # Stochastic Gradient Descent optimizer
    optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.7)

    all_train_iter_loss = []
    all_test_iter_loss = []

    # Start timing
    stime = datetime.now()

    try:
        set_start_method('spawn')
    except:
        pass

    for epo in range(EPOCH_NUM):
        """
        Training part
        """
        train_loss = 0
        fcn_model.train()
        for idx, (img, label) in enumerate(train_dataloader):

            img = img.to(device)
            label = label.to(device)
            """
            Init grad to zero
            We need to set the gradients to zero before starting to do backpropagation
            because pytorch accumulates the gradients on subsequent backward passes
            """
            optimizer.zero_grad()
            output = fcn_model(img)
            output = torch.sigmoid(output)  # Get probability

            # Calc loss and backpropagation
            loss = criterion(output, label)
            loss.backward()

            # Extract loss value to echo
            iter_loss = loss.item()
            train_loss += iter_loss
            all_train_iter_loss.append(iter_loss)

            # Update all parameters
            optimizer.step()
            """
            Get the training result of every epoch

            cpu(): put data in cpu
            detach(): return a tensor disallowed backpropagation
            numpy(): cast tensor to numpy
            copy(): copy
            """
            """
            output.shape: torch.Size([BATCH_SIZE, 2, *IMAGE_SIZE])
            output_np.shape: (BATCH_SIZE, 2, *IMAGE_SIZE)
            np.argmin(output_np): (BATCH_SIZE, *IMAGE_SIZE)
            np.squeeze(output_np[0, ...]).shape: (*IMAGE_SIZE)
            """
            output_np = output.cpu().detach().numpy().copy()
            output_np = np.argmin(
                output_np, axis=1)  # Get indice of smallest value of row
            label_np = label.cpu().detach().numpy().copy()
            label_np = np.argmin(label_np, axis=1)

            # Log once every 15 steps
            if np.mod(idx, 15) == 0:
                print('epoch {}, {}/{},train loss is {}'.format(
                    epo, idx, len(train_dataloader), iter_loss))  # Log

                # output_np[:, None, ...] == (batch_size, 1, height, width); batch_size sets how many images are shown
                # visdom:
                #   - win: window name
                #   - opts: visualization config
                vis.images(output_np[:, None, :, :],
                           win='train_pred',
                           opts=dict(title='train prediction'))
                vis.images(label_np[:, None, :, :],
                           win='train_label',
                           opts=dict(title='label'))
                vis.line(all_train_iter_loss,
                         win='train_iter_loss',
                         opts=dict(title='train iter loss'))
        """
        Testing part
        """
        test_loss = 0
        fcn_model.eval()
        for idx, (img, label) in enumerate(test_dataloader):

            img = img.to(device)
            label = label.to(device)

            optimizer.zero_grad()

            output = fcn_model(img)
            output = torch.sigmoid(output)

            # compute the loss after the forward pass so it uses this batch's output
            loss = criterion(output, label)

            iter_loss = loss.item()
            test_loss += iter_loss
            all_test_iter_loss.append(iter_loss)

            output_np = output.cpu().detach().numpy().copy()
            output_np = np.argmin(output_np, axis=1)

            label_np = label.cpu().detach().numpy().copy()
            label_np = np.argmin(label_np, axis=1)

            if np.mod(idx, 15) == 0:
                print(
                    r'Testing... Open http://localhost:8097/ to see test result.'
                )
                vis.images(output_np[:, None, :, :],
                           win='test_pred',
                           opts=dict(title='test prediction'))
                vis.images(label_np[:, None, :, :],
                           win='test_label',
                           opts=dict(title='label'))
                vis.line(all_test_iter_loss,
                         win='test_iter_loss',
                         opts=dict(title='test iter loss'))

        etime = datetime.now()  # End time
        h, remainder = divmod((etime - stime).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        stime = etime

        # Log
        print('epoch train loss = %f, epoch test loss = %f, %s' %
              (train_loss / len(train_dataloader),
               test_loss / len(test_dataloader), time_str))

    # Save trained model
    torch.save(fcn_model.state_dict(), 'model')