Example 1
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)


    model = Res_Deeplab(num_classes=args.num_classes)

    if args.pretrained_model is not None:
        args.restore_from = pretrianed_models_dict[args.pretrained_model]

    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, crop_size=(505, 505), mean=IMG_MEAN, scale=False, mirror=False),
                                    batch_size=1, shuffle=False, pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(505, 505), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    colorize = VOCColorize()

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, size, name = batch
        size = size[0].numpy()
        output = model(Variable(image, volatile=True).cuda(gpu0))
        output = interp(output).cpu().data[0].numpy()

        output = output[:,:size[0],:size[1]]
        gt = np.asarray(label[0].numpy()[:size[0],:size[1]], dtype=np.int)

        output = output.transpose(1,2,0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        filename = os.path.join(args.save_dir, '{}.png'.format(name[0]))
        color_file = Image.fromarray(colorize(output).transpose(1, 2, 0), 'RGB')
        color_file.save(filename)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    filename = os.path.join(args.save_dir, 'result.txt')
    get_iou(data_list, args.num_classes, filename)
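get_iou is not defined in Examples 1 to 7; it consumes the [gt.flatten(), output.flatten()] pairs collected above and reports per-class IoU. A minimal sketch of that aggregation, assuming the 3-argument signature used here (illustrative, not the original implementation):

import numpy as np

def get_iou(data_list, class_num, save_path=None):
    # accumulate one confusion matrix over all (ground truth, prediction) pairs
    hist = np.zeros((class_num, class_num), dtype=np.int64)
    for gt, pred in data_list:
        valid = (gt >= 0) & (gt < class_num)  # drop ignore labels such as 255
        hist += np.bincount(class_num * gt[valid] + pred[valid],
                            minlength=class_num ** 2).reshape(class_num, class_num)
    # per-class IoU = TP / (TP + FP + FN); classes never seen come out as nan
    with np.errstate(divide='ignore', invalid='ignore'):
        iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    print('meanIOU: {}'.format(np.nanmean(iou)))
    if save_path is not None:
        with open(save_path, 'w') as f:
            for i, v in enumerate(iou):
                f.write('class {:2d} IU {:.4f}\n'.format(i, v))
            f.write('meanIOU: {}\n'.format(np.nanmean(iou)))
    return iou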
Example 2
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)


    model = Res_Deeplab(num_classes=args.num_classes)

    if args.pretrained_model is not None:
        args.restore_from = pretrianed_models_dict[args.pretrained_model]  # a named pretrained model overrides restore_from

    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)  # e.g. VOC_25000
    model.load_state_dict(saved_state_dict)

    model.eval() #evaluation mode
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, crop_size=(505, 505), mean=IMG_MEAN, scale=False, mirror=False),
                                    batch_size=1, shuffle=False, pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(505, 505), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    colorize = VOCColorize() # colorize!
    with torch.no_grad():  # no gradients needed for evaluation (PyTorch >= 0.4)
        for index, batch in enumerate(testloader):
            if index % 100 == 0:
                print('%d processed' % index)
            image, label, size, name = batch # size >>  tensor([[366, 500,   3]])
            size = size[0].numpy() ## [366 500 3]
            output = model(image.cuda(args.gpu))
            output = interp(output).cpu().data[0].numpy()# 21,505,505
            output = output[:,:size[0],:size[1]] # 21,366,500

            gt = np.asarray(label[0].numpy()[:size[0],:size[1]], dtype=np.int) # size of each image is diff

            output = output.transpose(1,2,0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

            filename = os.path.join(args.save_dir, '{}.png'.format(name[0]))
            color_file = Image.fromarray(colorize(output).transpose(1, 2, 0), 'RGB') # colorize the output
            color_file.save(filename)

            # show_all(gt, output)
            data_list.append([gt.flatten(), output.flatten()])

    filename = os.path.join(args.save_dir, 'result' + args.restore_from[-10:-4] + '.txt')
    get_iou(data_list, args.num_classes, filename)
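Relative to Example 1, inference now runs under torch.no_grad(), which replaced the Variable(..., volatile=True) flag deprecated in PyTorch 0.4. A version-agnostic helper, as a sketch (forward_no_grad is a hypothetical name, not part of the original script):

import torch
from torch.autograd import Variable

def forward_no_grad(model, image, gpu=0):
    # PyTorch >= 0.4: disable autograd bookkeeping around the forward pass
    if hasattr(torch, 'no_grad'):
        with torch.no_grad():
            return model(image.cuda(gpu))
    # pre-0.4 releases: mark the input volatile instead
    return model(Variable(image, volatile=True).cuda(gpu))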
Example 3
def main():
    """Create the model and start the evaluation process."""

    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    if args.model == 'Deeplab':
        model = Res_Deeplab(num_classes=args.num_classes)
    elif args.model == 'DeeplabVGG':
        model = DeeplabVGG(num_classes=args.num_classes)
    else:
        raise ValueError('Unknown model: {}'.format(args.model))

    saved_state_dict = torch.load(args.restore_from)

    ### for running different versions of pytorch
    model_dict = model.state_dict()
    saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
    model_dict.update(saved_state_dict)
    ###
    model.load_state_dict(model_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(cityscapesDataSet(args.data_dir, args.data_list, crop_size=(1024, 512), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
                                    batch_size=1, shuffle=False, pin_memory=True)


    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(1024, 2048), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(1024, 2048), mode='bilinear')

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, _, name = batch
        output = model(Variable(image, volatile=True).cuda(gpu0))
        output = interp(output).cpu().data[0].numpy()

        output = output.transpose(1,2,0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

        output_col = colorize_mask(output)
        output = Image.fromarray(output)

        name = name[0].split('/')[-1]
        output.save('%s/%s' % (args.save, name))
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
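colorize_mask is not shown in this example; in Cityscapes evaluation scripts of this kind it is usually a paletted PIL image built from the train-id color table, roughly as below (only the first few palette entries are spelled out; the remainder is zero-padded here for brevity):

import numpy as np
from PIL import Image

# leading entries of the Cityscapes train-id palette: road, sidewalk, building, ...
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70]
palette += [0] * (256 * 3 - len(palette))  # pad to a full 256-entry palette

def colorize_mask(mask):
    # mask: 2-D uint8 array of train ids -> paletted PIL image
    new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
    new_mask.putpalette(palette)
    return new_mask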
Example 4
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    # gpu0 = args.gpu

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    
    model = Res_Deeplab(num_classes=args.num_classes)

    if args.pretrained_model is not None:
        args.restore_from = pretrianed_models_dict[args.pretrained_model]

    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    # model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, crop_size=(505, 505), mean=IMG_MEAN, scale=False, mirror=False), 
                                    batch_size=1, shuffle=False, pin_memory=True)

    interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    colorize = VOCColorize()

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, size, name = batch
        size = size[0].numpy()
        # output = model(Variable(image, volatile=True).cuda(gpu0))
        output = model(Variable(image, volatile=True).cpu())
        output = interp(output).cpu().data[0].numpy()

        output = output[:,:size[0],:size[1]]
        gt = np.asarray(label[0].numpy()[:size[0],:size[1]], dtype=np.int)

        output = output.transpose(1,2,0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)
        
        filename = os.path.join(args.save_dir, '{}.png'.format(name[0]))
        color_file = Image.fromarray(colorize(output).transpose(1, 2, 0), 'RGB')
        color_file.save(filename)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    filename = os.path.join(args.save_dir, 'result.txt')
    get_iou(data_list, args.num_classes, filename)
Example 5
def eval(pth, cityscapes_eval_dir, i_iter):
    """Create the model and start the evaluation process."""

    args = get_arguments()

    gpu0 = args.gpu

    if args.model == 'ResNet':
        model = Res_Deeplab(num_classes=args.num_classes)
    elif args.model == 'VGG':
        model = DeeplabVGG(num_classes=args.num_classes)
    else:
        raise ValueError('Unknown model: {}'.format(args.model))
    saved_state_dict = torch.load(pth)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    cityscapesloader = data.DataLoader(cityscapesDataSet(
        args.cityscapes_data_dir,
        args.cityscapes_data_list,
        crop_size=(1024, 512),
        mean=IMG_MEAN,
        scale=False,
        mirror=False,
        set=args.set),
                                       batch_size=1,
                                       shuffle=False,
                                       pin_memory=True)

    interp = nn.Upsample(size=(1024, 2048),
                         mode='bilinear',
                         align_corners=True)

    for index, batch in enumerate(cityscapesloader):
        with torch.no_grad():
            if index % 100 == 0:
                print('%d processed' % index)
            image, _, name = batch

            output = model(Variable(image).cuda(gpu0))
            output = interp(output).cpu().data[0].numpy()

            output = output.transpose(1, 2, 0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

            output_col = colorize_mask(output)
            output = Image.fromarray(output)

            name = name[0].split('/')[-1]
            output.save('%s/%s' % (cityscapes_eval_dir, name))
            output_col.save('%s/%s_color.png' %
                            (cityscapes_eval_dir, name.split('.')[0]))

            if i_iter == 0:
                break
Example 6
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    model = Res_Deeplab(num_classes=args.num_classes)

    # if args.pretrained_model != None:
    #     args.restore_from = pretrianed_models_dict[args.pretrained_model]
    #
    # if args.restore_from[:4] == 'http' :
    #     saved_state_dict = model_zoo.load_url(args.restore_from)
    # else:
    #     saved_state_dict = torch.load(args.restore_from)
    #model.load_state_dict(saved_state_dict)

    #model.load_state_dict(torch.load('/data/wyc/AdvSemiSeg/snapshots/VOC_15000.pth'))#70.7
    state_dict = torch.load(
        '/data1/wyc/AdvSemiSeg/snapshots/VOC_t_baseline_1adv_mul_new_two_patch2_20000.pth'
    )  # experiment notes: baseline 70.7 / adv 70.9 / nadv 70.5 / n adv 0.694

    # state_dict = torch.load(
    #     '/home/wyc/VOC_t_baseline_nadv2_20000.pth')  # baseline707 adv 709 nadv 705()*2

    # original saved file with DataParallel

    # create new OrderedDict that does not contain `module.`
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    # load params

    new_params = model.state_dict().copy()
    for name, param in new_params.items():
        print(name)
        if name in new_state_dict and param.size() == new_state_dict[name].size():
            new_params[name].copy_(new_state_dict[name])
            print('copy {}'.format(name))

    model.load_state_dict(new_params)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(505, 505),
                             mode='bilinear',
                             align_corners=True)
    else:
        interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    colorize = VOCColorize()

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, size, name = batch
        size = size[0].numpy()
        output = model(Variable(image, volatile=True).cuda(gpu0))
        output = interp(output).cpu().data[0].numpy()

        output = output[:, :size[0], :size[1]]
        gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)

        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        filename = os.path.join(args.save_dir, '{}.png'.format(name[0]))
        color_file = Image.fromarray(
            colorize(output).transpose(1, 2, 0), 'RGB')
        color_file.save(filename)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    filename = os.path.join(args.save_dir, 'result.txt')
    get_iou(data_list, args.num_classes, filename)
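The k[7:] slice above assumes every checkpoint key carries the module. prefix that nn.DataParallel adds. A slightly safer variant strips the prefix only where it is present (a sketch):

from collections import OrderedDict

def strip_module_prefix(state_dict):
    # drop the 'module.' prefix that nn.DataParallel prepends, where present
    out = OrderedDict()
    for k, v in state_dict.items():
        out[k[len('module.'):] if k.startswith('module.') else k] = v
    return out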
Example 7
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)


    model = Res_Deeplab(num_classes=args.num_classes)

    # if args.pretrained_model != None:
    #     args.restore_from = pretrianed_models_dict[args.pretrained_model]
    #
    # if args.restore_from[:4] == 'http' :
    #     saved_state_dict = model_zoo.load_url(args.restore_from)
    # else:
    #     saved_state_dict = torch.load(args.restore_from)
    #model.load_state_dict(saved_state_dict)

    #model.load_state_dict(torch.load('/data/wyc/AdvSemiSeg/snapshots/VOC_15000.pth'))
    state_dict=torch.load('/data1/wyc/AdvSemiSeg/snapshots/VOC_t_concat_pred_img_15000.pth')
    from model.discriminator_pred_concat_img import FCDiscriminator

    model_D = FCDiscriminator(num_classes=args.num_classes)

    state_dict_d = torch.load('/data1/wyc/AdvSemiSeg/snapshots/VOC_t_concat_pred_img_15000_D.pth')


    # original saved file with DataParallel

    # create new OrderedDict that does not contain `module.`
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    # load params
    new_params = model.state_dict().copy()
    for name, param in new_params.items():
        print(name)
        if name in new_state_dict and param.size() == new_state_dict[name].size():
            new_params[name].copy_(new_state_dict[name])
            print('copy {}'.format(name))

    model.load_state_dict(new_params)

    model.eval()
    model.cuda()

    new_state_dict_d = OrderedDict()
    for k, v in state_dict_d.items():
        name = k[7:]  # remove `module.`
        new_state_dict_d[name] = v

    new_params_d = model_D.state_dict().copy()
    for name, param in new_params_d.items():

        print(name)
        if name in new_state_dict_d and param.size() == new_state_dict_d[name].size():
            new_params_d[name].copy_(new_state_dict_d[name])
            print('copy {}'.format(name))

    model_D.load_state_dict(new_params_d)

    model_D.eval()
    model_D.cuda()

    testloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, crop_size=(505, 505), mean=IMG_MEAN, scale=False, mirror=False),
                                    batch_size=1, shuffle=False, pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(505, 505), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    colorize = VOCColorize()

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, size, name = batch
        size = size[0].numpy()
        output = model(Variable(image, volatile=True).cuda())
        image_d = Variable(image, volatile=True).cuda()
        output = interp(output)
        output_dout = output.clone()
        output_pred = F.softmax(output, dim=1).cpu().data[0].numpy()
        label2 = label[0].numpy()
        output = output.cpu().data[0].numpy()
        output = output[:,:size[0],:size[1]]
        gt = np.asarray(label[0].numpy()[:size[0],:size[1]], dtype=np.int)
        output = output.transpose(1,2,0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        #"""pred result"""
        filename = os.path.join(args.save_dir, '{}.png'.format(name[0]))
        color_file = Image.fromarray(colorize(output).transpose(1, 2, 0), 'RGB')
        color_file.save(filename)


        #"""the area of the pred which is wrong"""
        output_mistake=np.zeros(output.shape)
        semi_ignore_mask_correct = (output == gt)
        #semi_ignore_mask_255=(gt==255)
        output_mistake[semi_ignore_mask_correct] = 255
        #output_mistake[semi_ignore_mask_255] = 255
        output_mistake = np.expand_dims(output_mistake, axis=2)
        filename2 = os.path.join('/data1/wyc/AdvSemiSeg/pred_mis/', '{}.png'.format(name[0]))
        cv2.imwrite(filename2, output_mistake)



        #"""dis confidence map decide line of pred map"""
        D_out = interp(model_D(torch.cat([F.softmax(output_dout, dim=1),F.sigmoid(image_d)],1)))#67
        D_out_sigmoid = (F.sigmoid(D_out).data[0].cpu().numpy())
        D_out_sigmoid = D_out_sigmoid[:, :size[0], :size[1]]
        semi_ignore_mask_dout0 = (D_out_sigmoid < 0.0001)
        semi_ignore_mask_dout255 = (D_out_sigmoid >= 0.0001)

        D_out_sigmoid[semi_ignore_mask_dout0] = 0
        D_out_sigmoid[semi_ignore_mask_dout255] = 255
        filename2 = os.path.join('/data1/wyc/AdvSemiSeg/confidence_line/', '{}.png'.format(name[0]))#0 black 255 white
        cv2.imwrite(filename2,D_out_sigmoid.transpose(1, 2, 0))


        #""""pred max decide line of pred map"""
        # id2 = np.argmax(output_pred, axis=0)
        # map=np.zeros([1,id2.shape[0],id2.shape[1]])
        # for i in range(id2.shape[0]):
        #     for j in range(id2.shape[1]):
        #         map[0][i][j]=output_pred[id2[i][j]][i][j]
        # semi_ignore_mask2 = (map < 0.999999)
        # semi_ignore_mask3 = (map >= 0.999999)
        # map[semi_ignore_mask2] = 0
        # map[semi_ignore_mask3] = 255
        # map = map[:, :size[0], :size[1]]
        # filename2 = os.path.join('/data1/wyc/AdvSemiSeg/pred_line/', '{}.png'.format(name[0]))#0 black 255 white
        # cv2.imwrite(filename2,map.transpose(1, 2, 0))





        data_list.append([gt.flatten(), output.flatten()])

    filename = os.path.join(args.save_dir, 'result.txt')
    get_iou(data_list, args.num_classes, filename)
Example 8
def evaluate(arch, dataset, ignore_label, restore_from, pretrained_model,
             save_dir, device):
    import argparse
    import scipy
    from scipy import ndimage
    import cv2
    import numpy as np
    import sys
    from collections import OrderedDict
    import os

    import torch
    import torch.nn as nn
    from torch.autograd import Variable
    import torchvision.models as models
    import torch.nn.functional as F
    from torch.utils import data, model_zoo

    from model.deeplab import Res_Deeplab
    from model.unet import unet_resnet50
    from model.deeplabv3 import resnet101_deeplabv3
    from dataset.voc_dataset import VOCDataSet

    from PIL import Image

    import matplotlib.pyplot as plt

    pretrianed_models_dict = {
        'semi0.125':
        'http://vllab1.ucmerced.edu/~whung/adv-semi-seg/AdvSemiSegVOC0.125-03c6f81c.pth',
        'semi0.25':
        'http://vllab1.ucmerced.edu/~whung/adv-semi-seg/AdvSemiSegVOC0.25-473f8a14.pth',
        'semi0.5':
        'http://vllab1.ucmerced.edu/~whung/adv-semi-seg/AdvSemiSegVOC0.5-acf6a654.pth',
        'advFull':
        'http://vllab1.ucmerced.edu/~whung/adv-semi-seg/AdvSegVOCFull-92fbc7ee.pth'
    }

    class VOCColorize(object):
        def __init__(self, n=22):
            self.cmap = color_map(22)
            self.cmap = torch.from_numpy(self.cmap[:n])

        def __call__(self, gray_image):
            size = gray_image.shape
            color_image = np.zeros((3, size[0], size[1]), dtype=np.uint8)

            for label in range(0, len(self.cmap)):
                mask = (label == gray_image)
                color_image[0][mask] = self.cmap[label][0]
                color_image[1][mask] = self.cmap[label][1]
                color_image[2][mask] = self.cmap[label][2]

            # handle void
            mask = (255 == gray_image)
            color_image[0][mask] = color_image[1][mask] = color_image[2][
                mask] = 255

            return color_image

    def color_map(N=256, normalized=False):
        def bitget(byteval, idx):
            return ((byteval & (1 << idx)) != 0)

        dtype = 'float32' if normalized else 'uint8'
        cmap = np.zeros((N, 3), dtype=dtype)
        for i in range(N):
            r = g = b = 0
            c = i
            for j in range(8):
                r = r | (bitget(c, 0) << 7 - j)
                g = g | (bitget(c, 1) << 7 - j)
                b = b | (bitget(c, 2) << 7 - j)
                c = c >> 3

            cmap[i] = np.array([r, g, b])

        cmap = cmap / 255 if normalized else cmap
        return cmap
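
    # color_map builds the standard VOC palette: each pass of the inner loop
    # peels three bits off the class index and writes them into progressively
    # lower bits of R, G and B, so class 1 -> (128, 0, 0), 2 -> (0, 128, 0),
    # 3 -> (128, 128, 0), and so on.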

    def get_iou(data_list,
                class_num,
                ignore_label,
                class_names,
                save_path=None):
        from utils.evaluation import EvaluatorIoU

        evaluator = EvaluatorIoU(class_num)
        for truth, prediction in data_list:
            evaluator.sample(truth, prediction, ignore_value=ignore_label)

        per_class_iou = evaluator.score()
        mean_iou = per_class_iou.mean()

        for i, (class_name, iou) in enumerate(zip(class_names, per_class_iou)):
            print('class {:2d} {:12} IU {:.2f}'.format(i, class_name, iou))

        print('meanIOU: ' + str(mean_iou) + '\n')
        if save_path:
            with open(save_path, 'w') as f:
                for i, (class_name,
                        iou) in enumerate(zip(class_names, per_class_iou)):
                    f.write('class {:2d} {:12} IU {:.2f}'.format(
                        i, class_name, iou) + '\n')
                f.write('meanIOU: ' + str(mean_iou) + '\n')

    def show_all(gt, pred):
        import matplotlib.pyplot as plt
        from matplotlib import colors
        from mpl_toolkits.axes_grid1 import make_axes_locatable

        fig, axes = plt.subplots(1, 2)
        ax1, ax2 = axes

        colormap = [(0, 0, 0), (0.5, 0, 0), (0, 0.5, 0), (0.5, 0.5, 0),
                    (0, 0, 0.5), (0.5, 0, 0.5), (0, 0.5, 0.5), (0.5, 0.5, 0.5),
                    (0.25, 0, 0), (0.75, 0, 0), (0.25, 0.5, 0), (0.75, 0.5, 0),
                    (0.25, 0, 0.5), (0.75, 0, 0.5), (0.25, 0.5, 0.5),
                    (0.75, 0.5, 0.5), (0, 0.25, 0), (0.5, 0.25, 0),
                    (0, 0.75, 0), (0.5, 0.75, 0), (0, 0.25, 0.5)]
        cmap = colors.ListedColormap(colormap)
        bounds = [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
            19, 20, 21
        ]
        norm = colors.BoundaryNorm(bounds, cmap.N)

        ax1.set_title('gt')
        ax1.imshow(gt, cmap=cmap, norm=norm)

        ax2.set_title('pred')
        ax2.imshow(pred, cmap=cmap, norm=norm)

        plt.show()

    torch_device = torch.device(device)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    if dataset == 'pascal_aug':
        ds = VOCDataSet()
    else:
        print('Dataset {} not yet supported'.format(dataset))
        return

    if arch == 'deeplab2':
        model = Res_Deeplab(num_classes=ds.num_classes)
    elif arch == 'unet_resnet50':
        model = unet_resnet50(num_classes=ds.num_classes)
    elif arch == 'resnet101_deeplabv3':
        model = resnet101_deeplabv3(num_classes=ds.num_classes)
    else:
        print('Architecture {} not supported'.format(arch))
        return

    if pretrained_model is not None:
        restore_from = pretrianed_models_dict[pretrained_model]

    if restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(restore_from)
    else:
        saved_state_dict = torch.load(restore_from)

    model.load_state_dict(saved_state_dict)

    model.eval()
    model = model.to(torch_device)

    ds_val_xy = ds.val_xy(crop_size=(505, 505),
                          scale=False,
                          mirror=False,
                          mean=model.MEAN,
                          std=model.STD)

    testloader = data.DataLoader(ds_val_xy,
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    data_list = []

    colorize = VOCColorize()

    with torch.no_grad():
        for index, batch in enumerate(testloader):
            if index % 100 == 0:
                print('%d processed' % index)
            image, label, size, name = batch
            size = size[0].numpy()
            image = image.to(torch_device, dtype=torch.float)
            output = model(image)
            output = output.cpu().data[0].numpy()

            output = output[:, :size[0], :size[1]]
            gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)

            output = output.transpose(1, 2, 0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

            filename = os.path.join(save_dir, '{}.png'.format(name[0]))
            color_file = Image.fromarray(
                colorize(output).transpose(1, 2, 0), 'RGB')
            color_file.save(filename)

            # show_all(gt, output)
            data_list.append([gt.flatten(), output.flatten()])

        filename = os.path.join(save_dir, 'result.txt')
        get_iou(data_list, ds.num_classes, ignore_label, ds.class_names,
                filename)
Example 9
def train(log_file, arch, dataset, batch_size, iter_size, num_workers,
          partial_data, partial_data_size, partial_id, ignore_label, crop_size,
          eval_crop_size, is_training, learning_rate, learning_rate_d,
          supervised, lambda_adv_pred, lambda_semi, lambda_semi_adv, mask_t,
          semi_start, semi_start_adv, d_remain, momentum, not_restore_last,
          num_steps, power, random_mirror, random_scale, random_seed,
          restore_from, restore_from_d, eval_every, save_snapshot_every,
          snapshot_dir, weight_decay, device):
    settings = locals().copy()

    import cv2
    import torch
    import torch.nn as nn
    from torch.utils import data, model_zoo
    import numpy as np
    import pickle
    import torch.optim as optim
    import torch.nn.functional as F
    import scipy.misc
    import sys
    import os
    import os.path as osp

    from model.deeplab import Res_Deeplab
    from model.unet import unet_resnet50
    from model.deeplabv3 import resnet101_deeplabv3
    from model.discriminator import FCDiscriminator
    from utils.loss import CrossEntropy2d, BCEWithLogitsLoss2d
    from utils.evaluation import EvaluatorIoU
    from dataset.voc_dataset import VOCDataSet
    import logger

    torch_device = torch.device(device)

    import time

    if log_file != '' and log_file != 'none':
        if os.path.exists(log_file):
            print('Log file {} already exists; exiting...'.format(log_file))
            return

    with logger.LogFile(log_file if log_file != 'none' else None):
        if dataset == 'pascal_aug':
            ds = VOCDataSet(augmented_pascal=True)
        elif dataset == 'pascal':
            ds = VOCDataSet(augmented_pascal=False)
        else:
            print('Dataset {} not yet supported'.format(dataset))
            return

        print('Command: {}'.format(sys.argv[0]))
        print('Arguments: {}'.format(' '.join(sys.argv[1:])))
        print('Settings: {}'.format(', '.join([
            '{}={}'.format(k, settings[k])
            for k in sorted(list(settings.keys()))
        ])))

        print('Loaded data')

        def loss_calc(pred, label):
            """
            This function returns cross entropy loss for semantic segmentation
            """
            # out shape batch_size x channels x h x w -> batch_size x channels x h x w
            # label shape h x w x 1 x batch_size  -> batch_size x 1 x h x w
            label = label.long().to(torch_device)
            criterion = CrossEntropy2d()

            return criterion(pred, label)

        def lr_poly(base_lr, iter, max_iter, power):
            return base_lr * ((1 - float(iter) / max_iter)**(power))
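
        # polynomial decay: the rate falls smoothly from base_lr at iteration 0
        # to zero at max_iter; with power=0.9 it reaches half the base rate a
        # little past the midpoint of training.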

        def adjust_learning_rate(optimizer, i_iter):
            lr = lr_poly(learning_rate, i_iter, num_steps, power)
            optimizer.param_groups[0]['lr'] = lr
            if len(optimizer.param_groups) > 1:
                optimizer.param_groups[1]['lr'] = lr * 10

        def adjust_learning_rate_D(optimizer, i_iter):
            lr = lr_poly(learning_rate_d, i_iter, num_steps, power)
            optimizer.param_groups[0]['lr'] = lr
            if len(optimizer.param_groups) > 1:
                optimizer.param_groups[1]['lr'] = lr * 10

        def one_hot(label):
            label = label.numpy()
            one_hot = np.zeros((label.shape[0], ds.num_classes, label.shape[1],
                                label.shape[2]),
                               dtype=label.dtype)
            for i in range(ds.num_classes):
                one_hot[:, i, ...] = (label == i)
            # pixels carrying the ignore label match no class and get an all-zero vector
            return torch.tensor(one_hot,
                                dtype=torch.float,
                                device=torch_device)

        def make_D_label(label, ignore_mask):
            ignore_mask = np.expand_dims(ignore_mask, axis=1)
            D_label = np.ones(ignore_mask.shape) * label
            D_label[ignore_mask] = ignore_label
            D_label = torch.tensor(D_label,
                                   dtype=torch.float,
                                   device=torch_device)

            return D_label
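
        # make_D_label builds a constant target map for the discriminator
        # (gt_label=1 for real label maps, pred_label=0 for network output)
        # and writes the ignore value over pixels whose ground truth carries
        # the dataset's ignore label, so the BCE loss can skip them.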

        h, w = map(int, eval_crop_size.split(','))
        eval_crop_size = (h, w)

        h, w = map(int, crop_size.split(','))
        crop_size = (h, w)

        # create network
        if arch == 'deeplab2':
            model = Res_Deeplab(num_classes=ds.num_classes)
        elif arch == 'unet_resnet50':
            model = unet_resnet50(num_classes=ds.num_classes)
        elif arch == 'resnet101_deeplabv3':
            model = resnet101_deeplabv3(num_classes=ds.num_classes)
        else:
            print('Architecture {} not supported'.format(arch))
            return

        # load pretrained parameters
        if restore_from[:4] == 'http':
            saved_state_dict = model_zoo.load_url(restore_from)
        else:
            saved_state_dict = torch.load(restore_from)

        # only copy the params that exist in current model (caffe-like)
        new_params = model.state_dict().copy()
        for name, param in new_params.items():
            if name in saved_state_dict and param.size() == saved_state_dict[name].size():
                new_params[name].copy_(saved_state_dict[name])
        model.load_state_dict(new_params)

        model.train()
        model = model.to(torch_device)

        # init D
        model_D = FCDiscriminator(num_classes=ds.num_classes)
        if restore_from_d is not None:
            model_D.load_state_dict(torch.load(restore_from_d))
        model_D.train()
        model_D = model_D.to(torch_device)

        print('Built model')

        if snapshot_dir is not None:
            if not os.path.exists(snapshot_dir):
                os.makedirs(snapshot_dir)

        ds_train_xy = ds.train_xy(crop_size=crop_size,
                                  scale=random_scale,
                                  mirror=random_mirror,
                                  range01=model.RANGE01,
                                  mean=model.MEAN,
                                  std=model.STD)
        ds_train_y = ds.train_y(crop_size=crop_size,
                                scale=random_scale,
                                mirror=random_mirror,
                                range01=model.RANGE01,
                                mean=model.MEAN,
                                std=model.STD)
        ds_val_xy = ds.val_xy(crop_size=eval_crop_size,
                              scale=False,
                              mirror=False,
                              range01=model.RANGE01,
                              mean=model.MEAN,
                              std=model.STD)

        train_dataset_size = len(ds_train_xy)

        if partial_data_size != -1:
            if partial_data_size > train_dataset_size:
                print('partial-data-size > |train|: exiting')
                return

        if partial_data == 1.0 and (partial_data_size == -1 or
                                    partial_data_size == train_dataset_size):
            trainloader = data.DataLoader(ds_train_xy,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=5,
                                          pin_memory=True)

            trainloader_gt = data.DataLoader(ds_train_y,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=5,
                                             pin_memory=True)

            trainloader_remain = None
            print('|train|={}'.format(train_dataset_size))
            print('|val|={}'.format(len(ds_val_xy)))
        else:
            #sample partial data
            if partial_data_size != -1:
                partial_size = partial_data_size
            else:
                partial_size = int(partial_data * train_dataset_size)

            if partial_id is not None:
                train_ids = pickle.load(open(partial_id))
                print('loading train ids from {}'.format(partial_id))
            else:
                rng = np.random.RandomState(random_seed)
                train_ids = list(rng.permutation(train_dataset_size))

            if snapshot_dir is not None:
                pickle.dump(train_ids,
                            open(osp.join(snapshot_dir, 'train_id.pkl'), 'wb'))

            print('|train supervised|={}'.format(partial_size))
            print('|train unsupervised|={}'.format(train_dataset_size -
                                                   partial_size))
            print('|val|={}'.format(len(ds_val_xy)))

            print('supervised={}'.format(list(train_ids[:partial_size])))

            train_sampler = data.sampler.SubsetRandomSampler(
                train_ids[:partial_size])
            train_remain_sampler = data.sampler.SubsetRandomSampler(
                train_ids[partial_size:])
            train_gt_sampler = data.sampler.SubsetRandomSampler(
                train_ids[:partial_size])

            trainloader = data.DataLoader(ds_train_xy,
                                          batch_size=batch_size,
                                          sampler=train_sampler,
                                          num_workers=3,
                                          pin_memory=True)
            trainloader_remain = data.DataLoader(ds_train_xy,
                                                 batch_size=batch_size,
                                                 sampler=train_remain_sampler,
                                                 num_workers=3,
                                                 pin_memory=True)
            trainloader_gt = data.DataLoader(ds_train_y,
                                             batch_size=batch_size,
                                             sampler=train_gt_sampler,
                                             num_workers=3,
                                             pin_memory=True)

            trainloader_remain_iter = enumerate(trainloader_remain)

        testloader = data.DataLoader(ds_val_xy,
                                     batch_size=1,
                                     shuffle=False,
                                     pin_memory=True)

        print('Data loaders ready')

        trainloader_iter = enumerate(trainloader)
        trainloader_gt_iter = enumerate(trainloader_gt)

        # implement model.optim_parameters(args) to handle different models' lr setting

        # optimizer for segmentation network
        optimizer = optim.SGD(model.optim_parameters(learning_rate),
                              lr=learning_rate,
                              momentum=momentum,
                              weight_decay=weight_decay)
        optimizer.zero_grad()

        # optimizer for discriminator network
        optimizer_D = optim.Adam(model_D.parameters(),
                                 lr=learning_rate_d,
                                 betas=(0.9, 0.99))
        optimizer_D.zero_grad()

        # loss/ bilinear upsampling
        bce_loss = BCEWithLogitsLoss2d()

        print('Built optimizer')

        # labels for adversarial training
        pred_label = 0
        gt_label = 1

        loss_seg_value = 0
        loss_adv_pred_value = 0
        loss_D_value = 0
        loss_semi_mask_accum = 0
        loss_semi_value = 0
        loss_semi_adv_value = 0

        t1 = time.time()

        print('Training for {} steps...'.format(num_steps))
        for i_iter in range(num_steps + 1):

            model.train()
            model.freeze_batchnorm()

            optimizer.zero_grad()
            adjust_learning_rate(optimizer, i_iter)
            optimizer_D.zero_grad()
            adjust_learning_rate_D(optimizer_D, i_iter)

            for sub_i in range(iter_size):

                # train G

                if not supervised:
                    # don't accumulate grads in D
                    for param in model_D.parameters():
                        param.requires_grad = False

                # do semi first
                if not supervised and (lambda_semi > 0 or lambda_semi_adv > 0 ) and i_iter >= semi_start_adv and \
                        trainloader_remain is not None:
                    try:
                        _, batch = next(trainloader_remain_iter)
                    except StopIteration:
                        trainloader_remain_iter = enumerate(trainloader_remain)
                        _, batch = next(trainloader_remain_iter)

                    # only access to img
                    images, _, _, _ = batch
                    images = images.float().to(torch_device)

                    pred = model(images)
                    pred_remain = pred.detach()

                    D_out = model_D(F.softmax(pred, dim=1))
                    D_out_sigmoid = F.sigmoid(
                        D_out).data.cpu().numpy().squeeze(axis=1)

                    ignore_mask_remain = np.zeros(D_out_sigmoid.shape, dtype=bool)

                    loss_semi_adv = lambda_semi_adv * bce_loss(
                        D_out, make_D_label(gt_label, ignore_mask_remain))
                    loss_semi_adv = loss_semi_adv / iter_size

                    #loss_semi_adv.backward()
                    loss_semi_adv_value += float(
                        loss_semi_adv) / lambda_semi_adv

                    if lambda_semi <= 0 or i_iter < semi_start:
                        loss_semi_adv.backward()
                        loss_semi_value = 0
                    else:
                        # produce ignore mask
                        semi_ignore_mask = (D_out_sigmoid < mask_t)

                        semi_gt = pred.data.cpu().numpy().argmax(axis=1)
                        semi_gt[semi_ignore_mask] = ignore_label

                        semi_ratio = 1.0 - float(
                            semi_ignore_mask.sum()) / semi_ignore_mask.size

                        loss_semi_mask_accum += float(semi_ratio)

                        if semi_ratio == 0.0:
                            loss_semi_value += 0
                        else:
                            semi_gt = torch.FloatTensor(semi_gt)

                            loss_semi = lambda_semi * loss_calc(pred, semi_gt)
                            loss_semi = loss_semi / iter_size
                            loss_semi_value += float(loss_semi) / lambda_semi
                            loss_semi += loss_semi_adv
                            loss_semi.backward()

                else:
                    loss_semi = None
                    loss_semi_adv = None

                # train with source

                try:
                    _, batch = next(trainloader_iter)
                except StopIteration:
                    trainloader_iter = enumerate(trainloader)
                    _, batch = next(trainloader_iter)

                images, labels, _, _ = batch
                images = images.float().to(torch_device)
                ignore_mask = (labels.numpy() == ignore_label)
                pred = model(images)

                loss_seg = loss_calc(pred, labels)

                if supervised:
                    loss = loss_seg
                else:
                    D_out = model_D(F.softmax(pred, dim=1))

                    loss_adv_pred = bce_loss(
                        D_out, make_D_label(gt_label, ignore_mask))

                    loss = loss_seg + lambda_adv_pred * loss_adv_pred
                    loss_adv_pred_value += float(loss_adv_pred) / iter_size

                # proper normalization
                loss = loss / iter_size
                loss.backward()
                loss_seg_value += float(loss_seg) / iter_size

                if not supervised:
                    # train D

                    # bring back requires_grad
                    for param in model_D.parameters():
                        param.requires_grad = True

                    # train with pred
                    pred = pred.detach()

                    if d_remain:
                        pred = torch.cat((pred, pred_remain), 0)
                        ignore_mask = np.concatenate(
                            (ignore_mask, ignore_mask_remain), axis=0)

                    D_out = model_D(F.softmax(pred, dim=1))
                    loss_D = bce_loss(D_out,
                                      make_D_label(pred_label, ignore_mask))
                    loss_D = loss_D / iter_size / 2
                    loss_D.backward()
                    loss_D_value += float(loss_D)

                    # train with gt
                    # get gt labels
                    try:
                        _, batch = next(trainloader_gt_iter)
                    except StopIteration:
                        trainloader_gt_iter = enumerate(trainloader_gt)
                        _, batch = next(trainloader_gt_iter)

                    _, labels_gt, _, _ = batch
                    D_gt_v = one_hot(labels_gt)
                    ignore_mask_gt = (labels_gt.numpy() == ignore_label)

                    D_out = model_D(D_gt_v)
                    loss_D = bce_loss(D_out,
                                      make_D_label(gt_label, ignore_mask_gt))
                    loss_D = loss_D / iter_size / 2
                    loss_D.backward()
                    loss_D_value += float(loss_D)

            optimizer.step()
            optimizer_D.step()

            sys.stdout.write('.')
            sys.stdout.flush()

            if i_iter % eval_every == 0 and i_iter != 0:
                model.eval()
                with torch.no_grad():
                    evaluator = EvaluatorIoU(ds.num_classes)
                    for index, batch in enumerate(testloader):
                        image, label, size, name = batch
                        size = size[0].numpy()
                        image = image.float().to(torch_device)
                        output = model(image)
                        output = output.cpu().data[0].numpy()

                        output = output[:, :size[0], :size[1]]
                        gt = np.asarray(label[0].numpy()[:size[0], :size[1]],
                                        dtype=np.int)

                        output = output.transpose(1, 2, 0)
                        output = np.asarray(np.argmax(output, axis=2),
                                            dtype=np.int)

                        evaluator.sample(gt, output, ignore_value=ignore_label)

                        sys.stdout.write('+')
                        sys.stdout.flush()

                per_class_iou = evaluator.score()
                mean_iou = per_class_iou.mean()

                loss_seg_value /= eval_every
                loss_adv_pred_value /= eval_every
                loss_D_value /= eval_every
                loss_semi_mask_accum /= eval_every
                loss_semi_value /= eval_every
                loss_semi_adv_value /= eval_every

                sys.stdout.write('\n')

                t2 = time.time()

                print(
                    'iter = {:8d}/{:8d}, took {:.3f}s, loss_seg = {:.6f}, loss_adv_p = {:.6f}, loss_D = {:.6f}, loss_semi_mask_rate = {:.3%} loss_semi = {:.6f}, loss_semi_adv = {:.3f}'
                    .format(i_iter, num_steps, t2 - t1, loss_seg_value,
                            loss_adv_pred_value, loss_D_value,
                            loss_semi_mask_accum, loss_semi_value,
                            loss_semi_adv_value))

                for i, (class_name,
                        iou) in enumerate(zip(ds.class_names, per_class_iou)):
                    print('class {:2d} {:12} IU {:.2f}'.format(
                        i, class_name, iou))

                print('meanIOU: ' + str(mean_iou) + '\n')

                loss_seg_value = 0
                loss_adv_pred_value = 0
                loss_D_value = 0
                loss_semi_value = 0
                loss_semi_mask_accum = 0
                loss_semi_adv_value = 0

                t1 = t2

            if snapshot_dir is not None and i_iter % save_snapshot_every == 0 and i_iter != 0:
                print('taking snapshot ...')
                torch.save(
                    model.state_dict(),
                    osp.join(snapshot_dir, 'VOC_' + str(i_iter) + '.pth'))
                torch.save(
                    model_D.state_dict(),
                    osp.join(snapshot_dir, 'VOC_' + str(i_iter) + '_D.pth'))

        if snapshot_dir is not None:
            print('save model ...')
            torch.save(
                model.state_dict(),
                osp.join(snapshot_dir, 'VOC_' + str(num_steps) + '.pth'))
            torch.save(
                model_D.state_dict(),
                osp.join(snapshot_dir, 'VOC_' + str(num_steps) + '_D.pth'))
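The semi-supervised branch above turns the discriminator's confidence map into pseudo-labels: where the sigmoid falls below mask_t, the pixel is overwritten with the ignore label so the cross-entropy skips it. Stripped of the training plumbing, the masking step amounts to the following (illustrative shapes and names):

import numpy as np

def make_pseudo_labels(pred_logits, d_confidence, mask_t, ignore_label=255):
    # pred_logits: (N, C, H, W) segmentation scores on unlabeled images
    # d_confidence: (N, H, W) discriminator sigmoid outputs
    semi_gt = pred_logits.argmax(axis=1)           # hard pseudo-labels
    semi_gt[d_confidence < mask_t] = ignore_label  # drop low-confidence pixels
    return semi_gt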
Example 10
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    gpu0 = args.gpu

    print("Evaluating model")
    print(args.restore_from)
    print("classifier model")
    print(args.restore_from_classifier)
    print("sigmoid threshold")
    print(args.sigmoid_threshold)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    model = Res_Deeplab(num_classes=args.num_classes)
    model_cls = Res_Deeplab_class(num_classes=args.num_classes,
                                  mode=6,
                                  latent_vars=args.latent_vars)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict, strict=False)

    model.eval()
    model.cuda(gpu0)

    saved_state_dict = torch.load(args.restore_from_classifier)
    model_cls.load_state_dict(saved_state_dict, strict=False)

    model_cls.eval()
    model_cls.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(505, 505),
                             mode='bilinear',
                             align_corners=True)
    else:
        interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    colorize = VOCColorize()

    combo_matrix = np.zeros((args.num_classes, args.latent_vars + 1),
                            dtype=np.float32)
    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, size, name = batch
        size = size[0].numpy()
        output = model(Variable(image, volatile=True).cuda(gpu0))
        output = interp(output).cpu().data[0].numpy()

        output = output[:, :size[0], :size[1]]

        cls_pred = F.sigmoid(
            model_cls(Variable(image, volatile=True).cuda(gpu0)))
        cls_pred = cls_pred.cpu().data.numpy()[0]

        gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)

        #gt_classes = np.unique(gt).tolist()
        for clsID in range(1, args.num_classes):
            if cls_pred[clsID - 1] < args.sigmoid_threshold:
                output[clsID, :, :] = -1000000000  # suppress classes the classifier marks absent

        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        filename = os.path.join(args.save_dir, '{}.png'.format(name[0]))
        color_file = Image.fromarray(
            colorize(output).transpose(1, 2, 0), 'RGB')
        #color_file.save(filename)

        #filename = os.path.join(args.save_dir, '{}_lv.png'.format(name[0]))
        #color_file = Image.fromarray(colorize(output_lv).transpose(1, 2, 0), 'RGB')
        #color_file.save(filename)

        filename_gt = os.path.join(args.save_dir, '{}_gt.png'.format(name[0]))
        color_file_gt = Image.fromarray(colorize(gt).transpose(1, 2, 0), 'RGB')
        #color_file_gt.save(filename_gt)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    filename = os.path.join(
        args.save_dir,
        args.restore_from.split('/')[-1][:-4] + '_with_classifier_result.txt')
    confusion_matrix = get_iou(data_list, args.num_classes, filename)
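The gating step above suppresses every class whose image-level classifier score falls below the sigmoid threshold by overwriting its logits with a large negative constant before the per-pixel argmax. In isolation the trick looks like this (illustrative names):

import numpy as np

def gate_logits(logits, cls_scores, threshold):
    # logits: (C, H, W) per-pixel scores; cls_scores: (C-1,) image-level
    # sigmoid scores for the foreground classes (class 0 is background)
    gated = logits.copy()
    for cls_id in range(1, logits.shape[0]):
        if cls_scores[cls_id - 1] < threshold:
            gated[cls_id, :, :] = -1e9  # this class can never win the argmax
    return gated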
Example 11
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    model = Res_Deeplab(num_classes=args.num_classes)

    # if args.pretrained_model != None:
    #     args.restore_from = pretrianed_models_dict[args.pretrained_model]
    #
    # if args.restore_from[:4] == 'http' :
    #     saved_state_dict = model_zoo.load_url(args.restore_from)
    # else:
    #     saved_state_dict = torch.load(args.restore_from)
    #model.load_state_dict(saved_state_dict)

    #model.load_state_dict(torch.load('/data/wyc/AdvSemiSeg/snapshots/VOC_15000.pth'))#70.7
    state_dict = torch.load(
        '/data1/wyc/AdvSemiSeg/snapshots/VOC_t_baseline_1adv_mul_20000.pth'
    )  # experiment notes: baseline 70.7 / adv 70.9 / nadv 70.5 / n adv 0.694

    # state_dict = torch.load(
    #     '/home/wyc/VOC_t_baseline_nadv2_20000.pth')  # baseline707 adv 709 nadv 705()*2

    # original saved file with DataParallel

    # create new OrderedDict that does not contain `module.`
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    # load params

    new_params = model.state_dict().copy()
    for name, param in new_params.items():
        print(name)
        if name in new_state_dict and param.size() == new_state_dict[name].size():
            new_params[name].copy_(new_state_dict[name])
            print('copy {}'.format(name))

    model.load_state_dict(new_params)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(505, 505),
                             mode='bilinear',
                             align_corners=True)
    else:
        interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    colorize = VOCColorize()

    tag = 0

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, size, name = batch
        size = size[0].numpy()
        output = model(Variable(image, volatile=True).cuda(gpu0))
        pred = interp(output)
        pred01 = F.softmax(pred, dim=1)
        output = interp(output).cpu().data[0].numpy()
        image = Variable(image).cuda()

        pred_re = F.softmax(pred, dim=1).repeat(1, 3, 1, 1)

        indices_1 = torch.index_select(image, 1,
                                       Variable(torch.LongTensor([0])).cuda())
        indices_2 = torch.index_select(image, 1,
                                       Variable(torch.LongTensor([1])).cuda())
        indices_3 = torch.index_select(image, 1,
                                       Variable(torch.LongTensor([2])).cuda())
        img_re = torch.cat([
            indices_1.repeat(1, 21, 1, 1),
            indices_2.repeat(1, 21, 1, 1),
            indices_3.repeat(1, 21, 1, 1),
        ], 1)

        mul_img = pred_re * img_re

        for i_l in range(label.shape[0]):
            label_set = np.unique(label[i_l]).tolist()
            for ls in label_set:
                if ls != 0 and ls != 255:
                    ls = int(ls)

                    img_p = torch.cat([
                        mul_img[i_l][ls].unsqueeze(0).unsqueeze(0),
                        mul_img[i_l][ls + 21].unsqueeze(0).unsqueeze(0),
                        mul_img[i_l][ls + 21 + 21].unsqueeze(0).unsqueeze(0)
                    ], 1)

                    imgs = img_p.squeeze()
                    imgs = imgs.transpose(0, 1)
                    imgs = imgs.transpose(1, 2)
                    imgs = imgs.data.cpu().numpy()

                    img_ori = image[0]
                    img_ori = img_ori.squeeze()
                    img_ori = img_ori.transpose(0, 1)
                    img_ori = img_ori.transpose(1, 2)
                    img_ori = img_ori.data.cpu().numpy()

                    pred_ori = pred01[0][ls]
                    pred_ori = pred_ori.data.cpu().numpy()
                    pred_0 = pred_ori.copy()

                    pred_size = pred_ori.shape  # keep the outer `size` (used after this loop) intact
                    color_image = np.zeros((3, pred_size[0], pred_size[1]),
                                           dtype=np.uint8)

                    for i in range(pred_size[0]):
                        for j in range(pred_size[1]):
                            if pred_0[i][j] > 0.995:
                                color_image[0][i][j] = 0
                                color_image[1][i][j] = 255
                                color_image[2][i][j] = 0
                            elif pred_0[i][j] > 0.9:
                                color_image[0][i][j] = 255
                                color_image[1][i][j] = 0
                                color_image[2][i][j] = 0
                            elif pred_0[i][j] > 0.7:
                                color_image[0][i][j] = 0
                                color_image[1][i][j] = 0
                                color_image[2][i][j] = 255

                    color_image = color_image.transpose((1, 2, 0))

                    # print pred_ori.shape

                    cv2.imwrite(
                        osp.join('/data1/wyc/AdvSemiSeg/vis/img_pred',
                                 name[0] + '.png'), imgs)
                    cv2.imwrite(
                        osp.join('/data1/wyc/AdvSemiSeg/vis/image',
                                 name[0] + '.png'), img_ori)
                    cv2.imwrite(
                        osp.join('/data1/wyc/AdvSemiSeg/vis/pred',
                                 name[0] + '.png'), color_image)

        output = output[:, :size[0], :size[1]]
        gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)

        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        filename = os.path.join(args.save_dir, '{}.png'.format(name[0]))
        color_file = Image.fromarray(
            colorize(output).transpose(1, 2, 0), 'RGB')
        color_file.save(filename)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    filename = os.path.join(args.save_dir, 'result.txt')
    get_iou(data_list, args.num_classes, filename)
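The nested per-pixel loop that paints color_image above can be vectorized with boolean masks, which is much faster at 505x505; an equivalent sketch (returning H x W x 3 directly instead of transposing afterwards):

import numpy as np

def confidence_bands(prob):
    # prob: (H, W) array of class probabilities
    img = np.zeros(prob.shape + (3,), dtype=np.uint8)
    img[prob > 0.7] = (0, 0, 255)    # blue:  confidence > 0.7
    img[prob > 0.9] = (255, 0, 0)    # red:   confidence > 0.9
    img[prob > 0.995] = (0, 255, 0)  # green: confidence > 0.995
    return img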