def main():
    """Run multi-scale, flip-augmented evaluation and save predicted label maps.

    Loads a trained ``SharedEncoder``, runs it over the test set at two
    resolutions (native and 1.25x) with horizontal-flip augmentation,
    averages the four softmax outputs, and writes the per-pixel argmax
    label map for every image into ``args.save``.  For the 'DeepLab'
    model it also records a per-image mean KL-divergence score between
    the network's two classifier heads (an uncertainty/"variance" proxy).

    Returns:
        The output directory path (``args.save``) predictions were written to.
    """

    args = get_arguments()

    batchsize = args.batchsize
    gpu0 = args.gpu

    # Ensure the output directory exists before the save loop below.
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model_dict = {}

    # NOTE(review): these two constants are never used in this function —
    # presumably leftovers from a SharedEncoder configuration; confirm
    # before removing.
    private_code_size = 8
    shared_code_channels = 2048

    enc_shared = SharedEncoder().cuda(gpu0)

    model_dict['enc_shared'] = enc_shared

    # Restore trained weights for every network registered in model_dict.
    load_models(model_dict, args.weight_dir)

    enc_shared.eval()                           # load model done; switch to inference mode

    # testloader2 feeds the same images scaled by this factor (multi-scale testing).
    scale = 1.25
    '''
    #for cityscape 
    testloader = data.DataLoader(cityscapesDataSet(args.data_dir, args.data_list, crop_size=(512, 1024), resize_size=(1024, 512), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
                                   batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
    testloader2 = data.DataLoader(cityscapesDataSet(args.data_dir, args.data_list, crop_size=(round(512*scale), round(1024*scale) ), resize_size=( round(1024*scale), round(512*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
                                   batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)
    '''

    # For Foggy Zurich: native-resolution loader (960x540) ...
    testloader = data.DataLoader(
        cityscapesDataSet(args.data_dir, args.data_list, crop_size=(540, 960), resize_size=(960, 540), mean=IMG_MEAN,
                          scale=False, mirror=False, set=args.set), batch_size=batchsize, shuffle=False,
        pin_memory=True, num_workers=4)
    # ... and the 1.25x-scaled loader over the same file list (same order, shuffle=False).
    testloader2 = data.DataLoader(
        cityscapesDataSet(args.data_dir, args.data_list, crop_size=(round(540 * scale), round(960 * scale)),
                          resize_size=(round(960 * scale), round(540 * scale)), mean=IMG_MEAN, scale=False,
                          mirror=False, set=args.set),
        batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)


    # align_corners only exists on torch >= 0.4.0; older versions take the bare call.
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        # interp = nn.Upsample(size=(1024, 2048), mode='bilinear', align_corners=True)  # for cityscapes
        interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)  # for foggyzurich
        # interp = nn.Upsample(size=(256, 512), mode='bilinear', align_corners=True)      # for Yan's data
    else:
        interp = nn.Upsample(size=(1024, 2048), mode='bilinear')

    sm = torch.nn.Softmax(dim=1)
    log_sm = torch.nn.LogSoftmax(dim=1)
    # reduction='none' keeps the per-pixel, per-channel KL terms so we can
    # build a spatial discrepancy map below.
    kl_distance = nn.KLDivLoss(reduction='none')
    heatmap_score = []
    # zip pairs each native-resolution batch with its 1.25x-scaled copy
    # (both loaders iterate the same list with shuffle=False).
    for index, img_data in enumerate(zip(testloader, testloader2)):
        batch, batch2 = img_data
        image,  _, name = batch
        image2, _, name2 = batch2
        print(image.shape)

        inputs = image.cuda()
        inputs2 = image2.cuda()
        print('\r>>>>Extracting feature...%04d/%04d \t' % (index * batchsize, NUM_STEPS), end='')
        if args.model == 'DeepLab':
            with torch.no_grad():
                # Test-time augmentation: sum the upsampled softmax of four
                # passes — native, native-flipped, scaled, scaled-flipped.
                # 0.5*output1 + output2 fuses the encoder's two prediction heads.
                _, output1, output2,_ = enc_shared(inputs)
                output_batch = interp(sm(0.5 * output1 + output2))

                # Per-pixel KL divergence between the two heads (KLDivLoss takes
                # log-probs as input and probs as target); summed over the class
                # dim this yields a spatial uncertainty ("variance") map.
                heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)  # variance_map batch

                # Flip pass: flip the input, then flip the outputs back before summing.
                _, output1, output2, _ = enc_shared(fliplr(inputs))
                output1, output2 = fliplr(output1), fliplr(output2)
                output_batch += interp(sm(0.5 * output1 + output2))
                del output1, output2, inputs  # free GPU memory before the scaled passes

                # Same two passes at 1.25x scale (interp brings all passes to a common size).
                _, output1, output2, _ = enc_shared(inputs2)
                output_batch += interp(sm(0.5 * output1 + output2))
                _, output1, output2, _ = enc_shared(fliplr(inputs2))
                output1, output2 = fliplr(output1), fliplr(output2)
                output_batch += interp(sm(0.5 * output1 + output2))
                del output1, output2, inputs2
                output_batch = output_batch.cpu().data.numpy()
                heatmap_batch = heatmap_batch.cpu().data.numpy()
                # One scalar uncertainty score per batch.
                heatmap_mean = np.mean(heatmap_batch)
                heatmap_score.append(heatmap_mean)
                print(heatmap_mean)
        elif args.model == 'DeeplabVGG' or args.model == 'Oracle':
            # NOTE(review): this branch treats enc_shared's return as a single
            # tensor, unlike the 4-tuple above, and runs without no_grad —
            # confirm it is still supported. Variable() is a no-op on torch >= 0.4.
            output_batch = enc_shared(Variable(image).cuda())
            output_batch = interp(output_batch).cpu().data.numpy()

        # NOTE(review): if args.model matches neither branch, output_batch is
        # undefined here and the line below raises NameError.

        # output_batch = output_batch.transpose(0,2,3,1)
        # output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
        # NCHW -> NHWC, then per-pixel max score and argmax label.
        output_batch = output_batch.transpose(0, 2, 3, 1)
        score_batch = np.max(output_batch, axis=3)
        output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)
        # output_batch[score_batch<3.2] = 255  #3.2 = 4*0.8
        # Save one grayscale label map per image in the batch.
        for i in range(output_batch.shape[0]):
            output = output_batch[i, :, :]
            output_col = colorize_mask(output)
            output = Image.fromarray(output)

            # SET is a module-level constant; for 'train' the dataset names
            # include directory components that must be stripped.
            if SET == 'train':
                name_tmp = name[i].split('/')[-1]
            else:
                name_tmp = name[i]

            save_path = args.save

            # save_path = re.replace(save_path, 'leftImg8bit', 'pseudo')
            # print(save_path)
            # Redundant with the makedirs at the top, but harmless.
            if not os.path.isdir(save_path):
                os.mkdir(save_path)
            output.save('%s/%s' % (save_path, name_tmp))
            print('%s/%s' % (save_path, name_tmp))
            '''
            output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.png')[0]))
            
            heatmap_tmp = heatmap_batch[i, :, :] / np.max(heatmap_batch[i, :, :])   # max normalization
            fig = plt.figure()
            plt.axis('off')
            heatmap = plt.imshow(heatmap_tmp, cmap='viridis')
            # fig.colorbar(heatmap)
            fig.savefig('%s/%s_var_map.png' % (save_path, name_tmp.split('.png')[0]))
            '''
    return args.save
# NOTE(review): everything from here down looks like fragments of a separate
# training script pasted after main(). The names appended below
# (seg_opt_list, enc_shared_opt, dclf1_opt, enc_s_opt, dis_s2t_opt, ...,
# model_dict, load_model_path) are not defined anywhere in this file —
# restore the missing setup code before running.
dis_opt_list = []

# Optimizer list for quickly adjusting learning rate
seg_opt_list.append(enc_shared_opt)   # segmentation (shared encoder)
dclf_opt_list.append(dclf1_opt)       # domain classifiers
dclf_opt_list.append(dclf2_opt)
rec_opt_list.append(enc_s_opt)        # reconstruction: source/target encoders & decoders
rec_opt_list.append(enc_t_opt)
rec_opt_list.append(dec_s_opt)
rec_opt_list.append(dec_t_opt)
dis_opt_list.append(dis_s2t_opt)      # discriminators for both translation directions
dis_opt_list.append(dis_t2s_opt)

# load_models(model_dict, './results/fz_clean/weight_22500')
#enc_shared.load_state_dict(torch.load('./weight_gta2city/enc_shared.pth'))
load_models(model_dict,
            load_model_path)  # model_dict is a dict of all network structure

# reload params of enc_shared
# enc_shared.load_state_dict(torch.load('./results/2clean2fz_medium_new_var/s2t1weight_best/enc_shared.pth'))
# model_dict['enc_shared'] = enc_shared

# Let cuDNN auto-tune convolution algorithms (fixed input sizes assumed).
cudnn.enabled = True
cudnn.benchmark = True

# NOTE(review): size_average is deprecated in modern torch (use
# reduction='mean'); confirm the target torch version still accepts it.
mse_loss = nn.MSELoss(size_average=True).cuda()
bce_loss = nn.BCEWithLogitsLoss().cuda()
sg_loss = cross_entropy2d          # segmentation loss (project helper)
VGG_loss = VGGLoss()               # perceptual losses (project helpers)
VGG_loss_for_trans = VGGLoss_for_trans()

upsample_256 = nn.Upsample(size=[256, 512], mode='bilinear')
# --- "Exemple #3" (vote count "0"): web-scrape separator between pasted
# code snippets; not Python — kept here as a comment so the file can parse.
                          mean=IMG_MEAN,
                          set='test')
# NOTE(review): the two indented lines above are the trailing keyword
# arguments of a dataset-constructor call whose opening line(s) were lost
# during extraction; `test_set` below is presumably its result — restore
# the missing code before running this fragment.
test_loader = torch_data.DataLoader(test_set,
                                    batch_size=1,
                                    shuffle=False,
                                    num_workers=4,
                                    pin_memory=True)

# Upsample predictions to Cityscapes full resolution (1024x2048).
upsample_1024 = nn.Upsample(size=[1024, 2048], mode='bilinear')

model_dict = {}

enc_shared = SharedEncoder().cuda(args.gpu)
model_dict['enc_shared'] = enc_shared

# Restore trained weights, then switch to inference mode.
load_models(model_dict, args.weight_dir)

enc_shared.eval()
# NOTE(review): created but never updated in the visible loop — the loop
# body below appears truncated.
cty_running_metrics = runningScore(num_classes)
for i_test, (images_test, name) in tqdm(enumerate(test_loader)):
    # volatile=True is the pre-torch-0.4 way to disable autograd
    # (deprecated; modern code would use torch.no_grad()).
    images_test = Variable(images_test.cuda(), volatile=True)

    # Third element of SharedEncoder's 4-tuple is used as the prediction here.
    _, _, pred, _ = enc_shared(images_test)
    pred = upsample_1024(pred)

    # CHW logits -> HWC -> per-pixel argmax -> dataset label ids -> PIL image.
    pred = pred.data.cpu().numpy()[0]
    pred = pred.transpose(1, 2, 0)
    pred = np.asarray(np.argmax(pred, axis=2), dtype=np.uint8)
    pred = np.asarray(test_set.convert_back_to_id(pred), dtype=np.uint8)
    pred = Image.fromarray(pred)
    # NOTE(review): `pred` is never saved — the save call was presumably
    # lost with the rest of this truncated snippet.
# --- "Exemple #4" (vote count "0"): web-scrape separator between pasted
# code snippets; not Python — kept here as a comment so the file can parse.
# NOTE(review): another pasted training-script fragment; as above, the
# optimizer objects appended here (enc_shared_opt, dclf1_opt, ...) and
# seg_opt_list / model_dict / load_model_path are undefined in this file.
dclf_opt_list = []
rec_opt_list = []
dis_opt_list = []

# Optimizer list for quickly adjusting learning rate
seg_opt_list.append(enc_shared_opt)   # segmentation (shared encoder)
dclf_opt_list.append(dclf1_opt)       # domain classifiers
dclf_opt_list.append(dclf2_opt)
rec_opt_list.append(enc_s_opt)        # reconstruction: source/target encoders & decoders
rec_opt_list.append(enc_t_opt)
rec_opt_list.append(dec_s_opt)
rec_opt_list.append(dec_t_opt)
dis_opt_list.append(dis_s2t_opt)      # discriminators for both translation directions
dis_opt_list.append(dis_t2s_opt)

# Restore all networks registered in model_dict from load_model_path.
load_models(model_dict, load_model_path)
#enc_shared.load_state_dict(torch.load('./weight_gta2city/enc_shared.pth'))
#load_models(model_dict, './weight_gta2city')   # model_dict is a dict of all network structure
'''
# reload params of enc_shared
pretrained_dict = convert_state_dict(torch.load('./weight_gta2city/enc_shared.pth'))
model_dict_enc = enc_shared.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict_enc}  # filter out unnecessary keys
model_dict_enc.update(pretrained_dict)
enc_shared.load_state_dict(model_dict_enc)    # already updated enc_shared network
model_dict['enc_shared'] = enc_shared
'''

# Let cuDNN auto-tune convolution algorithms (fixed input sizes assumed).
cudnn.enabled = True
cudnn.benchmark = True