Example #1
0
def main():
    """Create the model and run evaluation on the Cityscapes val set.

    Loads a Res_Deeplab checkpoint (local path or URL), runs inference on
    every image in the test loader, and saves both the raw label map and a
    colorized version of each prediction into ``args.save``.
    """
    args = get_arguments()

    gpu0 = args.gpu

    # Make sure the output directory exists before writing predictions.
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = Res_Deeplab(num_classes=args.num_classes)

    # Restore weights either from a URL or from a local checkpoint file.
    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(cityscapesDataSet(args.data_dir,
                                                   args.data_list,
                                                   crop_size=(1024, 512),
                                                   mean=IMG_MEAN,
                                                   scale=False,
                                                   mirror=False,
                                                   set=args.set),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    # Upsample the network output back to full Cityscapes resolution.
    interp = nn.Upsample(size=(1024, 2048), mode='bilinear')

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, _, name = batch
        # Inference only: disable autograd instead of the long-removed
        # `volatile=True` flag (and use print() instead of the Py2 statement).
        with torch.no_grad():
            output1, output2 = model(Variable(image).cuda(gpu0))
        output = interp(output2).cpu().data[0].numpy()

        # CHW -> HWC, then arg-max over channels to get a uint8 label map.
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

        output_col = colorize_mask(output)
        output = Image.fromarray(output)

        # name looks like 'frankfurt/frankfurt_..._leftImg8bit.png'.
        name = name[0].split('/')[-1]
        output.save('%s/%s' % (args.save, name))
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
Example #2
0
def main():
    """Evaluate a series of snapshots (ITER_START..ITER_END step SPAN).

    For each snapshot iteration, loads the corresponding checkpoint, strips
    BatchNorm bookkeeping keys the model does not expect, runs inference on
    the Cityscapes set, and saves colorized predictions into
    ``args.save.format(iter)``.
    """
    args = get_arguments()

    gpu0 = args.gpu

    for iter in range(ITER_START, ITER_END + 1, SPAN):

        print('{0} /{1} processed'.format(iter, ITER_END))

        if not os.path.exists(args.save.format(iter)):
            os.makedirs(args.save.format(iter))

        model = Res_Deeplab(num_classes=args.num_classes)

        saved_state_dict = torch.load(args.restore_from.format(iter))
        # Drop 'num_batches_tracked' entries saved by newer PyTorch BatchNorm.
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its items() raises RuntimeError on Python 3.
        for k in list(saved_state_dict.keys()):
            if 'num_batches_tracked' in k:
                del saved_state_dict[k]
        model.load_state_dict(saved_state_dict)

        model.eval()
        model.cuda(gpu0)

        testloader = data.DataLoader(cityscapesDataSet(args.data_dir,
                                                       args.data_list,
                                                       crop_size=(1024, 512),
                                                       mean=IMG_MEAN,
                                                       scale=False,
                                                       mirror=False,
                                                       set=args.set),
                                     batch_size=1,
                                     shuffle=False,
                                     pin_memory=True)

        # Upsample the network output back to full Cityscapes resolution.
        interp = nn.Upsample(size=(1024, 2048), mode='bilinear')

        for index, batch in enumerate(testloader):
            if index % 100 == 0:
                print('%d processed of %d' % (index, len(testloader)))
            image, _, name = batch
            # Inference only: torch.no_grad() replaces the removed
            # `volatile=True` flag.
            with torch.no_grad():
                output1, output2 = model(Variable(image).cuda(gpu0))
            output = interp(output2).cpu().data[0].numpy()

            output = output.transpose(1, 2, 0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

            output_col = colorize_mask(output)

            name = name[0].split('/')[-1]
            output_col.save('%s/%s_color.png' %
                            (args.save.format(iter), name.split('.')[0]))
Example #3
0
def show_val(seg_state_dict, save_dir, gpu_id):
    """Run validation: colorize predictions and compute mIoU.

    Args:
        seg_state_dict: state dict for a DataParallel-wrapped Res_Deeplab
            (keys must match the 'module.'-prefixed layout).
        save_dir: directory where colorized prediction PNGs are written.
        gpu_id: CUDA device index to run inference on.

    Returns:
        The mIoU value(s) produced by compute_mIoU.
    """

    # args = get_arguments()
    # save_dir=save_path.format(iter)
    gpu0 = gpu_id

    # Create the output directory on first use.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    model = Res_Deeplab(num_classes=NUM_CLASSES)
    # Wrap in DataParallel so the saved 'module.'-prefixed keys load cleanly.
    model = nn.DataParallel(model, device_ids=device_ids)
    model.load_state_dict(seg_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(cityscapesDataSet(DATA_DIRECTORY,
                                                   DATA_LIST_PATH,
                                                   crop_size=(1024, 512),
                                                   mean=IMG_MEAN,
                                                   scale=False,
                                                   mirror=False,
                                                   set=SET),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    # interp = nn.Upsample(size=(1024, 2048), mode='bilinear')
    # Author's suggestion: use align_corners=True for bilinear upsampling.
    interp = nn.Upsample(size=(1024, 2048),
                         mode='bilinear',
                         align_corners=True)

    for index, batch in enumerate(testloader):
        image, _, name = batch
        # Inference only: no gradients needed.
        with torch.no_grad():
            output1, output2 = model(Variable(image).cuda(gpu0))
        # Upsample to full resolution; take the first (only) batch element.
        output = interp(output2).cpu().data[0].numpy()

        # CHW -> HWC, then arg-max over channels to get a uint8 label map.
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

        output_col = colorize_mask(output)

        # name looks like 'frankfurt/frankfurt_..._leftImg8bit.png'.
        name = name[0].split('/')[-1]
        output_col.save('%s/%s.png' % (save_dir, name.split('.')[0]))
    print('colored pictures saving is done')
    mIoUs = compute_mIoU(gtDir, save_dir, devkitDir)
    return mIoUs
Example #4
0
def show_val(seg_state_dict, show_pred_sv_dir, city='ROME'):
    """Run validation: colorize predictions and compute mIoU.

    Args:
        seg_state_dict: state dict for Res_Deeplab.
        show_pred_sv_dir: directory where colorized prediction PNGs are saved.
        city: unused here; kept for backward compatibility with callers.

    Returns:
        (mIoUs, syn_mIoUs) as produced by compute_mIoU.
    """
    save_dir = show_pred_sv_dir
    gpu0 = 0

    # Create the output directory on first use.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    model = Res_Deeplab(num_classes=NUM_CLASSES)
    model.load_state_dict(seg_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(MixedDataSet(DATA_DIRECTORY, DATA_LIST_PATH,
                                              crop_size=(1024, 512),
                                              mean=IMG_MEAN, scale=False,
                                              mirror=False, set=SET),
                                 batch_size=1, shuffle=False, pin_memory=True)

    interp = nn.Upsample(size=(1024, 2048), mode='bilinear')
    for index, batch in enumerate(testloader):
        image, _, name = batch
        # Inference only: disable autograd. The original passed
        # requires_grad=True and built a graph for every image, wasting
        # GPU memory during evaluation.
        with torch.no_grad():
            output1, output2 = model(Variable(image).cuda(gpu0))
        output = interp(output2).cpu().data[0].numpy()

        # CHW -> HWC, then arg-max over channels to get a uint8 label map.
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

        output_col = colorize_mask(output)

        name = name[0].split('/')[-1]
        output_col.save('%s/%s.png' % (save_dir, name.split('.')[0]))
    print('colored pictures saving is done')
    mIoUs, syn_mIoUs = compute_mIoU(gtDir, save_dir, devkitDir)
    return mIoUs, syn_mIoUs
Example #5
0
def main():
    """Evaluate a checkpoint and report mIoU via tensorpack statistics.

    Loads a checkpoint that stores {'model_state_dict', 'best_mean_iu'},
    runs inference on the Cityscapes set at the configured image size,
    saves colorized predictions, and accumulates IoU/accuracy statistics.
    """
    import warnings
    if not sys.warnoptions:
        warnings.simplefilter("ignore")

    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = Res_Deeplab(num_classes=args.num_classes)

    # Restore weights either from a URL or from a local checkpoint file.
    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    # Checkpoint is a dict with training metadata, not a bare state dict.
    model.load_state_dict(saved_state_dict['model_state_dict'])

    model.eval()
    model.cuda(gpu0)

    image_size = (1024, 512)  # (1280,720) #(2048, 1024)
    cityscape_image_size = (2048, 1024)

    print("evaluating {}".format(args.restore_from))
    print("************ best mIoU:{} *******".format(saved_state_dict['best_mean_iu']))
    print("evaluation image size: {}, please make sure this image size is equal to your training image size, this is important for your final mIoU!".format(image_size))

    testloader = data.DataLoader(cityscapesDataSet(crop_size=(image_size[0], image_size[1]), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),
                                 batch_size=1, shuffle=False, pin_memory=True)

    # Upsample predictions back to full Cityscapes resolution (W, H swapped
    # because nn.Upsample takes (H, W)).
    interp = nn.Upsample(size=(cityscape_image_size[1], cityscape_image_size[0]), mode='bilinear')

    from tensorpack.utils.stats import MIoUStatistics
    stat = MIoUStatistics(NUM_CLASSES)

    for index, batch in tqdm(enumerate(testloader)):
        image, label, _, name = batch
        # Inference only: torch.no_grad() replaces the removed
        # `volatile=True` flag.
        with torch.no_grad():
            image, label = Variable(image), Variable(label)
            output1, output2 = model(image.cuda(gpu0))
        output = interp(output2).cpu().data[0].numpy()

        # CHW -> HWC, then arg-max over channels to get a uint8 label map.
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

        output_col = colorize_mask(output)

        name = name[0].split('/')[-1]
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))

        # Accumulate confusion statistics against the ground-truth label.
        stat.feed(output, label.data.cpu().numpy().squeeze())

    print("tensorpack IoU: {}".format(stat.mIoU_beautify))
    print("tensorpack class16 IoU: {}".format(np.sum(stat.IoU) / 16))
    print("tensorpack mIoU: {}".format(stat.mIoU))
    print("tensorpack mean_accuracy: {}".format(stat.mean_accuracy))
    print("tensorpack accuracy: {}".format(stat.accuracy))
Example #6
0
def main():
    """Select the GTA5 images whose features are closest to Cityscapes.

    Three cached stages (each skipped if its .npy file already exists):
      1. Extract downsampled (16x32) feature maps for every Cityscapes image.
      2. Do the same for every GTA5 image.
      3. For each Cityscapes image, pick its 3 nearest GTA5 images by mean
         absolute feature distance, resolving duplicates so no GTA5 image is
         selected twice.
    Finally writes the unique selected GTA5 image paths to a training list.
    """
    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = Res_Deeplab(num_classes=args.num_classes)

    # Restore weights either from a URL or from a local checkpoint file.
    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    # ---- Stage 1: Cityscapes feature distributions (cached) ----
    if not os.path.isfile(citys_feat_distr_path):
        testloader = data.DataLoader(cityscapesDataSet(args.data_dir,
                                                       args.data_list,
                                                       crop_size=(1024, 512),
                                                       mean=CITY_IMG_MEAN,
                                                       scale=False,
                                                       mirror=False,
                                                       set=args.set),
                                     batch_size=1,
                                     shuffle=False,
                                     pin_memory=True)

        # Downsample predictions to a small fixed grid so the per-image
        # feature signatures are cheap to store and compare.
        interp_down = nn.Upsample(size=(16, 32),
                                  mode='bilinear',
                                  align_corners=True)
        citys_feat_distrs = []
        citys_img_paths = []
        for index, batch in enumerate(testloader):
            if index % 100 == 0:
                print('%d processed of %d' % (index, len(testloader)))
            image, _, name = batch
            # Inference only: torch.no_grad() replaces removed volatile=True.
            with torch.no_grad():
                output1, output2 = model(Variable(image).cuda(gpu0))
            output = interp_down(output2).cpu().data[0].numpy()
            output = output.transpose(1, 2, 0)

            output = output[np.newaxis, :]  # add a dim
            citys_feat_distrs.extend(output)
            citys_img_paths.extend(name)

            # name: 'frankfurt/frankfurt_000001_007973_leftImg8bit.png'
        citys_feat_distrs_np = np.array(citys_feat_distrs)
        citys_img_paths_np = np.array(citys_img_paths)
        np.save(citys_feat_distr_path, citys_feat_distrs_np)
        np.save(citys_imgpaths_path, citys_img_paths_np)
    else:
        citys_feat_distrs_np = np.load(citys_feat_distr_path)
        citys_img_paths_np = np.load(citys_imgpaths_path)

    # ---- Stage 2: GTA5 feature distributions (cached) ----
    if not os.path.isfile(gta_feat_distr_path):
        gtaloader = data.DataLoader(GTA5DataSet(GTA_DATA_DIRECTORY,
                                                GTA_DATA_LIST_PATH,
                                                crop_size=(1024, 512),
                                                mean=GTA_IMG_MEAN,
                                                scale=False,
                                                mirror=False),
                                    batch_size=1,
                                    shuffle=False,
                                    pin_memory=True)

        interp_down = nn.Upsample(size=(16, 32),
                                  mode='bilinear',
                                  align_corners=True)
        gta_feat_distrs = []
        gta_img_paths = []
        for index, batch in enumerate(gtaloader):
            if index % 100 == 0:
                print('%d processed of %d' % (index, len(gtaloader)))
            image, _, _, name = batch
            with torch.no_grad():
                output1, output2 = model(Variable(image).cuda(gpu0))
            output = interp_down(output2).cpu().data[0].numpy()
            output = output.transpose(1, 2, 0)

            output = output[np.newaxis, :]  # add a dim
            gta_feat_distrs.extend(output)
            gta_img_paths.extend(name)

        gta_feat_distrs_np = np.array(gta_feat_distrs)
        gta_img_paths_np = np.array(gta_img_paths)
        np.save(gta_feat_distr_path, gta_feat_distrs_np)
        np.save(gta_imgpaths_path, gta_img_paths_np)
    else:
        gta_feat_distrs_np = np.load(gta_feat_distr_path)
        gta_img_paths_np = np.load(gta_imgpaths_path)

    # ---- Stage 3: nearest-neighbour matching (cached) ----
    if not os.path.isfile(closest_imgs_path):
        temp_feat = citys_feat_distrs_np[0, :]
        pixel_amount = temp_feat.size
        closest_imgs_locs = []
        for i in range(citys_img_paths_np.shape[0]):
            cur_citys_feat = citys_feat_distrs_np[i, :]
            distances = []
            if i % 10 == 0:
                print(i)
            # Mean absolute feature distance to every GTA5 image.
            for j in range(gta_img_paths_np.shape[0]):
                cur_gta_feat = gta_feat_distrs_np[j, :]
                dist_abs = abs(cur_citys_feat - cur_gta_feat)
                dist_mean = np.sum(dist_abs) / pixel_amount
                distances.append(dist_mean)
            min_loc = np.argsort(distances)
            # Take the 3 nearest, then resolve overlap with images already
            # chosen for earlier Cityscapes images by walking further down
            # the sorted distance ranking until the picks are all new.
            top_ord = 3
            closest_imgs_loc = min_loc[:top_ord]
            intersect_imgs = np.intersect1d(closest_imgs_loc,
                                            closest_imgs_locs)
            while intersect_imgs.size:
                inters_num = len(intersect_imgs)
                closest_imgs_loc_confirm = np.setdiff1d(
                    closest_imgs_loc, intersect_imgs)  # find the difference
                closest_imgs_loc_candi = min_loc[top_ord:top_ord + inters_num]
                top_ord = top_ord + inters_num
                closest_imgs_loc_confirm = np.concatenate(
                    [closest_imgs_loc_confirm, closest_imgs_loc_candi])
                closest_imgs_loc = closest_imgs_loc_confirm
                intersect_imgs = np.intersect1d(closest_imgs_loc,
                                                closest_imgs_locs)

            closest_imgs_locs.extend(closest_imgs_loc)
        np.save(closest_imgs_path, closest_imgs_locs)
    else:
        closest_imgs_locs = np.load(closest_imgs_path)
    closest_imgs_locs_uni = np.unique(closest_imgs_locs)

    # Write the selected GTA5 image paths, one per line.
    with open(src_train_imgs_txt, 'w') as f_train:
        for img_num in closest_imgs_locs_uni:
            line = gta_img_paths_np[img_num] + '\n'
            f_train.write(line)
Example #7
0
def main():
    """Create the model and start the training.

    Trains Res_Deeplab on GTA5 (source) with Cityscapes as the adaptation
    target; in this version only the target forward pass is active (the
    source-loss branch is disabled), and every ``save_pred_every`` steps the
    model is evaluated on the Cityscapes val split and the mIoU appended to
    ``args.results_dir``.
    """
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    h, w = map(int, args.com_size.split(','))
    com_size = (h, w)

    h, w = map(int, args.input_size_target.split(','))
    input_size_target = (h, w)

    cudnn.enabled = True
    torch.cuda.set_device(args.gpu)

    # Create network and restore pretrained weights directly onto the GPU.
    # NOTE(review): `model` is unbound if args.model != 'DeepLab' — assumes
    # callers always pass 'DeepLab'.
    if args.model == 'DeepLab':
        model = Res_Deeplab(num_classes=args.num_classes)
        saved_state_dict = torch.load(
            args.restore_from,
            map_location=lambda storage, loc: storage.cuda(args.gpu))
        model.load_state_dict(saved_state_dict)

    model.train()
    model.cuda(args.gpu)

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    print("amy", torch.cuda.current_device())
    ############################
    # validation data
    testloader = data.DataLoader(dataset.cityscapes_dataset.cityscapesDataSet(
        args.data_dir_target,
        args.data_list_target_val,
        crop_size=input_size_target,
        mean=IMG_MEAN,
        scale=False,
        mirror=False,
        set=args.set_val),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    with open('./dataset/cityscapes_list/info.json', 'r') as fp:
        info = json.load(fp)
    # np.int was removed from NumPy (1.24+); the builtin int is equivalent.
    mapping = np.array(info['label2train'], dtype=int)
    label_path_list = './dataset/cityscapes_list/label.txt'
    # Use a context manager so the file handle is not leaked.
    with open(label_path_list, 'r') as label_file:
        gt_imgs = label_file.read().splitlines()
    gt_imgs = [join('./data/Cityscapes/data/gtFine/val', x) for x in gt_imgs]

    interp_val = nn.Upsample(size=(com_size[1], com_size[0]), mode='bilinear')

    ############################

    trainloader = data.DataLoader(GTA5DataSet(args.data_dir,
                                              args.data_list,
                                              max_iters=args.num_steps *
                                              args.iter_size * args.batch_size,
                                              crop_size=input_size,
                                              scale=args.random_scale,
                                              mirror=args.random_mirror,
                                              mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)

    trainloader_iter = enumerate(trainloader)

    targetloader = data.DataLoader(cityscapesDataSet(
        args.data_dir_target,
        args.data_list_target,
        max_iters=args.num_steps * args.iter_size * args.batch_size,
        crop_size=input_size_target,
        scale=False,
        mirror=args.random_mirror,
        mean=IMG_MEAN,
        set=args.set),
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True)

    targetloader_iter = enumerate(targetloader)

    # implement model.optim_parameters(args) to handle different models' lr setting
    optimizer = optim.SGD(model.optim_parameters(args),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    bce_loss = torch.nn.BCEWithLogitsLoss()

    interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear')

    for i_iter in range(args.num_steps):

        # Loss accumulators (the source-loss branch is currently disabled,
        # so these stay zero and are only reported in the log line below).
        loss_seg_value = 0
        loss_weak_value = 0
        loss_neg_value = 0
        loss_lse_source_value = 0
        loss_lse_target_value = 0
        entropy_samples_value = 0
        model.train()
        optimizer.zero_grad()

        adjust_learning_rate(optimizer, i_iter)

        for sub_i in range(args.iter_size):
            # Train with target: forward pass only in this version.
            _, batch = next(targetloader_iter)
            images, class_label, _, _ = batch
            images = Variable(images).cuda(args.gpu)
            _, pred_target = model(images)

        # Free GPU memory held by the last forward pass before validation.
        del pred_target, batch, images

        print('exp = {}'.format(args.snapshot_dir))
        print(
            'iter = {0:8d}/{1:8d}, loss_seg = {2:.3f} loss_lse_source = {3:.3f} loss_lse_target = {4:.3f}'
            .format(i_iter, args.num_steps, loss_seg_value,
                    loss_lse_source_value, loss_lse_target_value))

        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            model.eval()
            # 19x19 confusion matrix over the Cityscapes train classes.
            hist = np.zeros((19, 19))

            for index, batch in enumerate(testloader):
                print(index)
                image, _, name = batch
                # Inference only: torch.no_grad() replaces removed
                # volatile=True.
                with torch.no_grad():
                    _, output = model(Variable(image).cuda(args.gpu))
                pred = interp_val(output)
                pred = pred[0].permute(1, 2, 0)
                pred = torch.max(pred, 2)[1].byte()
                pred_cpu = pred.data.cpu().numpy()
                del pred, output
                label = Image.open(gt_imgs[index])
                label = np.array(label.resize(com_size, Image.NEAREST))
                label = label_mapping(label, mapping)
                hist += fast_hist(label.flatten(), pred_cpu.flatten(), 19)

            mIoUs = per_class_iu(hist)
            mIoU = round(np.nanmean(mIoUs) * 100, 2)
            print(mIoU)
            # Append the score; the context manager guarantees the handle
            # is closed (the original leaked it across iterations on error).
            with open(args.results_dir, 'a') as f:
                f.write('i_iter:{:d},        miou:{:0.5f} \n'.format(
                    i_iter, mIoU))
def main():
    """Run inference on an explicit list of image paths ('val3.txt').

    Loads a Res_Deeplab checkpoint, reads image paths one per line from
    val3.txt, preprocesses each image exactly as the Cityscapes loader
    would (resize, BGR, mean-subtract), and saves the raw and colorized
    predictions into ``args.save``.
    """
    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = Res_Deeplab(num_classes=args.num_classes)

    # Restore weights either from a URL or from a local checkpoint file.
    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    # Upsample predictions back to full Cityscapes resolution.
    interp = nn.Upsample(size=(1024, 2048), mode='bilinear')

    # Context manager closes the list file (the original leaked the handle).
    with open('val3.txt') as filee:
        for linee in filee:
            linee = linee.rstrip('\n')
            image = Image.open(linee).convert('RGB')

            # Match the preprocessing of the Cityscapes dataset loader.
            crop_size = (1024, 512)
            image = image.resize(crop_size, Image.BICUBIC)

            image = np.asarray(image, np.float32)

            size = image.shape
            image = image[:, :, ::-1]  # change to BGR
            mean = (128, 128, 128)
            image -= mean
            image = image.transpose((2, 0, 1))

            # Copy to get a contiguous array, then add the batch dimension.
            image = image.copy()
            image = np.reshape(image, [1, 3, 512, 1024])

            name = linee
            image = torch.from_numpy(image)

            # Inference only: torch.no_grad() replaces removed volatile=True.
            with torch.no_grad():
                output1, output2 = model(Variable(image).cuda(gpu0))
            output = interp(output2).cpu().data[0].numpy()

            # CHW -> HWC, then arg-max over channels for the label map.
            output = output.transpose(1, 2, 0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

            output_col = colorize_mask(output)
            output = Image.fromarray(output)

            name = name.split('/')[-1]
            print(name)
            output.save('%s/%s' % (args.save, name))
            output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
Example #9
0
def main():
    """Create the model and start the training.

    Adversarial domain adaptation for semantic segmentation:
    - trains a two-output Res_Deeplab on GTA5 (source) with segmentation loss,
    - aligns target (Cityscapes) predictions against source via two
      FCDiscriminators (output-space adversarial training),
    - adds a log-sum-exp-pooled image-level classification loss on the
      target branch,
    - periodically evaluates mIoU on Cityscapes val and saves snapshots.

    NOTE(review): reads the module-level ``args`` (not a parameter) —
    presumably populated by an argument parser at import time; confirm.
    """

    # Parse "W,H" strings into (w, h) tuples for source / target crop sizes.
    w, h = map(int, args.input_size.split(','))
    input_size = (w, h)

    w, h = map(int, args.input_size_target.split(','))
    input_size_target = (w, h)

    # NOTE(review): unlike the two sizes above, com_size is stored as (h, w);
    # the interp_val / label.resize uses below depend on this order — confirm.
    h, w = map(int, args.com_size.split(','))
    com_size = (h, w)

############################
#validation data
    # Cityscapes validation loader plus ground-truth label paths, used by
    # the periodic mIoU evaluation at the bottom of the training loop.
    testloader = data.DataLoader(cityscapesDataSet(args.data_dir_target, args.data_list_target_val, crop_size=input_size, mean=IMG_MEAN, scale=False, mirror=False, set=args.set_val),
                                    batch_size=1, shuffle=False, pin_memory=True)
    with open('./dataset/cityscapes_list/info.json', 'r') as fp:
        info = json.load(fp)
    # Maps raw Cityscapes label ids to the 19 train ids.
    # NOTE(review): np.int was removed in NumPy >= 1.24 — use int or np.int64.
    mapping = np.array(info['label2train'], dtype=np.int)
    label_path_list = './dataset/cityscapes_list/label.txt'
    gt_imgs = open(label_path_list, 'r').read().splitlines()
    gt_imgs = [osp.join('./data/Cityscapes/data/gtFine/val', x) for x in gt_imgs]

    # Validation-time upsampler.
    # NOTE(review): size=(com_size[1], com_size[0]) = (w, h), but nn modules
    # take (H, W) — verify this matches the intended evaluation resolution.
    interp_val = nn.UpsamplingBilinear2d(size=(com_size[1], com_size[0]))

############################

    cudnn.enabled = True

    # Create network
 #   if args.model == 'DeepLab':
 #       model = Res_Deeplab(num_classes=args.num_classes)
 #       if args.restore_from[:4] == 'http' :
 #           saved_state_dict = model_zoo.load_url(args.restore_from)
    #    else:
   #         saved_state_dict = torch.load(args.restore_from)

  #      new_params = model.state_dict().copy()
 #       for i in saved_state_dict:
#            # Scale.layer5.conv2d_list.3.weight
       #     i_parts = i.split('.')
      #      # print i_parts
     #       if not args.num_classes == 19 or not i_parts[1] == 'layer5':
    #            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
   #             # print i_parts
  #      model.load_state_dict(new_params)

    # Load the segmentation network from a local checkpoint (the commented
    # block above was the old ImageNet-init / partial-load path).
    if args.model == 'DeepLab':
        model = Res_Deeplab(num_classes=args.num_classes)
        saved_state_dict = torch.load(args.restore_from)
        model.load_state_dict(saved_state_dict)

    model.train()
    model.cuda(args.gpu)

    cudnn.benchmark = True

    # init D
    # One discriminator per segmentation output branch.
    model_D1 = FCDiscriminator(num_classes=args.num_classes)
    model_D2 = FCDiscriminator(num_classes=args.num_classes)

    model_D1.train()
    model_D1.cuda(args.gpu)

    model_D2.train()
    model_D2.cuda(args.gpu)

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    # Source-domain (GTA5) training data, with pixel-level labels.
    trainloader = data.DataLoader(
        GTA5DataSet(args.data_dir, args.data_list, max_iters=args.num_steps * args.iter_size * args.batch_size,
                    crop_size=input_size,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
        batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    trainloader_iter = enumerate(trainloader)

    # Target-domain (Cityscapes) training data — only image-level class
    # labels are consumed from this loader below.
    targetloader = data.DataLoader(cityscapesDataSet(args.data_dir_target, args.data_list_target,
                                                     max_iters=args.num_steps * args.iter_size * args.batch_size,
                                                     crop_size=input_size_target,
                                                     scale=False, mirror=args.random_mirror, mean=IMG_MEAN,
                                                     set=args.set),
                                   batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
                                   pin_memory=True)


    targetloader_iter = enumerate(targetloader)

    # implement model.optim_parameters(args) to handle different models' lr setting

    optimizer = optim.SGD(model.optim_parameters(args),
                          lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer.zero_grad()

    optimizer_D1 = optim.Adam(model_D1.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))
    optimizer_D1.zero_grad()

    optimizer_D2 = optim.Adam(model_D2.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))
    optimizer_D2.zero_grad()

    # Used both for adversarial (real/fake) losses and the LSE class loss.
    bce_loss = torch.nn.BCEWithLogitsLoss()

    # Upsample predictions back to the crop sizes (nn sizes are (H, W)).
    interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear')
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear')

    # labels for adversarial training
    source_label = 0
    target_label = 1
    # Pools a 512x1024 map down to one value per channel — turns per-pixel
    # exp-logits into image-level scores for the log-sum-exp loss.
    AvePool = torch.nn.AvgPool2d(kernel_size=(512,1024))

    for i_iter in range(args.num_steps):
        model.train()    
        # Display-only loss accumulators for this iteration.
        loss_lse_target_value = 0
        loss_seg_value1 = 0
        loss_adv_target_value1 = 0
        loss_D_value1 = 0

        loss_seg_value2 = 0
        loss_adv_target_value2 = 0
        loss_D_value2 = 0

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)

        optimizer_D1.zero_grad()
        optimizer_D2.zero_grad()
        adjust_learning_rate_D(optimizer_D1, i_iter)
        adjust_learning_rate_D(optimizer_D2, i_iter)

        # Gradients accumulate across iter_size sub-steps; optimizers step
        # once per outer iteration (losses are divided by iter_size below).
        for sub_i in range(args.iter_size):

            # train G

            # don't accumulate grads in D
            for param in model_D1.parameters():
                param.requires_grad = False

            for param in model_D2.parameters():
                param.requires_grad = False

            # train with source

            _, batch = next(trainloader_iter)
            images, labels, class_label_source, mask_weakly, _, name = batch
            images = Variable(images).cuda(args.gpu)

            # Two-branch forward pass; both branches supervised on source,
            # branch 1 down-weighted by lambda_seg.
            pred1, pred2 = model(images)
            pred1 = interp(pred1)
            pred2 = interp(pred2)

            loss_seg1 = loss_calc(pred1, labels, args.gpu)
            loss_seg2 = loss_calc(pred2, labels, args.gpu)
            loss = loss_seg2 + args.lambda_seg * loss_seg1

            # proper normalization
            loss = loss / args.iter_size
            loss.backward()
            loss_seg_value1 += loss_seg1.data.item() / args.iter_size
            loss_seg_value2 += loss_seg2.data.item() / args.iter_size

            # train with target

            _, batch = next(targetloader_iter)
            images, class_label,  _, _ = batch
            images = Variable(images).cuda(args.gpu)

            pred_target1, pred_target2 = model(images)
            pred_target1 = interp_target(pred_target1)
            pred_target2 = interp_target(pred_target2)

            # Log-sum-exp pooling of per-pixel logits into image-level class
            # scores; exp is clamped at exp(40) to avoid overflow to inf.
            class_label_target_lse = class_label.type(torch.FloatTensor)
            exp_target = torch.min(torch.exp(1*pred_target2), Variable(torch.exp(torch.tensor(40.0))).cuda(args.gpu))
            lse  = (1.0/1) * torch.log(AvePool(exp_target))
            loss_lse_target = bce_loss(lse, Variable(class_label_target_lse.reshape(lse.size())).cuda(args.gpu))

            # Adversarial G step: push target predictions to look "source"
            # to the (frozen) discriminators.
            # NOTE(review): F.softmax without dim= uses the deprecated
            # implicit-dim behavior; dim=1 is presumably intended — confirm.
            D_out1 = model_D1(F.softmax(pred_target1))
            D_out2 = model_D2(F.softmax(pred_target2))

            loss_adv_target1 = bce_loss(D_out1,
                                       Variable(torch.FloatTensor(D_out1.data.size()).fill_(source_label)).cuda(
                                           args.gpu))

            loss_adv_target2 = bce_loss(D_out2,
                                        Variable(torch.FloatTensor(D_out2.data.size()).fill_(source_label)).cuda(
                                            args.gpu))

            # 0.2 is a hard-coded weight on the LSE loss — TODO confirm tuning.
            loss = args.lambda_adv_target1 * loss_adv_target1 + args.lambda_adv_target2 * loss_adv_target2  + 0.2 * loss_lse_target
            loss = loss / args.iter_size
            loss.backward()
            loss_adv_target_value1 += loss_adv_target1.data.item() / args.iter_size
            loss_adv_target_value2 += loss_adv_target2.data.item() / args.iter_size
            loss_lse_target_value += loss_lse_target.data.item() / args.iter_size
            # train D

            # bring back requires_grad
            for param in model_D1.parameters():
                param.requires_grad = True

            for param in model_D2.parameters():
                param.requires_grad = True

            # train with source
            # Detach so D gradients do not flow back into the generator.
            pred1 = pred1.detach()
            pred2 = pred2.detach()

            D_out1 = model_D1(F.softmax(pred1))
            D_out2 = model_D2(F.softmax(pred2))

            loss_D1 = bce_loss(D_out1,
                              Variable(torch.FloatTensor(D_out1.data.size()).fill_(source_label)).cuda(args.gpu))

            loss_D2 = bce_loss(D_out2,
                               Variable(torch.FloatTensor(D_out2.data.size()).fill_(source_label)).cuda(args.gpu))

            # Halved so the source + target D updates together count once.
            loss_D1 = loss_D1 / args.iter_size / 2
            loss_D2 = loss_D2 / args.iter_size / 2

            loss_D1.backward()
            loss_D2.backward()

            loss_D_value1 += loss_D1.data.item()
            loss_D_value2 += loss_D2.data.item()

            # train with target
            pred_target1 = pred_target1.detach()
            pred_target2 = pred_target2.detach()

            D_out1 = model_D1(F.softmax(pred_target1))
            D_out2 = model_D2(F.softmax(pred_target2))

            loss_D1 = bce_loss(D_out1,
                              Variable(torch.FloatTensor(D_out1.data.size()).fill_(target_label)).cuda(args.gpu))

            loss_D2 = bce_loss(D_out2,
                               Variable(torch.FloatTensor(D_out2.data.size()).fill_(target_label)).cuda(args.gpu))

            loss_D1 = loss_D1 / args.iter_size / 2
            loss_D2 = loss_D2 / args.iter_size / 2

            loss_D1.backward()
            loss_D2.backward()           

            loss_D_value1 += loss_D1.data.item()
            loss_D_value2 += loss_D2.data.item()

        optimizer.step()
        optimizer_D1.step()
        optimizer_D2.step()
        # Free large tensors before the (memory-heavy) validation below.
        del D_out1, D_out2, pred1, pred2, pred_target1, pred_target2, images, labels        

        print('exp = {}'.format(args.snapshot_dir))
        print(
        'iter = {0:8d}/{1:8d}, loss_seg1 = {2:.3f} loss_seg2 = {3:.3f} loss_adv1 = {4:.3f}, loss_adv2 = {5:.3f} loss_D1 = {6:.3f} loss_D2 = {7:.3f} loss_lse_target = {8:.3f}'.format(i_iter, args.num_steps, loss_seg_value1, loss_seg_value2, loss_adv_target_value1, loss_adv_target_value2, loss_D_value1, loss_D_value2, loss_lse_target_value))

        # Final save, then stop, once the stop step is reached.
        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '.pth'))
            torch.save(model_D1.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '_D1.pth'))
            torch.save(model_D2.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '_D2.pth'))
            break

        # Periodic snapshot + mIoU evaluation on Cityscapes val.
        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))
            hist = np.zeros((19, 19))  # 19x19 confusion matrix
     #       model.cuda(0)
            model.eval()  # switched back to train() at the top of the loop
            f = open(args.results_dir, 'a')
            for index, batch in enumerate(testloader):
                print(index)
                image, _, _, name = batch
                # NOTE(review): volatile=True is a no-op in PyTorch >= 0.4;
                # wrap in torch.no_grad() in modern code.
                output1, output2 = model(Variable(image, volatile=True).cuda(args.gpu))
                pred = interp_val(output2)
                # CHW -> HWC, then per-pixel argmax over the class axis.
                pred = pred[0].permute(1,2,0)
                pred = torch.max(pred, 2)[1].byte()
                pred_cpu = pred.data.cpu().numpy()
                del pred, output1, output2
                label = Image.open(gt_imgs[index])
                # NOTE(review): PIL resize takes (w, h) while com_size is
                # (h, w) — confirm the evaluation resolution is as intended.
                label = np.array(label.resize(com_size, Image.NEAREST))
                label = label_mapping(label, mapping)
                hist += fast_hist(label.flatten(), pred_cpu.flatten(), 19)
      #      model.cuda(args.gpu)     
            mIoUs = per_class_iu(hist)
            mIoU = round(np.nanmean(mIoUs) * 100, 2)
            print(mIoU)
            f.write('i_iter:{:d},        miou:{:0.5f} \n'.format(i_iter,mIoU))
            f.close()
# --- Example #10 (scraped example marker; original score: 0) ---
def main():
    """Create the model and start the evaluation process.

    Runs the segmentation network over Cityscapes, scores each prediction
    with a trained discriminator, and selects "confident" images (mean
    discriminator output above a threshold): their names go to a retrain
    list file and their colorized masks are saved to ``args.save``.
    Per-image scores are dumped as .npy arrays at the end.

    NOTE(review): relies on module-level constants CITYS_RETRAIN_TXT,
    CITYS_VALUES_SV_PATH and CITYS_FINE_VALUES_SV_PATH — confirm they are
    defined at file scope.
    """

    args = get_arguments()

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    gpu0 = args.gpu

    model = Res_Deeplab(num_classes=args.num_classes)

    # Restore segmentation weights from a URL or a local checkpoint.
    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    #===========load discriminator model====begin===
    model_d2 = FCDiscriminator(num_classes=args.num_classes)
    d2_state_dict = torch.load(args.dis_restore_from)
    model_d2.load_state_dict(d2_state_dict)
    model_d2.eval()
    model_d2.cuda(gpu0)

    #===========load discriminator model====end===
    testloader = data.DataLoader(cityscapesDataSet(args.data_dir,
                                                   args.data_list,
                                                   crop_size=(1024, 512),
                                                   mean=IMG_MEAN,
                                                   scale=False,
                                                   mirror=False,
                                                   set=args.set),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    # Upsample logits to full Cityscapes resolution (H=1024, W=2048).
    interp = nn.Upsample(size=(1024, 2048),
                         mode='bilinear',
                         align_corners=True)
    # interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear')

    out_values = []        # name/score entries for every image
    fine_out_values = []   # entries for images passing the threshold
    retrain_list = []
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    file = open(CITYS_RETRAIN_TXT, 'w')
    for index, batch in enumerate(testloader):
        if index % 20 == 0:
            print('%d processd of %d' % (index, len(testloader)))
        image, _, name = batch
        # NOTE(review): volatile=True is a no-op in PyTorch >= 0.4; use
        # torch.no_grad() in modern code.
        output1, output2 = model(Variable(image, volatile=True).cuda(gpu0))
        ini_output = interp(output2)
        # Discriminator confidence on the softmaxed full-size prediction.
        d2_out1 = model_d2(F.softmax(ini_output,
                                     dim=1))  #.cpu().data[0].numpy()
        out_valu = d2_out1.mean()
        out_valu_img = np.array([[name[0]], out_valu.cpu().data.numpy()])

        out_values.extend(out_valu_img)
        # Hard-coded confidence threshold — TODO confirm its provenance.
        if out_valu.cpu().data.numpy() > 0.64:
            fine_out_valu_img = np.array([[name[0]],
                                          out_valu.cpu().data.numpy()])
            fine_out_values.extend(fine_out_valu_img)

            file.write(name[0] + '\n')
            output = interp(output2).cpu().data[0].numpy()

            # CHW -> HWC, then per-pixel argmax over the class axis.
            output = output.transpose(1, 2, 0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

            output_col = colorize_mask(output)
            name = name[0].split('/')[-1]
            # output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
            output_col.save('%s/%s.png' % (args.save, name.split('.')[0]))
            # print('its confidence value is %f' % out_valu)

            # plt.imshow(output_col)
            # plt.title(str(out_valu))
            # plt.show()

            # output = Image.fromarray(output)
            # output.save('%s/%s' % (args.save, name))

    out_values = np.array(out_values)

    np.save(CITYS_VALUES_SV_PATH, out_values)
    np.save(CITYS_FINE_VALUES_SV_PATH, fine_out_values)

    file.close()
def main():
    """Split previously dumped feature arrays into source/target/mixed sets.

    Loads pooled-feature dumps for Cityscapes and GTA5, saves the first
    1000 rows of each as target.npy / source.npy and the remaining rows of
    both (concatenated) as mixed.npy, then exits.  Everything after exit()
    is dead code from an earlier feature-dumping experiment, kept as-is.
    """

    city = np.load("dump_cityscape5.npy")
    gta = np.load("dump_gta5.npy")

    # First 1000 rows of each domain become the labeled splits.
    city_scape = city[:1000, :]
    gta5 = gta[:1000, :]

    # Remaining rows of both domains, concatenated.
    combined = np.concatenate((city[1000:, :], gta[1000:, :]))

    np.save('source.npy', gta5)
    np.save('target.npy', city_scape)
    np.save('mixed.npy', combined)
    print(city_scape.shape)
    print(gta5.shape)
    print(combined.shape)
    exit()

    # ---- dead code below: unreachable after exit() ----
    # NOTE(review): `dump` and `a` are undefined at this point, and
    # xrange / trainloader_iter.next() are Python 2 only — this section
    # cannot run as-is.
    print(type(dump))
    print(dump.shape)

    b = dump
    print(type(dump))
    print(dump.shape)

    import random

    a = np.stack(random.sample(a, 500))
    b = np.stack(random.sample(b, 500))

    dump = np.concatenate((a, b))
    print(dump.shape)

    arr = np.arange(10)
    #print(dump)
    exit()
    """Create the model and start the evaluation process."""

    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = Res_Deeplab(num_classes=args.num_classes)
    #model = getVGG(num_classes=args.num_classes)

    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    #trainloader = data.DataLoader(cityscapesDataSet(args.data_dir, args.data_list, crop_size=(1024, 512), mean=IMG_MEAN, scale=False, mirror=False, set=args.set), batch_size=1, shuffle=False, pin_memory=True)

    trainloader = data.DataLoader(GTA5DataSet(args.data_dir,
                                              args.data_list,
                                              crop_size=(1024, 512),
                                              mean=IMG_MEAN,
                                              scale=False,
                                              mirror=False),
                                  batch_size=1,
                                  shuffle=False,
                                  pin_memory=True)

    trainloader_iter = enumerate(trainloader)

    interp = nn.Upsample(size=(1024, 2048), mode='bilinear')
    dump_array = np.array((1, 2))  # sentinel; replaced on first iteration

    for itr in xrange(2000):
        print(itr)
        _, batch = trainloader_iter.next()
        images, labels, _, _ = batch
        #images, _, _ = batch

        output1, output2 = model(Variable(images, volatile=True).cuda(gpu0))
        import torch.nn.functional as F
        # 4x4 average pooling, then flatten each image to one feature row.
        output2 = F.avg_pool2d(output2, (4, 4))
        output2 = output2.data.cpu().numpy()
        output2 = np.reshape(output2, (1, -1))

        # First pass replaces the sentinel; later passes append rows.
        if dump_array.shape == (2, ):
            dump_array = output2
        else:
            dump_array = np.concatenate((dump_array, output2))

    np.save('dump_gta5.npy', dump_array)