Example #1
File: eval.py  Project: dingmyu/psa
def main():
    global args, logger
    args = get_parser().parse_args()
    logger = get_logger()
    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpu)
    logger.info(args)
    assert args.classes > 1
    assert args.zoom_factor in [1, 2, 4, 8]
    assert (args.crop_h - 1) % 8 == 0 and (args.crop_w - 1) % 8 == 0
    assert args.split in ['train', 'val', 'test']
    logger.info("=> creating model ...")
    logger.info("Classes: {}".format(args.classes))

    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    train_transform = transforms.Compose([
        transforms.RandScale([0.5, 2]),
        transforms.RandRotate([-10, 10], padding=mean, ignore_label=args.ignore_label),
        transforms.RandomGaussianBlur(),
        transforms.RandomHorizontalFlip(),
        transforms.Crop([args.crop_h, args.crop_w], crop_type='rand', padding=mean, ignore_label=args.ignore_label),
        transforms.ToTensor()])

    val_transform = transforms.Compose([
        transforms.Crop([args.crop_h, args.crop_w], crop_type='center', padding=mean, ignore_label=args.ignore_label),
        transforms.ToTensor()])
    val_data1 = datasets.SegData(split='train', data_root=args.data_root, data_list=args.val_list1, transform=val_transform)
    val_loader1 = torch.utils.data.DataLoader(val_data1, batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)

    from pspnet import PSPNet
    model = PSPNet(backbone=args.backbone,
                   layers=args.layers,
                   classes=args.classes,
                   zoom_factor=args.zoom_factor,
                   use_softmax=False,
                   use_aux=False,
                   pretrained=False,
                   syncbn=False).cuda()

    logger.info(model)
    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    cudnn.enabled = True
    cudnn.benchmark = True
    if os.path.isfile(args.model_path):
        logger.info("=> loading checkpoint '{}'".format(args.model_path))
        checkpoint = torch.load(args.model_path)
        # model.load_state_dict(checkpoint['state_dict'], strict=False)
        # logger.info("=> loaded checkpoint '{}'".format(args.model_path))


        # strip the 'module.' prefix that DataParallel adds to every key
        pretrained_dict = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()}
        model.load_state_dict(pretrained_dict, strict=False)

    else:
        raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))
    cv2.setNumThreads(0)
    validate(val_loader1, val_data1.data_list, model, args.classes, mean, std, args.base_size1, args.crop_h, args.crop_w, args.scales)
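Note: the checkpoint loaded in this example was apparently saved from a torch.nn.DataParallel (or distributed) wrapper, which prefixes every state_dict key with 'module.'; the dictionary comprehension above strips that prefix before loading into the unwrapped model. A minimal, self-contained sketch of the same pattern, using a toy module rather than PSPNet:

import torch
import torch.nn as nn

# toy stand-in for the real network
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU())

# pretend this state_dict was saved from a DataParallel-wrapped copy
wrapped_state = {'module.' + k: v for k, v in model.state_dict().items()}

# strip the prefix so the keys match the unwrapped model again
clean_state = {k.replace('module.', '', 1): v for k, v in wrapped_state.items()}
missing, unexpected = model.load_state_dict(clean_state, strict=False)
print(missing, unexpected)  # both lists are empty when every key matched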
Example #2
def main():
    global args, logger, writer
    args = get_parser().parse_args()
    logger = get_logger()
    writer = SummaryWriter(args.save_path)
    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpu)

    if args.dist:
        dist_init(args.port, backend=args.backend)

    if len(args.gpu) == 1:
        args.syncbn = False
    logger.info(args)
    assert args.classes > 1
    assert args.zoom_factor in [1, 2, 4, 8]
    assert (args.crop_h - 1) % 8 == 0 and (args.crop_w - 1) % 8 == 0

    world_size = 1
    rank = 0
    if args.dist:
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    if rank == 0:
        logger.info('dist:{}'.format(args.dist))
        logger.info("=> creating model ...")
        logger.info("Classes: {}".format(args.classes))

    # rank = dist.get_rank()

    if args.bn_group > 1:
        args.syncbn = True
        bn_sync_stats = True
        bn_group_comm = simple_group_split(world_size, rank,
                                           world_size // args.bn_group)
    else:
        args.syncbn = False
        bn_sync_stats = False
        bn_group_comm = None

    model = PSPNet(layers=args.layers,
                   classes=args.classes,
                   zoom_factor=args.zoom_factor,
                   syncbn=args.syncbn,
                   group_size=args.bn_group,
                   group=bn_group_comm,
                   sync_stats=bn_sync_stats)
    if rank == 0:
        logger.info(model)
    # optimizer = torch.optim.SGD(model.parameters(), args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # newly introduced layer with lr x10
    optimizer = torch.optim.SGD([{
        'params': model.layer0.parameters()
    }, {
        'params': model.layer1.parameters()
    }, {
        'params': model.layer2.parameters()
    }, {
        'params': model.layer3.parameters()
    }, {
        'params': model.layer4.parameters()
    }, {
        'params': model.ppm.parameters(),
        'lr': args.base_lr * 10
    }, {
        'params': model.cls.parameters(),
        'lr': args.base_lr * 10
    }, {
        'params': model.aux.parameters(),
        'lr': args.base_lr * 10
    }],
                                lr=args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # model = torch.nn.DataParallel(model).cuda()
    # if args.syncbn:
    #     from lib.syncbn import patch_replication_callback
    #     patch_replication_callback(model)
    model = model.cuda()

    cudnn.enabled = True
    cudnn.benchmark = True
    criterion = nn.NLLLoss(ignore_index=args.ignore_label)

    if args.weight:

        def map_func(storage, location):
            return storage.cuda()

        if os.path.isfile(args.weight):
            logger.info("=> loading weight '{}'".format(args.weight))
            checkpoint = torch.load(args.weight, map_location=map_func)
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded weight '{}'".format(args.weight))
        else:
            logger.info("=> no weight found at '{}'".format(args.weight))

    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            # checkpoint = torch.load(args.resume)
            # args.start_epoch = checkpoint['epoch']
            # model.load_state_dict(checkpoint['state_dict'])
            # optimizer.load_state_dict(checkpoint['optimizer'])
            model, optimizer, args.start_epoch = restore_from(
                model, optimizer, args.resume)

            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, args.start_epoch))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    if args.dist:
        broadcast_params(model)

    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    train_transform = transforms.Compose([
        transforms.RandScale([args.scale_min, args.scale_max]),
        transforms.RandRotate([args.rotate_min, args.rotate_max],
                              padding=mean,
                              ignore_label=args.ignore_label),
        transforms.RandomGaussianBlur(),
        transforms.RandomHorizontalFlip(),
        transforms.Crop([args.crop_h, args.crop_w],
                        crop_type='rand',
                        padding=mean,
                        ignore_label=args.ignore_label),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])
    train_data = datasets.SegData(split='train',
                                  data_root=args.data_root,
                                  data_list=args.train_list,
                                  transform=train_transform)
    train_sampler = None
    if args.dist:
        train_sampler = DistributedSampler(train_data)

    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        num_workers=args.workers,
        pin_memory=False,
        sampler=train_sampler)

    if args.evaluate:
        val_transform = transforms.Compose([
            transforms.Crop([args.crop_h, args.crop_w],
                            crop_type='center',
                            padding=mean,
                            ignore_label=args.ignore_label),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])
        val_data = datasets.SegData(split='val',
                                    data_root=args.data_root,
                                    data_list=args.val_list,
                                    transform=val_transform)
        val_sampler = None
        if args.dist:
            val_sampler = DistributedSampler(val_data)
        val_loader = torch.utils.data.DataLoader(
            val_data,
            batch_size=args.batch_size_val,
            shuffle=False,
            num_workers=args.workers,
            pin_memory=False,
            sampler=val_sampler)

    for epoch in range(args.start_epoch, args.epochs + 1):
        loss_train, mIoU_train, mAcc_train, allAcc_train = train(
            train_loader, model, criterion, optimizer, epoch, args.zoom_factor,
            args.batch_size, args.aux_weight)
        writer.add_scalar('loss_train', loss_train.cpu().numpy(), epoch)
        writer.add_scalar('mIoU_train', mIoU_train, epoch)
        writer.add_scalar('mAcc_train', mAcc_train, epoch)
        writer.add_scalar('allAcc_train', allAcc_train, epoch)
        # write parameters histogram costs lots of time
        # for name, param in model.named_parameters():
        #     writer.add_histogram(name, param, epoch)

        if args.evaluate and rank == 0:
            loss_val, mIoU_val, mAcc_val, allAcc_val = validate(
                val_loader, model, criterion, args.classes, args.zoom_factor)
            writer.add_scalar('loss_val', loss_val.cpu().numpy(), epoch)
            writer.add_scalar('mIoU_val', mIoU_val, epoch)
            writer.add_scalar('mAcc_val', mAcc_val, epoch)
            writer.add_scalar('allAcc_val', allAcc_val, epoch)

        if epoch % args.save_step == 0 and (rank == 0):
            filename = args.save_path + '/train_epoch_' + str(epoch) + '.pth'
            logger.info('Saving checkpoint to: ' + filename)
            torch.save(
                {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),  # keep parameters on the GPU so later epochs keep training there
                    'optimizer': optimizer.state_dict()
                }, filename)
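Note: when args.dist is set, the train_loader above shards the dataset with DistributedSampler and passes shuffle=False to the DataLoader, because the sampler does the shuffling itself. One detail not visible in this snippet is calling set_epoch every epoch so each epoch sees a different shuffle order. A hedged sketch of that driving loop, reusing the example's own names (train_data, args) and assuming torch's DistributedSampler with the process group already initialised elsewhere:

from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

train_sampler = DistributedSampler(train_data)        # shards the dataset across ranks
train_loader = DataLoader(train_data,
                          batch_size=args.batch_size,
                          shuffle=False,               # the sampler shuffles instead
                          sampler=train_sampler,
                          num_workers=args.workers)

for epoch in range(args.start_epoch, args.epochs + 1):
    train_sampler.set_epoch(epoch)                    # re-seed the shuffle for this epoch
    for input, target in train_loader:
        ...                                           # forward / backward / optimizer step as in train()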
Example #3
File: train.py  Project: dingmyu/psa
def main():
    global args, logger, writer
    args = get_parser().parse_args()
    import multiprocessing as mp
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn', force=True)
    rank, world_size = dist_init(args.port)
    logger = get_logger()
    writer = SummaryWriter(args.save_path)
    #os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpu)
    #if len(args.gpu) == 1:
    #   args.syncbn = False
    if rank == 0:
        logger.info(args)
    assert args.classes > 1
    assert args.zoom_factor in [1, 2, 4, 8]
    assert (args.crop_h - 1) % 8 == 0 and (args.crop_w - 1) % 8 == 0
    assert args.net_type in [0, 1, 2, 3]

    if args.bn_group == 1:
        args.bn_group_comm = None
    else:
        assert world_size % args.bn_group == 0
        args.bn_group_comm = simple_group_split(world_size, rank,
                                                world_size // args.bn_group)

    if rank == 0:
        logger.info("=> creating model ...")
        logger.info("Classes: {}".format(args.classes))

    from pspnet import PSPNet
    model = PSPNet(backbone=args.backbone,
                   layers=args.layers,
                   classes=args.classes,
                   zoom_factor=args.zoom_factor,
                   syncbn=args.syncbn,
                   group_size=args.bn_group,
                   group=args.bn_group_comm).cuda()

    if rank == 0:
        logger.info(model)
    # optimizer = torch.optim.SGD(model.parameters(), args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # newly introduced layer with lr x10
    optimizer = torch.optim.SGD([{
        'params': model.layer0.parameters()
    }, {
        'params': model.layer1.parameters()
    }, {
        'params': model.layer2.parameters()
    }, {
        'params': model.layer3.parameters()
    }, {
        'params': model.layer4.parameters()
    }, {
        'params': model.ppm.parameters(),
        'lr': args.base_lr * 10
    }, {
        'params': model.cls.parameters(),
        'lr': args.base_lr * 10
    }, {
        'params': model.result.parameters(),
        'lr': args.base_lr * 10
    }, {
        'params': model.aux.parameters(),
        'lr': args.base_lr * 10
    }],
                                lr=args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    #model = torch.nn.DataParallel(model).cuda()
    model = DistModule(model)
    #if args.syncbn:
    #    from lib.syncbn import patch_replication_callback
    #    patch_replication_callback(model)
    cudnn.enabled = True
    cudnn.benchmark = True
    criterion = nn.NLLLoss(ignore_index=args.ignore_label).cuda()

    if args.weight:

        def map_func(storage, location):
            return storage.cuda()

        if os.path.isfile(args.weight):
            logger.info("=> loading weight '{}'".format(args.weight))
            checkpoint = torch.load(args.weight, map_location=map_func)
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded weight '{}'".format(args.weight))
        else:
            logger.info("=> no weight found at '{}'".format(args.weight))

    if args.resume:
        load_state(args.resume, model, optimizer)

    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    train_transform = transforms.Compose([
        transforms.RandScale([args.scale_min, args.scale_max]),
        transforms.RandRotate([args.rotate_min, args.rotate_max],
                              padding=mean,
                              ignore_label=args.ignore_label),
        transforms.RandomGaussianBlur(),
        transforms.RandomHorizontalFlip(),
        transforms.Crop([args.crop_h, args.crop_w],
                        crop_type='rand',
                        padding=mean,
                        ignore_label=args.ignore_label),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])
    train_data = datasets.SegData(split='train',
                                  data_root=args.data_root,
                                  data_list=args.train_list,
                                  transform=train_transform)
    train_sampler = DistributedSampler(train_data)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=False,
                                               sampler=train_sampler)

    for epoch in range(args.start_epoch, args.epochs + 1):
        loss_train, mIoU_train, mAcc_train, allAcc_train = train(
            train_loader, model, criterion, optimizer, epoch, args.zoom_factor,
            args.batch_size, args.aux_weight)
        if rank == 0:
            writer.add_scalar('loss_train', loss_train, epoch)
            writer.add_scalar('mIoU_train', mIoU_train, epoch)
            writer.add_scalar('mAcc_train', mAcc_train, epoch)
            writer.add_scalar('allAcc_train', allAcc_train, epoch)
        # write parameters histogram costs lots of time
        # for name, param in model.named_parameters():
        #     writer.add_histogram(name, param, epoch)

        if epoch % args.save_step == 0 and rank == 0:
            filename = args.save_path + '/train_epoch_' + str(epoch) + '.pth'
            logger.info('Saving checkpoint to: ' + filename)
            torch.save(
                {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                }, filename)
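Note: both training examples build the SGD optimizer from explicit parameter groups so the freshly initialised heads (ppm, cls, aux, and here result) train at 10x the backbone learning rate. A minimal runnable sketch of the same idea on a toy model; the layer names below are invented for illustration, not taken from the repo:

import torch
import torch.nn as nn

backbone = nn.Linear(16, 16)   # stands in for the pretrained layers
head = nn.Linear(16, 4)        # stands in for a newly introduced layer

base_lr = 0.01
optimizer = torch.optim.SGD(
    [{'params': backbone.parameters()},                    # falls back to the default lr below
     {'params': head.parameters(), 'lr': base_lr * 10}],   # new layer learns 10x faster
    lr=base_lr, momentum=0.9, weight_decay=1e-4)

# param_groups keeps construction order, so a scheduler can rescale each group later
print([group['lr'] for group in optimizer.param_groups])   # [0.01, 0.1]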
Example #4
def main():
    """Create the model and start the training."""
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)  # newer PyYAML requires an explicit Loader
    for k, v in config['common'].items():
        setattr(args, k, v)
    mkdirs(osp.join("logs/" + args.exp_name))

    logger = create_logger('global_logger',
                           "logs/" + args.exp_name + '/log.txt')
    logger.info('{}'.format(args))
    ##############################

    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    logger.info("random_scale {}".format(args.random_scale))
    logger.info("is_training {}".format(args.is_training))

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    h, w = map(int, args.input_size_target.split(','))
    input_size_target = (h, w)
    cudnn.enabled = True
    args.snapshot_dir = args.snapshot_dir + args.exp_name
    tb_logger = SummaryWriter("logs/" + args.exp_name)
    ##############################

    #validation data
    h, w = map(int, args.input_size_test.split(','))
    input_size_test = (h, w)
    h, w = map(int, args.com_size.split(','))
    com_size = (h, w)
    h, w = map(int, args.input_size_crop.split(','))
    input_size_crop = h, w
    h, w = map(int, args.input_size_target_crop.split(','))
    input_size_target_crop = h, w

    test_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
    test_transform = transforms.Compose([
        transforms.Resize((input_size_test[1], input_size_test[0])),
        transforms.ToTensor(), test_normalize
    ])

    testloader = data.DataLoader(cityscapesDataSet(args.data_dir_target,
                                                   args.data_list_target_val,
                                                   crop_size=input_size_test,
                                                   set='train',
                                                   transform=test_transform),
                                 num_workers=args.num_workers,
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)
    with open('./dataset/cityscapes_list/info.json', 'r') as fp:
        info = json.load(fp)
    mapping = np.array(info['label2train'], dtype=np.int64)  # the np.int alias was removed from NumPy
    label_path_list_val = args.label_path_list_val
    label_path_list_test = './dataset/cityscapes_list/label.txt'
    gt_imgs_val = open(label_path_list_val, 'r').read().splitlines()
    gt_imgs_val = [osp.join(args.data_dir_target_val, x) for x in gt_imgs_val]
    test1loader = data.DataLoader(cityscapesDataSet(args.data_dir_target,
                                                    args.data_list_target_test,
                                                    crop_size=input_size_test,
                                                    set='val',
                                                    transform=test_transform),
                                  num_workers=args.num_workers,
                                  batch_size=1,
                                  shuffle=False,
                                  pin_memory=True)

    gt_imgs_test = open(label_path_list_test, 'r').read().splitlines()
    gt_imgs_test = [
        osp.join(args.data_dir_target_test, x) for x in gt_imgs_test
    ]

    name_classes = np.array(info['label'], dtype=str)  # the np.str alias was removed from NumPy
    interp_val = nn.Upsample(size=(com_size[1], com_size[0]),
                             mode='bilinear',
                             align_corners=True)

    ####
    #build model
    ####
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_classes,
                                        weights=args.weights_decoder,
                                        use_aux=True)

    model = SegmentationModule(net_encoder, net_decoder, args.use_aux)

    if args.num_gpus > 1:
        model = torch.nn.DataParallel(model)
        patch_replication_callback(model)
    model.cuda()

    nets = (net_encoder, net_decoder, None, None)
    optimizers = create_optimizer(nets, args)
    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()

    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    source_normalize = transforms_seg.Normalize(mean=mean, std=std)

    mean_mapping = [0.485, 0.456, 0.406]
    mean_mapping = [item * 255 for item in mean_mapping]

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
    source_transform = transforms_seg.Compose([
        transforms_seg.Resize([input_size[1], input_size[0]]),
        segtransforms.RandScale((args.scale_min, args.scale_max)),
        segtransforms.RandRotate((args.rotate_min, args.rotate_max),
                                 padding=mean_mapping,
                                 ignore_label=args.ignore_label),
        #segtransforms.RandomGaussianBlur(),
        #segtransforms.RandomHorizontalFlip(),
        segtransforms.Crop([input_size_crop[1], input_size_crop[0]],
                           crop_type='rand',
                           padding=mean_mapping,
                           ignore_label=args.ignore_label),
        transforms_seg.ToTensor(),
        source_normalize
    ])
    target_normalize = transforms_seg.Normalize(mean=mean, std=std)
    target_transform = transforms_seg.Compose([
        transforms_seg.Resize([input_size_target[1], input_size_target[0]]),
        segtransforms.RandScale((args.scale_min, args.scale_max)),
        segtransforms.RandRotate((args.rotate_min, args.rotate_max),
                                 padding=mean_mapping,
                                 ignore_label=args.ignore_label),
        #segtransforms.RandomGaussianBlur(),
        #segtransforms.RandomHorizontalFlip(),
        segtransforms.Crop(
            [input_size_target_crop[1], input_size_target_crop[0]],
            crop_type='rand',
            padding=mean_mapping,
            ignore_label=args.ignore_label),
        transforms_seg.ToTensor(),
        target_normalize
    ])
    trainloader = data.DataLoader(GTA5DataSet(args.data_dir,
                                              args.data_list,
                                              max_iters=args.num_steps *
                                              args.iter_size * args.batch_size,
                                              crop_size=input_size,
                                              transform=source_transform),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=1,
                                  pin_memory=True)

    trainloader_iter = enumerate(trainloader)

    targetloader = data.DataLoader(fake_cityscapesDataSet(
        args.data_dir_target,
        args.data_list_target,
        max_iters=args.num_steps * args.iter_size * args.batch_size,
        crop_size=input_size_target,
        set=args.set,
        transform=target_transform),
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=1,
                                   pin_memory=True)

    targetloader_iter = enumerate(targetloader)
    # implement model.optim_parameters(args) to handle different models' lr setting

    criterion_seg = torch.nn.CrossEntropyLoss(ignore_index=255, reduction='none')
    criterion_pseudo = torch.nn.BCEWithLogitsLoss(reduction='none').cuda()
    bce_loss = torch.nn.BCEWithLogitsLoss().cuda()
    criterion_reconst = torch.nn.L1Loss().cuda()
    criterion_soft_pseudo = torch.nn.MSELoss(reduction='none').cuda()
    criterion_box = torch.nn.CrossEntropyLoss(ignore_index=255, reduction='none')
    interp = nn.Upsample(size=(input_size[1], input_size[0]),
                         align_corners=True,
                         mode='bilinear')
    interp_target = nn.Upsample(size=(input_size_target[1],
                                      input_size_target[0]),
                                align_corners=True,
                                mode='bilinear')

    # labels for adversarial training
    source_label = 0
    target_label = 1

    optimizer_encoder, optimizer_decoder, optimizer_disc, optimizer_reconst = optimizers
    batch_time = AverageMeter(10)
    loss_seg_value1 = AverageMeter(10)
    best_mIoUs = 0
    best_test_mIoUs = 0
    loss_seg_value2 = AverageMeter(10)
    loss_reconst_source_value = AverageMeter(10)
    loss_reconst_target_value = AverageMeter(10)
    loss_source_disc_value = AverageMeter(10)
    loss_source_disc_adv_value = AverageMeter(10)
    loss_balance_value = AverageMeter(10)
    loss_target_disc_value = AverageMeter(10)
    loss_target_disc_adv_value = AverageMeter(10)
    loss_pseudo_value = AverageMeter(10)
    bounding_num = AverageMeter(10)
    pseudo_num = AverageMeter(10)
    loss_bbx_att_value = AverageMeter(10)

    for i_iter in range(args.num_steps):
        # train G

        # don't accumulate grads in D

        end = time.time()
        _, batch = next(trainloader_iter)
        images, labels, _ = batch
        images = Variable(images).cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7
        labels = Variable(labels).cuda(non_blocking=True)
        seg, loss_seg2 = model(images, labels)

        loss_seg2 = torch.mean(loss_seg2)
        loss = args.lambda_trade_off * (loss_seg2)
        '''
        source_tensor = Variable(torch.FloatTensor(disc.size()).fill_(source_label)).cuda()
        loss_source_disc = bce_loss(disc, source_tensor)

        loss += loss_source_disc * args.lambda_disc
        '''
        # proper normalization
        #logger.info(loss_seg1.data.cpu().numpy())
        loss_seg_value2.update(loss_seg2.data.cpu().numpy())
        #loss_source_disc_value.update(loss_source_disc.data.cpu().numpy())
        # train with target
        optimizer_encoder.zero_grad()
        optimizer_decoder.zero_grad()
        loss.backward()
        #optimizer.step()
        optimizer_encoder.step()
        optimizer_decoder.step()
        #optimizer_disc.step()

        del seg, loss_seg2

        batch_time.update(time.time() - end)

        remain_iter = args.num_steps - i_iter
        remain_time = remain_iter * batch_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m),
                                                    int(t_s))

        adjust_learning_rate(optimizer_encoder, i_iter, args.lr_encoder, args)
        adjust_learning_rate(optimizer_decoder, i_iter, args.lr_decoder, args)
        if i_iter % args.print_freq == 0:
            lr_encoder = optimizer_encoder.param_groups[0]['lr']
            lr_decoder = optimizer_decoder.param_groups[0]['lr']
            logger.info('exp = {}'.format(args.snapshot_dir))
            logger.info(
                'Iter = [{0}/{1}]\t'
                'Time = {batch_time.avg:.3f}\t'
                'loss_seg1 = {loss_seg1.avg:.4f}\t'
                'loss_seg2 = {loss_seg2.avg:.4f}\t'
                'loss_source_disc = {loss_source_disc.avg:.4f}\t'
                'loss_source_disc_adv = {loss_source_disc_adv.avg:.4f}\t'
                'loss_target_disc = {loss_target_disc.avg:.4f}\t'
                'loss_target_disc_adv = {loss_target_disc_adv.avg:.4f}\t'
                'loss_reconst_source = {loss_reconst_source.avg:.4f}\t'
                'loss_bbx_att = {loss_bbx_att.avg:.4f}\t'
                'loss_reconst_target = {loss_reconst_target.avg:.4f}\t'
                'loss_pseudo = {loss_pseudo.avg:.4f}\t'
                'loss_balance = {loss_balance.avg:.4f}\t'
                'bounding_num = {bounding_num.avg:.4f}\t'
                'pseudo_num = {pseudo_num.avg:.4f}\t'
                'lr_encoder = {lr_encoder:.8f} lr_decoder = {lr_decoder:.8f}'.
                format(i_iter,
                       args.num_steps,
                       batch_time=batch_time,
                       loss_seg1=loss_seg_value1,
                       loss_seg2=loss_seg_value2,
                       loss_source_disc=loss_source_disc_value,
                       loss_pseudo=loss_pseudo_value,
                       loss_source_disc_adv=loss_source_disc_adv_value,
                       loss_bbx_att=loss_bbx_att_value,
                       bounding_num=bounding_num,
                       pseudo_num=pseudo_num,
                       loss_target_disc=loss_target_disc_value,
                       loss_target_disc_adv=loss_target_disc_adv_value,
                       loss_reconst_source=loss_reconst_source_value,
                       loss_balance=loss_balance_value,
                       loss_reconst_target=loss_reconst_target_value,
                       lr_encoder=lr_encoder,
                       lr_decoder=lr_decoder))

            logger.info("remain_time: {}".format(remain_time))
            if tb_logger is not None:
                tb_logger.add_scalar('loss_seg_value1', loss_seg_value1.avg,
                                     i_iter)
                tb_logger.add_scalar('loss_seg_value2', loss_seg_value2.avg,
                                     i_iter)
                tb_logger.add_scalar('loss_source_disc',
                                     loss_source_disc_value.avg, i_iter)
                tb_logger.add_scalar('loss_source_disc_adv',
                                     loss_source_disc_adv_value.avg, i_iter)
                tb_logger.add_scalar('loss_target_disc',
                                     loss_target_disc_value.avg, i_iter)
                tb_logger.add_scalar('loss_target_disc_adv',
                                     loss_target_disc_adv_value.avg, i_iter)
                tb_logger.add_scalar('bounding_num', bounding_num.avg, i_iter)
                tb_logger.add_scalar('pseudo_num', pseudo_num.avg, i_iter)
                tb_logger.add_scalar('loss_pseudo', loss_pseudo_value.avg,
                                     i_iter)
                tb_logger.add_scalar('lr', lr_encoder, i_iter)
                tb_logger.add_scalar('loss_balance', loss_balance_value.avg,
                                     i_iter)
            #####
            #save image result

            if i_iter % args.save_pred_every == 0 and i_iter != 0:
                logger.info('taking snapshot ...')
                model.eval()

                val_time = time.time()
                hist = np.zeros((19, 19))
                f = open(args.result_dir, 'a')
                for index, batch in tqdm(enumerate(testloader)):
                    with torch.no_grad():
                        image, name = batch
                        output2, _ = model(Variable(image).cuda(), None)
                        pred = interp_val(output2)
                        del output2
                        pred = pred.cpu().data[0].numpy()
                        pred = pred.transpose(1, 2, 0)
                        pred = np.asarray(np.argmax(pred, axis=2),
                                          dtype=np.uint8)
                        label = np.array(Image.open(gt_imgs_val[index]))
                        #label = np.array(label.resize(com_size, Image.
                        label = label_mapping(label, mapping)
                        #logger.info(label.shape)
                        hist += fast_hist(label.flatten(), pred.flatten(), 19)
                mIoUs = per_class_iu(hist)
                for ind_class in range(args.num_classes):
                    logger.info('===>' + name_classes[ind_class] + ':\t' +
                                str(round(mIoUs[ind_class] * 100, 2)))
                    tb_logger.add_scalar(name_classes[ind_class] + '_mIoU',
                                         mIoUs[ind_class], i_iter)

                mIoUs = round(np.nanmean(mIoUs) * 100, 2)

                logger.info(mIoUs)
                tb_logger.add_scalar('val mIoU', mIoUs, i_iter)
                net_encoder, net_decoder, net_disc, net_reconst = nets
                save_checkpoint(net_encoder, 'encoder', i_iter, args,
                                is_best_test)
                save_checkpoint(net_decoder, 'decoder', i_iter, args,
                                is_best_test)
            model.train()
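Note: the validation loop above accumulates a 19x19 confusion matrix through fast_hist and converts it to per-class IoU with per_class_iu, but neither helper is shown in the snippet. A common implementation of that confusion-matrix approach looks roughly like the following sketch (not necessarily the repo's exact code):

import numpy as np

def fast_hist(label, pred, n):
    # accumulate an n x n confusion matrix from flattened label/prediction arrays
    valid = (label >= 0) & (label < n)              # drop ignore/void pixels (e.g. 255)
    return np.bincount(n * label[valid].astype(int) + pred[valid],
                       minlength=n ** 2).reshape(n, n)

def per_class_iu(hist):
    # per-class IoU = diagonal / (row sum + column sum - diagonal)
    return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))

# toy check with 3 classes
label = np.array([0, 0, 1, 2, 2, 2])
pred = np.array([0, 1, 1, 2, 2, 0])
hist = fast_hist(label, pred, 3)
print(per_class_iu(hist))                               # IoU per class
print(round(np.nanmean(per_class_iu(hist)) * 100, 2))   # mIoU, as reported in the loop above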