Example #1
def loading_data():
    mean_std = cfg.DATA.MEAN_STD
    train_simul_transform = own_transforms.Compose([
        own_transforms.Scale(int(cfg.TRAIN.IMG_SIZE[0] / 0.875)),
        own_transforms.RandomCrop(cfg.TRAIN.IMG_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_simul_transform = own_transforms.Compose([
        own_transforms.Scale(int(cfg.TRAIN.IMG_SIZE[0] / 0.875)),
        own_transforms.CenterCrop(cfg.TRAIN.IMG_SIZE)
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    target_transform = standard_transforms.Compose([
        own_transforms.MaskToTensor(),
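        # remap pixels labeled cfg.DATA.IGNORE_LABEL to the last class index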
        own_transforms.ChangeLabel(cfg.DATA.IGNORE_LABEL, cfg.DATA.NUM_CLASSES - 1)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = CityScapes('train', simul_transform=train_simul_transform, transform=img_transform,
                           target_transform=target_transform)
    train_loader = DataLoader(train_set, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=16, shuffle=True)
    val_set = CityScapes('val', simul_transform=val_simul_transform, transform=img_transform,
                         target_transform=target_transform)
    val_loader = DataLoader(val_set, batch_size=cfg.VAL.BATCH_SIZE, num_workers=16, shuffle=False)

    return train_loader, val_loader, restore_transform
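
# A minimal usage sketch (not part of the original snippet): consume one batch
# and undo the normalization with restore_transform to inspect an image.
train_loader, val_loader, restore_transform = loading_data()
images, labels = next(iter(train_loader))
restore_transform(images[0]).show()  # DeNormalize + ToPILImage, then display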
Example #2
def main(train_args):
    net = FCN32VGG(num_classes=voc.num_classes)

    if len(train_args['snapshot']) == 0:
        curr_epoch = 1
        train_args['best_record'] = {
            'epoch': 0,
            'val_loss': 1e10,
            'acc': 0,
            'acc_cls': 0,
            'mean_iu': 0,
            'fwavacc': 0
        }
    else:
        print('training resumes from ' + train_args['snapshot'])
        net.load_state_dict(
            torch.load(
                os.path.join(ckpt_path, exp_name, train_args['snapshot'])))
        split_snapshot = train_args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        train_args['best_record'] = {
            'epoch': int(split_snapshot[1]),
            'val_loss': float(split_snapshot[3]),
            'acc': float(split_snapshot[5]),
            'acc_cls': float(split_snapshot[7]),
            'mean_iu': float(split_snapshot[9]),
            'fwavacc': float(split_snapshot[11])
        }
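        # The indices above assume snapshot names shaped like (hypothetical
        # values, extension stripped):
        #   epoch_87_loss_0.60329_acc_0.92188_acc-cls_0.48550_mean-iu_0.40535_fwavacc_0.86827
        # so split('_') puts the epoch at index 1, val_loss at 3, acc at 5, and so on.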

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    target_transform = extended_transforms.MaskToTensor()
    restore_transform = standard_transforms.Compose([
        extended_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])
    visualize = standard_transforms.Compose([
        standard_transforms.Resize(400),  # was .Scale(400)
        standard_transforms.CenterCrop(400),
        standard_transforms.ToTensor()
    ])

    train_set = voc.VOC('train',
                        transform=input_transform,
                        target_transform=target_transform)
    train_loader = DataLoader(train_set,
                              batch_size=1,
                              num_workers=4,
                              shuffle=True)
    val_set = voc.VOC('val',
                      transform=input_transform,
                      target_transform=target_transform)
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            num_workers=4,
                            shuffle=False)

    # was CrossEntropyLoss2d; size_average=False is deprecated in favor of reduction='sum'
    criterion = torch.nn.CrossEntropyLoss(
        reduction='sum', ignore_index=voc.ignore_label).to(device)

    # biases get twice the base LR and no weight decay (common FCN training trick)
    optimizer = optim.Adam([
        {'params': [param for name, param in net.named_parameters()
                    if name[-4:] == 'bias'],
         'lr': 2 * train_args['lr']},
        {'params': [param for name, param in net.named_parameters()
                    if name[-4:] != 'bias'],
         'lr': train_args['lr'],
         'weight_decay': train_args['weight_decay']}
    ], betas=(train_args['momentum'], 0.999))

    scheduler = ReduceLROnPlateau(optimizer,
                                  'min',
                                  patience=train_args['lr_patience'],
                                  min_lr=1e-10,
                                  verbose=True)
    for epoch in range(curr_epoch, train_args['epoch_num'] + 1):
        train(train_loader, net, criterion, optimizer, epoch, train_args)
        val_loss = validate(val_loader, net, criterion, optimizer, epoch,
                            train_args, restore_transform, visualize)
        scheduler.step(val_loss)
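
# Invocation sketch (hypothetical values; the keys are the ones main() reads above):
if __name__ == '__main__':
    main({
        'snapshot': '',        # empty string trains from scratch
        'lr': 1e-4,
        'weight_decay': 5e-4,
        'momentum': 0.9,       # used as Adam's beta1 above
        'lr_patience': 10,
        'epoch_num': 300,
    })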
Example #3
def main(args):
    print('=' * 10, 'Starting', '=' * 10, '\n')
    print(device)

    # Set the seed for reproducing the results
    random.seed(args.manual_seed)
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.manual_seed)
        cudnn.benchmark = True

    # Set up results folder
    if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_val_images')):
        os.makedirs(os.path.join(ROOT, RESULT, 'saved_val_images'))
    if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_train_images')):
        os.makedirs(os.path.join(ROOT, RESULT, 'saved_train_images'))

    # Setup Dataloader
    data_loader = get_loader(args.dataset)

    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    target_transform = extended_transforms.MaskToTensor()

    traindata = data_loader('train',
                            n_classes=args.n_classes,
                            transform=input_transform,
                            target_transform=target_transform,
                            do_transform=True)
    trainloader = data.DataLoader(traindata,
                                  batch_size=args.batch_size,
                                  num_workers=2,
                                  shuffle=True)
    valdata = data_loader('val',
                          n_classes=args.n_classes,
                          transform=input_transform,
                          target_transform=target_transform)
    valloader = data.DataLoader(valdata,
                                batch_size=args.batch_size,
                                num_workers=2,
                                shuffle=False)

    n_classes = traindata.n_classes
    n_trainsamples = len(traindata)
    # int() so the scheduler's last_epoch below stays integral
    n_iters_per_epoch = int(np.ceil(n_trainsamples /
                                    float(args.batch_size * args.iter_size)))

    # Setup Model
    model = get_model(name=args.arch,
                      n_classes=n_classes,
                      ignore_index=traindata.ignore_index,
                      output_stride=args.output_stride,
                      pretrained=args.pretrained,
                      momentum_bn=args.momentum_bn,
                      dprob=args.dprob).to(device)

    epochs_done = 0
    X = []
    Y1 = []
    Y1_test = []
    Y2 = []
    Y2_test = []
    avg_pixel_acc = 0
    mean_class_acc = 0
    mIoU = 0
    avg_pixel_acc_test = 0
    mean_class_acc_test = 0
    mIoU_test = 0
    best_mIoU = 0
    best_epoch = 0

    if args.model_path:
        model_name = args.model_path.split('.')
        checkpoint_name = model_name[0] + '_optimizer.pkl'
        checkpoint = torch.load(os.path.join(ROOT, RESULT, checkpoint_name))
        optm = checkpoint['optimizer']
        model.load_state_dict(checkpoint['state_dict'])
        split_str = model_name[0].split('_')
        epochs_done = int(split_str[-1])
        saved_loss = pickle.load(
            open(os.path.join(ROOT, RESULT, "saved_loss.p"), "rb"))
        saved_accuracy = pickle.load(
            open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "rb"))
        X = saved_loss["X"][:epochs_done]
        Y1 = saved_loss["Y1"][:epochs_done]
        Y1_test = saved_loss["Y1_test"][:epochs_done]
        Y2 = saved_loss["Y2"][:epochs_done]
        Y2_test = saved_loss["Y2_test"][:epochs_done]
        # restore per-task histories as lists, matching the keys saved below
        avg_pixel_acc = [saved_accuracy["P1"][:epochs_done, :],
                         saved_accuracy["P2"][:epochs_done, :]]
        mean_class_acc = [saved_accuracy["M1"][:epochs_done, :],
                          saved_accuracy["M2"][:epochs_done, :]]
        mIoU = [saved_accuracy["I1"][:epochs_done, :],
                saved_accuracy["I2"][:epochs_done, :]]
        avg_pixel_acc_test = [saved_accuracy["P1_test"][:epochs_done, :],
                              saved_accuracy["P2_test"][:epochs_done, :]]
        mean_class_acc_test = [saved_accuracy["M1_test"][:epochs_done, :],
                               saved_accuracy["M2_test"][:epochs_done, :]]
        mIoU_test = [saved_accuracy["I1_test"][:epochs_done, :],
                     saved_accuracy["I2_test"][:epochs_done, :]]

    if args.best_model_path:
        best_model_name = args.best_model_path.split('_')
        best_mIoU = float(best_model_name[-2])
        best_epoch = int(best_model_name[-3])

    # Learning rates: For new layers (such as final layer), we set lr to be 10x the learning rate of layers already trained
    bias_10x_params = filter(
        lambda x: ('bias' in x[0]) and ('final' in x[0]) and ('conv' in x[0]),
        model.named_parameters())
    bias_10x_params = list(map(lambda x: x[1], bias_10x_params))

    bias_params = filter(lambda x: ('bias' in x[0]) and ('final' not in x[0]),
                         model.named_parameters())
    bias_params = list(map(lambda x: x[1], bias_params))

    nonbias_10x_params = filter(
        lambda x:
        (('bias' not in x[0]) or ('bn' in x[0])) and ('final' in x[0]),
        model.named_parameters())
    nonbias_10x_params = list(map(lambda x: x[1], nonbias_10x_params))

    nonbias_params = filter(
        lambda x: ('bias' not in x[0]) and ('final' not in x[0]),
        model.named_parameters())
    nonbias_params = list(map(lambda x: x[1], nonbias_params))

    optimizer = torch.optim.SGD([
        {
            'params': bias_params,
            'lr': args.lr
        },
        {
            'params': bias_10x_params,
            'lr': 20 * args.lr if args.pretrained else args.lr
        },
        {
            'params': nonbias_10x_params,
            'lr': 10 * args.lr if args.pretrained else args.lr
        },
        {
            'params': nonbias_params,
            'lr': args.lr
        },
    ],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=(args.optim == 'Nesterov'))
    num_param_groups = 4

    # optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Setting up scheduler
    if args.model_path and args.restore:
        # Here we restore all states of optimizer
        optimizer.load_state_dict(optm)
        total_iters = n_iters_per_epoch * args.epochs
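        # cosine annealing: the LR multiplier decays from 1 at step 0 to 0 at total_iters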
        lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
        scheduler = lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=[lambda1] * num_param_groups,
            last_epoch=epochs_done * n_iters_per_epoch)
        # scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=epochs_done)
    else:
        # scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
        # Here we simply restart the training
        # if args.T0:
        #     total_iters = args.T0 * n_iters_per_epoch
        # else:
        total_iters = ((args.epochs - epochs_done) * n_iters_per_epoch)
        lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
        scheduler = lr_scheduler.LambdaLR(
            optimizer, lr_lambda=[lambda1] * num_param_groups)

    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps, steps_test

    criterion_sbd = nn.CrossEntropyLoss(reduction='sum',
                                        ignore_index=traindata.ignore_index)
    criterion_lip = nn.CrossEntropyLoss(reduction='sum',
                                        ignore_index=traindata.ignore_index)
    criterions = [criterion_sbd, criterion_lip]

    for epoch in range(epochs_done, args.epochs):
        print('=' * 10, 'Epoch %d' % (epoch + 1), '=' * 10)
        l_avg = [0, 0]
        totalclasswise_pixel_acc = [0, 0]
        totalclasswise_gtpixels = [0, 0]
        totalclasswise_predpixels = [0, 0]
        l_avg_test = [0, 0]
        totalclasswise_pixel_acc_test = [0, 0]
        totalclasswise_gtpixels_test = [0, 0]
        totalclasswise_predpixels_test = [0, 0]
        steps = [0, 0]
        steps_test = [0, 0]

        # scheduler.step()
        train(model, optimizer, criterions, trainloader, epoch, scheduler,
              traindata)
        val(model, criterions, valloader, epoch, valdata)

        # save the model every 5 epochs
        if (epoch + 1) % 5 == 0 or epoch == args.epochs - 1:
            if (epoch + 1) > 5:
                os.remove(
                    os.path.join(
                        ROOT, RESULT,
                        "{}_{}_{}.pkl".format(args.arch, args.dataset,
                                              epoch - 4)))
                os.remove(
                    os.path.join(
                        ROOT, RESULT, "{}_{}_{}_optimizer.pkl".format(
                            args.arch, args.dataset, epoch - 4)))
            torch.save(
                model,
                os.path.join(
                    ROOT, RESULT,
                    "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch + 1)))
            torch.save(
                {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                os.path.join(
                    ROOT, RESULT,
                    "{}_{}_{}_optimizer.pkl".format(args.arch, args.dataset,
                                                    epoch + 1)))

        # remove old loss & accuracy files
        if os.path.isfile(os.path.join(ROOT, RESULT, "saved_loss.p")):
            os.remove(os.path.join(ROOT, RESULT, "saved_loss.p"))
        if os.path.isfile(os.path.join(ROOT, RESULT, "saved_accuracy.p")):
            os.remove(os.path.join(ROOT, RESULT, "saved_accuracy.p"))

        # save train and validation loss
        X.append(epoch + 1)
        Y1.append(l_avg[0] / steps[0])
        Y1_test.append(l_avg_test[0] / steps_test[0])
        Y2.append(l_avg[1] / steps[1])
        Y2_test.append(l_avg_test[1] / steps_test[1])
        saved_loss = {
            "X": X,
            "Y1": Y1,
            "Y2": Y2,
            "Y1_test": Y1_test,
            "Y2_test": Y2_test
        }
        pickle.dump(saved_loss,
                    open(os.path.join(ROOT, RESULT, "saved_loss.p"), "wb"))

        # pixel accuracy
        totalclasswise_pixel_acc[0] = totalclasswise_pixel_acc[0].reshape(
            (-1, n_classes[0])).astype(np.float32)
        totalclasswise_gtpixels[0] = totalclasswise_gtpixels[0].reshape(
            (-1, n_classes[0]))
        totalclasswise_predpixels[0] = totalclasswise_predpixels[0].reshape(
            (-1, n_classes[0]))
        totalclasswise_pixel_acc_test[0] = totalclasswise_pixel_acc_test[
            0].reshape((-1, n_classes[0])).astype(np.float32)
        totalclasswise_gtpixels_test[0] = totalclasswise_gtpixels_test[
            0].reshape((-1, n_classes[0]))
        totalclasswise_predpixels_test[0] = totalclasswise_predpixels_test[
            0].reshape((-1, n_classes[0]))

        totalclasswise_pixel_acc[1] = totalclasswise_pixel_acc[1].reshape(
            (-1, n_classes[1])).astype(np.float32)
        totalclasswise_gtpixels[1] = totalclasswise_gtpixels[1].reshape(
            (-1, n_classes[1]))
        totalclasswise_predpixels[1] = totalclasswise_predpixels[1].reshape(
            (-1, n_classes[1]))
        totalclasswise_pixel_acc_test[1] = totalclasswise_pixel_acc_test[
            1].reshape((-1, n_classes[1])).astype(np.float32)
        totalclasswise_gtpixels_test[1] = totalclasswise_gtpixels_test[
            1].reshape((-1, n_classes[1]))
        totalclasswise_predpixels_test[1] = totalclasswise_predpixels_test[
            1].reshape((-1, n_classes[1]))
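        # Per-class IoU below is TP / (TP + FP + FN): totalclasswise_pixel_acc
        # counts TP, gtpixels counts TP + FN, predpixels counts TP + FP, so
        # IoU = acc / (gt + pred - acc).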

        if isinstance(avg_pixel_acc, list):
            avg_pixel_acc[0] = np.vstack(
                (avg_pixel_acc[0],
                 np.sum(totalclasswise_pixel_acc[0], axis=1) /
                 np.sum(totalclasswise_gtpixels[0], axis=1)))
            mean_class_acc[0] = np.vstack(
                (mean_class_acc[0],
                 np.mean(totalclasswise_pixel_acc[0] /
                         totalclasswise_gtpixels[0],
                         axis=1)))
            mIoU[0] = np.vstack(
                (mIoU[0],
                 np.mean(
                     totalclasswise_pixel_acc[0] /
                     (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0]
                      - totalclasswise_pixel_acc[0]),
                     axis=1)))
            avg_pixel_acc[1] = np.vstack(
                (avg_pixel_acc[1],
                 np.sum(totalclasswise_pixel_acc[1], axis=1) /
                 np.sum(totalclasswise_gtpixels[1], axis=1)))
            mean_class_acc[1] = np.vstack(
                (mean_class_acc[1],
                 np.mean(totalclasswise_pixel_acc[1] /
                         totalclasswise_gtpixels[1],
                         axis=1)))
            mIoU[1] = np.vstack(
                (mIoU[1],
                 np.mean(
                     totalclasswise_pixel_acc[1] /
                     (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1]
                      - totalclasswise_pixel_acc[1]),
                     axis=1)))

            avg_pixel_acc_test[0] = np.vstack(
                (avg_pixel_acc_test[0],
                 np.sum(totalclasswise_pixel_acc_test[0], axis=1) /
                 np.sum(totalclasswise_gtpixels_test[0], axis=1)))
            mean_class_acc_test[0] = np.vstack(
                (mean_class_acc_test[0],
                 np.mean(totalclasswise_pixel_acc_test[0] /
                         totalclasswise_gtpixels_test[0],
                         axis=1)))
            mIoU_test[0] = np.vstack(
                (mIoU_test[0],
                 np.mean(totalclasswise_pixel_acc_test[0] /
                         (totalclasswise_gtpixels_test[0] +
                          totalclasswise_predpixels_test[0] -
                          totalclasswise_pixel_acc_test[0]),
                         axis=1)))
            avg_pixel_acc_test[1] = np.vstack(
                (avg_pixel_acc_test[1],
                 np.sum(totalclasswise_pixel_acc_test[1], axis=1) /
                 np.sum(totalclasswise_gtpixels_test[1], axis=1)))
            mean_class_acc_test[1] = np.vstack(
                (mean_class_acc_test[1],
                 np.mean(totalclasswise_pixel_acc_test[1] /
                         totalclasswise_gtpixels_test[1],
                         axis=1)))
            mIoU_test[1] = np.vstack(
                (mIoU_test[1],
                 np.mean(totalclasswise_pixel_acc_test[1] /
                         (totalclasswise_gtpixels_test[1] +
                          totalclasswise_predpixels_test[1] -
                          totalclasswise_pixel_acc_test[1]),
                         axis=1)))
        else:
            avg_pixel_acc = []
            mean_class_acc = []
            mIoU = []
            avg_pixel_acc.append(
                np.sum(totalclasswise_pixel_acc[0], axis=1) /
                np.sum(totalclasswise_gtpixels[0], axis=1))
            mean_class_acc.append(
                np.mean(totalclasswise_pixel_acc[0] /
                        totalclasswise_gtpixels[0],
                        axis=1))
            mIoU.append(
                np.mean(
                    totalclasswise_pixel_acc[0] /
                    (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0]
                     - totalclasswise_pixel_acc[0]),
                    axis=1))
            avg_pixel_acc.append(
                np.sum(totalclasswise_pixel_acc[1], axis=1) /
                np.sum(totalclasswise_gtpixels[1], axis=1))
            mean_class_acc.append(
                np.mean(totalclasswise_pixel_acc[1] /
                        totalclasswise_gtpixels[1],
                        axis=1))
            mIoU.append(
                np.mean(
                    totalclasswise_pixel_acc[1] /
                    (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1]
                     - totalclasswise_pixel_acc[1]),
                    axis=1))

            avg_pixel_acc_test = []
            mean_class_acc_test = []
            mIoU_test = []
            avg_pixel_acc_test.append(
                np.sum(totalclasswise_pixel_acc_test[0], axis=1) /
                np.sum(totalclasswise_gtpixels_test[0], axis=1))
            mean_class_acc_test.append(
                np.mean(totalclasswise_pixel_acc_test[0] /
                        totalclasswise_gtpixels_test[0],
                        axis=1))
            mIoU_test.append(
                np.mean(totalclasswise_pixel_acc_test[0] /
                        (totalclasswise_gtpixels_test[0] +
                         totalclasswise_predpixels_test[0] -
                         totalclasswise_pixel_acc_test[0]),
                        axis=1))
            avg_pixel_acc_test.append(
                np.sum(totalclasswise_pixel_acc_test[1], axis=1) /
                np.sum(totalclasswise_gtpixels_test[1], axis=1))
            mean_class_acc_test.append(
                np.mean(totalclasswise_pixel_acc_test[1] /
                        totalclasswise_gtpixels_test[1],
                        axis=1))
            mIoU_test.append(
                np.mean(totalclasswise_pixel_acc_test[1] /
                        (totalclasswise_gtpixels_test[1] +
                         totalclasswise_predpixels_test[1] -
                         totalclasswise_pixel_acc_test[1]),
                        axis=1))

        saved_accuracy = {
            "X": X,
            "P1": avg_pixel_acc[0],
            "P2": avg_pixel_acc[1],
            "M1": mean_class_acc[0],
            "M2": mean_class_acc[1],
            "I1": mIoU[0],
            "I2": mIoU[1],
            "P1_test": avg_pixel_acc_test[0],
            "P2_test": avg_pixel_acc_test[1],
            "M1_test": mean_class_acc_test[0],
            "M2_test": mean_class_acc_test[1],
            "I1_test": mIoU_test[0],
            "I2_test": mIoU_test[1]
        }
        pickle.dump(saved_accuracy,
                    open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "wb"))

        # print validation mIoU of both tasks
        this_mIoU1 = np.mean(totalclasswise_pixel_acc_test[0] /
                             (totalclasswise_gtpixels_test[0] +
                              totalclasswise_predpixels_test[0] -
                              totalclasswise_pixel_acc_test[0]),
                             axis=1)[0]
        this_mIoU2 = np.mean(totalclasswise_pixel_acc_test[1] /
                             (totalclasswise_gtpixels_test[1] +
                              totalclasswise_predpixels_test[1] -
                              totalclasswise_pixel_acc_test[1]),
                             axis=1)[0]
        print('Val: mIoU_sbd = {}, mIoU_lip = {}'.format(
            this_mIoU1, this_mIoU2))
Example #4
import numpy as np
from datasets.dataset import yaogan
from torch.utils.data import DataLoader
import transforms

#### Compute per-class pixel weights for a weighted loss

target_transform = transforms.MaskToTensor()
input_transform = transforms.MaskToTensor()

if __name__ == "__main__":
    # use a distinct name so the imported yaogan dataset class is not shadowed
    train_set = yaogan(mode='train',
                       input_transform=input_transform,
                       target_transform=target_transform)
    train_loader = DataLoader(train_set, batch_size=24, num_workers=8)
    weight = np.zeros(8, dtype=np.int64)  # running per-class pixel counts
    for index, data in enumerate(train_loader):
        img, label = data
        # flatten the whole batch; reshape(-1) also handles a smaller final batch
        label = np.array(label).reshape(-1).astype(np.uint8)
        weight_ = np.bincount(label, minlength=8)
        weight = weight_ + weight
    print(weight)
    pix_partion = weight / weight.sum()  # per-class pixel fraction
    weight_all = 1 / pix_partion  # inverse-frequency weights
    max_w = weight_all.max()  # avoid shadowing the builtin max()
    weight_all = weight_all / max_w  # normalize weights to [0, 1]
    print(weight_all)
    print(pix_partion)
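
    # Follow-up sketch (an assumption, not in the original snippet): the
    # normalized inverse-frequency weights typically feed a weighted loss.
    import torch
    criterion = torch.nn.CrossEntropyLoss(
        weight=torch.tensor(weight_all, dtype=torch.float32))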

Example #5
def main(train_args, model):
    print(train_args)

    net = model.cuda()

    if len(train_args['snapshot']) == 0:
        curr_epoch = 1
        train_args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
    else:
        print('training resumes from ' + train_args['snapshot'])
        net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, train_args['snapshot'])))
        split_snapshot = train_args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        train_args['best_record'] = {'epoch': int(split_snapshot[1]), 'val_loss': float(split_snapshot[3]),
                                     'acc': float(split_snapshot[5]), 'acc_cls': float(split_snapshot[7]),
                                     'mean_iu': float(split_snapshot[9]), 'fwavacc': float(split_snapshot[11])}

    net.train()

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    input_transform = standard_transforms.Compose([
        standard_transforms.Pad(200),
        standard_transforms.CenterCrop(320),
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])

    train_transform = standard_transforms.Compose([
        standard_transforms.Pad(200),
        standard_transforms.CenterCrop(320),
        extended_transforms.MaskToTensor()])

    target_transform = extended_transforms.MaskToTensor()

    restore_transform = standard_transforms.Compose([
        extended_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])

    visualize = standard_transforms.Compose([
        standard_transforms.Resize(400),
        standard_transforms.CenterCrop(400),
        standard_transforms.ToTensor()
    ])

    train_set = VOCSegmentation(root='./', image_set='train', transform=input_transform, target_transform=train_transform)
    train_loader = DataLoader(train_set, batch_size=1, num_workers=4, shuffle=True)
    val_set = VOCSegmentation(root='./', image_set='val', transform=input_transform, target_transform=train_transform)
    val_loader = DataLoader(val_set, batch_size=1, num_workers=4, shuffle=False)

    # was: CrossEntropyLoss2d(size_average=False, ignore_index=voc.ignore_label)
    criterion = CrossEntropyLoss(reduction='sum', ignore_index=255).cuda()

    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
         'lr': 2 * train_args['lr']},
        {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
         'lr': train_args['lr'], 'weight_decay': train_args['weight_decay']}
    ], momentum=train_args['momentum'])

    """optimizer = optim.Adam([
        {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
         'lr': 2 * train_args['lr']},
        {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
         'lr': train_args['lr'], 'weight_decay': train_args['weight_decay']}
    ], betas=(train_args['momentum'], 0.999))"""

    if len(train_args['snapshot']) > 0:
        optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, 'opt_' + train_args['snapshot'])))
        optimizer.param_groups[0]['lr'] = 2 * train_args['lr']
        optimizer.param_groups[1]['lr'] = train_args['lr']

    """check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    open(os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt'), 'w').write(str(train_args) + '\n\n')"""

    scheduler = ReduceLROnPlateau(optimizer, 'min', patience=train_args['lr_patience'], min_lr=1e-10, verbose=True)
    for epoch in range(curr_epoch, train_args['epoch_num'] + 1):
        train(train_loader, net, criterion, optimizer, epoch, train_args)
        val_loss, imges = validate(val_loader, net, criterion, optimizer, epoch, train_args, restore_transform, visualize)
        #imges.show()
        scheduler.step(val_loss)
    return imges
Example #6
def main(train_args):
    check_mkdir(os.path.join(train_args['ckpt_path'], args['exp']))
    check_mkdir(
        os.path.join(train_args['ckpt_path'], args['exp'],
                     train_args['exp_name']))
    model = DeepLabV3('1')

    # print(model)
    device = torch.device("cuda")

    num_gpu = list(range(torch.cuda.device_count()))
    """###############------use gpu--------###############"""
    if args['use_gpu']:
        ts = time.time()
        print(torch.cuda.current_device())
        print(torch.cuda.get_device_name(0))

        model = nn.DataParallel(model, device_ids=num_gpu)
        model = model.to(device)
        print("Finish cuda loading ,time elapsed {}", format(time.time() - ts))
    else:
        print("please check your gpu device,start training on cpu")
    """###############-------中间开始训练--------###############"""
    if len(train_args['snapshot']) == 0:
        curr_epoch = 1
        train_args['best_record'] = {
            'epoch': 0,
            'val_loss': 1e10,
            'acc': 0,
            'acc_cls': 0,
            'mean_iu': 0,
            'fwavacc': 0
        }
        # model.apply(weights_init)
    else:
        print("train resume from " + train_args['snapshot'])

        state_dict = torch.load(
            os.path.join(train_args['ckpt_path'], args['exp'],
                         train_args['exp_name'], train_args['snapshot']))
        new_state_dict = OrderedDict()

        for k, v in state_dict.items():
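            # checkpoints saved under nn.DataParallel prefix keys with 'module.';
            # stripping the first 7 characters recovers the bare parameter names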
            name = k[7:]
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)
        # model.load_state_dict(
        #     torch.load(os.path.join(train_args['ckpt_path'],args['exp'],train_args['exp_name'], train_args['snapshot'])))

        split_snapshot = train_args['snapshot'].split('_')

        curr_epoch = int(split_snapshot[1]) + 1
        train_args['best_record'] = {
            'epoch': int(split_snapshot[1]),
            'val_loss': float(split_snapshot[3]),
            'acc': float(split_snapshot[5]),
            'acc_cls': float(split_snapshot[7]),
            'mean_iu': float(split_snapshot[9]),
            'fwavacc': float(split_snapshot[11])
        }

    model.train()

    mean_std = ([0.485, 0.456, 0.406, 0.450], [0.229, 0.224, 0.225, 0.225])
    """#################---数据增强和数据变换等操作------########"""
    input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])  ##Nomorlized
    target_transform = extended_transforms.MaskToTensor()  # target  to tensor

    joint_transform = joint_transforms.Compose([
        joint_transforms.RandomHorizontallyFlip(),
        joint_transforms.RandomCrop((256, 256), padding=0),
        joint_transforms.Rotate(degree=90)
    ])  ###data_augment

    restore = standard_transforms.Compose([
        extended_transforms.DeNormalize(*mean_std),
        extended_transforms.channel_4_to_channel_3(4, 3),  ##默认3通道如果四通道会转成三通道
        standard_transforms.ToPILImage(),
    ])  # DeNomorlized,出来是pil图片了

    visualize = standard_transforms.Compose([
        standard_transforms.Resize(256),
        standard_transforms.CenterCrop(256),  ##中心裁剪,此处可以删除
        standard_transforms.ToTensor()
    ])  # resize 大小之后转tensor
    """#################---数据加载------########"""
    train_set = yaogan(mode='train',
                       cls=train_args['training_cls'],
                       joint_transform=None,
                       input_transform=input_transform,
                       target_transform=target_transform)
    train_loader = DataLoader(train_set,
                              batch_size=train_args['batch_size'],
                              num_workers=train_args['num_works'],
                              shuffle=True)
    val_set = yaogan(mode='val',
                     cls=train_args['training_cls'],
                     input_transform=input_transform,
                     target_transform=target_transform)
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            num_workers=train_args['num_works'],
                            shuffle=False)

    # test_set=yaogan(mode='test',cls=train_args['training_cls'],joint_transform=None,
    #                 input_transform=input_transform,target_transform=None)
    # test_loader=DataLoader(test_set,batch_size=1,
    #                        num_workers=train_args['num_works'], shuffle=False)

    optimizer = optim.Adadelta(model.parameters(), lr=train_args['lr'])

    # define a weighted loss (zero weight for the 0 label)
    # weight=[0.09287939 ,0.02091968 ,0.02453979, 0.25752962 ,0.33731845, 1.,
    #         0.09518322, 0.52794035 ,0.24298112 ,0.02657369, 0.15057124 ,0.36864611,
    #         0.25835161,0.16672758 ,0.40728756 ,0.00751281]
    """###############-------训练数据权重--------###############"""
    if train_args['weight'] is not None:
        weight = [0.1, 1.]
        weight = torch.Tensor(weight)
    else:
        weight = None
    criterion = nn.CrossEntropyLoss(weight=weight,
                                    reduction='mean',  # was the deprecated 'elementwise_mean'
                                    ignore_index=-100).to(device)
    # criterion=nn.BCELoss(weight=weight,reduction='elementwise_mean').cuda()

    check_mkdir(train_args['ckpt_path'])
    check_mkdir(os.path.join(train_args['ckpt_path'], args['exp']))
    check_mkdir(
        os.path.join(train_args['ckpt_path'], args['exp'],
                     train_args['exp_name']))
    open(
        os.path.join(train_args['ckpt_path'], args['exp'],
                     train_args['exp_name'],
                     str(time.time()) + '.txt'),
        'w').write(str(train_args) + '\n\n')
    """###############-------start training--------###############"""
    for epoch in range(curr_epoch, train_args['epoch_num'] + 1):

        adjust_lr(optimizer, epoch)
        train(train_loader, model, criterion, optimizer, epoch, train_args,
              device)
        val_loss = validate(val_loader, model, criterion, optimizer, restore,
                            epoch, train_args, visualize, device)
    writer.close()
Example #7
def main(args):
    print('=' * 10, 'Starting', '=' * 10, '\n')
    print(device)

    # Set the seed for reproducing the results
    random.seed(args.manual_seed)
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.manual_seed)
        cudnn.benchmark = True

    # Set up results folder
    if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_val_images')):
        os.makedirs(os.path.join(ROOT, RESULT, 'saved_val_images'))
    if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_train_images')):
        os.makedirs(os.path.join(ROOT, RESULT, 'saved_train_images'))

    # Setup Dataloader
    data_loader = get_loader(args.dataset)

    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    target_transform = extended_transforms.MaskToTensor()

    traindata = data_loader('train',
                            n_classes=args.n_classes,
                            transform=input_transform,
                            target_transform=target_transform,
                            do_transform=True)
    trainloader = data.DataLoader(traindata,
                                  batch_size=args.batch_size,
                                  num_workers=1,
                                  shuffle=True)
    valdata = data_loader('val',
                          n_classes=args.n_classes,
                          transform=input_transform,
                          target_transform=target_transform)
    valloader = data.DataLoader(valdata,
                                batch_size=args.batch_size,
                                num_workers=1,
                                shuffle=False)

    n_classes = traindata.n_classes
    n_trainsamples_total = len(traindata)
    n_trainsamples_lip = n_trainsamples_total // 2
    n_trainsamples_sbd = n_trainsamples_total // 2
    n_iters_per_epoch_common = np.ceil(n_trainsamples_total / 20.0)
    n_iters_per_epoch_lip = np.ceil(n_trainsamples_lip / 10.0)
    n_iters_per_epoch_sbd = np.ceil(n_trainsamples_sbd / 10.0)

    print('# total training samples = {}'.format(n_trainsamples_total))
    print('# sbd training samples = {}'.format(n_trainsamples_sbd))
    print('# lip training samples = {}'.format(n_trainsamples_lip))

    # Setup Model
    model = get_model(name=args.arch,
                      n_classes=n_classes,
                      ignore_index=traindata.ignore_index,
                      output_stride=args.output_stride,
                      pretrained=args.pretrained,
                      momentum_bn=args.momentum_bn,
                      dprob=args.dprob).to(device)

    X = []
    Y1 = []
    Y1_test = []
    Y2 = []
    Y2_test = []
    avg_pixel_acc = 0
    mean_class_acc = 0
    mIoU = 0
    avg_pixel_acc_test = 0
    mean_class_acc_test = 0
    mIoU_test = 0

    # Learning rates: For new layers (such as final layer), we set lr to be 10x the learning rate of layers already trained
    common_bias = filter(lambda x: ('bias' in x[0]) and ('final' not in x[0]),
                         model.named_parameters())
    common_bias = list(map(lambda x: x[1], common_bias))
    common_nonbias = filter(
        lambda x: ('bias' not in x[0]) and ('final' not in x[0]),
        model.named_parameters())
    common_nonbias = list(map(lambda x: x[1], common_nonbias))

    final1_bias = filter(
        lambda x: ('bias' in x[0]) and ('final1' in x[0]) and ('conv' in x[0]),
        model.named_parameters())
    final1_bias = list(map(lambda x: x[1], final1_bias))
    final1_nonbias = filter(
        lambda x:
        (('bias' not in x[0]) or ('bn' in x[0])) and ('final1' in x[0]),
        model.named_parameters())
    final1_nonbias = list(map(lambda x: x[1], final1_nonbias))

    final2_bias = filter(
        lambda x: ('bias' in x[0]) and ('final2' in x[0]) and ('conv' in x[0]),
        model.named_parameters())
    final2_bias = list(map(lambda x: x[1], final2_bias))
    final2_nonbias = filter(
        lambda x:
        (('bias' not in x[0]) or ('bn' in x[0])) and ('final2' in x[0]),
        model.named_parameters())
    final2_nonbias = list(map(lambda x: x[1], final2_nonbias))

    optimizer_common = torch.optim.SGD([{
        'params': common_bias,
        'lr': args.lr
    }, {
        'params': common_nonbias,
        'lr': args.lr
    }],
                                       lr=args.lr,
                                       momentum=args.momentum,
                                       weight_decay=args.weight_decay,
                                       nesterov=(args.optim == 'Nesterov'))
    optimizer_sbd = torch.optim.SGD(
        [{
            'params': final1_bias,
            'lr': 20 * args.lr if args.pretrained else args.lr
        }, {
            'params': final1_nonbias,
            'lr': 10 * args.lr if args.pretrained else args.lr
        }],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
        nesterov=(args.optim == 'Nesterov'))
    optimizer_lip = torch.optim.SGD(
        [{
            'params': final2_bias,
            'lr': 20 * args.lr if args.pretrained else args.lr
        }, {
            'params': final2_nonbias,
            'lr': 10 * args.lr if args.pretrained else args.lr
        }],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
        nesterov=(args.optim == 'Nesterov'))

    optimizers = [optimizer_common, optimizer_sbd, optimizer_lip]

    # Setting up scheduler
    total_iters_common = (args.epochs * n_iters_per_epoch_common)
    total_iters_sbd = (args.epochs * n_iters_per_epoch_sbd)
    total_iters_lip = (args.epochs * n_iters_per_epoch_lip)
    lambda_common = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters_common)
    lambda_sbd = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters_sbd)
    lambda_lip = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters_lip)
    scheduler_common = lr_scheduler.LambdaLR(optimizer_common,
                                             lr_lambda=[lambda_common] * 2)
    scheduler_sbd = lr_scheduler.LambdaLR(optimizer_sbd,
                                          lr_lambda=[lambda_sbd] * 2)
    scheduler_lip = lr_scheduler.LambdaLR(optimizer_lip,
                                          lr_lambda=[lambda_lip] * 2)

    schedulers = [scheduler_common, scheduler_sbd, scheduler_lip]

    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps, steps_test
    global bug_counter

    bug_counter = 0

    scheduler_common.step()
    scheduler_sbd.step()
    scheduler_lip.step()

    counter_sizes = [20, 10, 10]
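    # The sizes 20/10/10 mirror the divisors used above for
    # n_iters_per_epoch_common, _sbd, and _lip respectively.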
    global counters
    counters = [0, 0, 0]

    criterion_sbd = nn.CrossEntropyLoss(reduction='sum',
                                        ignore_index=traindata.ignore_index)
    criterion_lip = nn.CrossEntropyLoss(reduction='sum',
                                        ignore_index=traindata.ignore_index)
    criterions = [criterion_sbd, criterion_lip]

    for epoch in range(args.epochs):
        print('=' * 10, 'Epoch %d' % (epoch + 1), '=' * 10)
        l_avg = [0, 0]
        totalclasswise_pixel_acc = [0, 0]
        totalclasswise_gtpixels = [0, 0]
        totalclasswise_predpixels = [0, 0]
        l_avg_test = [0, 0]
        totalclasswise_pixel_acc_test = [0, 0]
        totalclasswise_gtpixels_test = [0, 0]
        totalclasswise_predpixels_test = [0, 0]
        steps = [0, 0]
        steps_test = [0, 0]

        train(model, optimizers, criterions, trainloader, epoch, schedulers,
              traindata, counter_sizes)
        val(model, criterions, valloader, epoch, valdata)

        # save the model every 5 epochs
        if (epoch + 1) % 5 == 0 or epoch == args.epochs - 1:
            if (epoch + 1) > 5:
                os.remove(
                    os.path.join(
                        ROOT, RESULT,
                        "{}_{}_{}.pkl".format(args.arch, args.dataset,
                                              epoch - 4)))
                os.remove(
                    os.path.join(
                        ROOT, RESULT, "{}_{}_{}_optimizer0.pkl".format(
                            args.arch, args.dataset, epoch - 4)))
                os.remove(
                    os.path.join(
                        ROOT, RESULT, "{}_{}_{}_optimizer1.pkl".format(
                            args.arch, args.dataset, epoch - 4)))
                os.remove(
                    os.path.join(
                        ROOT, RESULT, "{}_{}_{}_optimizer2.pkl".format(
                            args.arch, args.dataset, epoch - 4)))
            torch.save(
                model,
                os.path.join(
                    ROOT, RESULT,
                    "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch + 1)))
            torch.save(
                {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizers[0].state_dict()
                },
                os.path.join(
                    ROOT, RESULT,
                    "{}_{}_{}_optimizer0.pkl".format(args.arch, args.dataset,
                                                     epoch + 1)))
            torch.save(
                {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizers[1].state_dict()
                },
                os.path.join(
                    ROOT, RESULT,
                    "{}_{}_{}_optimizer1.pkl".format(args.arch, args.dataset,
                                                     epoch + 1)))
            torch.save(
                {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizers[2].state_dict()
                },
                os.path.join(
                    ROOT, RESULT,
                    "{}_{}_{}_optimizer2.pkl".format(args.arch, args.dataset,
                                                     epoch + 1)))

        # remove old loss & accuracy files
        if os.path.isfile(os.path.join(ROOT, RESULT, "saved_loss.p")):
            os.remove(os.path.join(ROOT, RESULT, "saved_loss.p"))
        if os.path.isfile(os.path.join(ROOT, RESULT, "saved_accuracy.p")):
            os.remove(os.path.join(ROOT, RESULT, "saved_accuracy.p"))

        # save train and validation loss
        X.append(epoch + 1)
        Y1.append(l_avg[0] / steps[0])
        Y1_test.append(l_avg_test[0] / steps_test[0])
        Y2.append(l_avg[1] / steps[1])
        Y2_test.append(l_avg_test[1] / steps_test[1])
        saved_loss = {
            "X": X,
            "Y1": Y1,
            "Y2": Y2,
            "Y1_test": Y1_test,
            "Y2_test": Y2_test
        }
        pickle.dump(saved_loss,
                    open(os.path.join(ROOT, RESULT, "saved_loss.p"), "wb"))

        # pixel accuracy
        totalclasswise_pixel_acc[0] = totalclasswise_pixel_acc[0].reshape(
            (-1, n_classes[0])).astype(np.float32)
        totalclasswise_gtpixels[0] = totalclasswise_gtpixels[0].reshape(
            (-1, n_classes[0]))
        totalclasswise_predpixels[0] = totalclasswise_predpixels[0].reshape(
            (-1, n_classes[0]))
        totalclasswise_pixel_acc_test[0] = totalclasswise_pixel_acc_test[
            0].reshape((-1, n_classes[0])).astype(np.float32)
        totalclasswise_gtpixels_test[0] = totalclasswise_gtpixels_test[
            0].reshape((-1, n_classes[0]))
        totalclasswise_predpixels_test[0] = totalclasswise_predpixels_test[
            0].reshape((-1, n_classes[0]))

        totalclasswise_pixel_acc[1] = totalclasswise_pixel_acc[1].reshape(
            (-1, n_classes[1])).astype(np.float32)
        totalclasswise_gtpixels[1] = totalclasswise_gtpixels[1].reshape(
            (-1, n_classes[1]))
        totalclasswise_predpixels[1] = totalclasswise_predpixels[1].reshape(
            (-1, n_classes[1]))
        totalclasswise_pixel_acc_test[1] = totalclasswise_pixel_acc_test[
            1].reshape((-1, n_classes[1])).astype(np.float32)
        totalclasswise_gtpixels_test[1] = totalclasswise_gtpixels_test[
            1].reshape((-1, n_classes[1]))
        totalclasswise_predpixels_test[1] = totalclasswise_predpixels_test[
            1].reshape((-1, n_classes[1]))

        if isinstance(avg_pixel_acc, list):
            avg_pixel_acc[0] = np.vstack(
                (avg_pixel_acc[0],
                 np.sum(totalclasswise_pixel_acc[0], axis=1) /
                 np.sum(totalclasswise_gtpixels[0], axis=1)))
            mean_class_acc[0] = np.vstack(
                (mean_class_acc[0],
                 np.mean(totalclasswise_pixel_acc[0] /
                         totalclasswise_gtpixels[0],
                         axis=1)))
            mIoU[0] = np.vstack(
                (mIoU[0],
                 np.mean(
                     totalclasswise_pixel_acc[0] /
                     (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0]
                      - totalclasswise_pixel_acc[0]),
                     axis=1)))
            avg_pixel_acc[1] = np.vstack(
                (avg_pixel_acc[1],
                 np.sum(totalclasswise_pixel_acc[1], axis=1) /
                 np.sum(totalclasswise_gtpixels[1], axis=1)))
            mean_class_acc[1] = np.vstack(
                (mean_class_acc[1],
                 np.mean(totalclasswise_pixel_acc[1] /
                         totalclasswise_gtpixels[1],
                         axis=1)))
            mIoU[1] = np.vstack(
                (mIoU[1],
                 np.mean(
                     totalclasswise_pixel_acc[1] /
                     (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1]
                      - totalclasswise_pixel_acc[1]),
                     axis=1)))

            avg_pixel_acc_test[0] = np.vstack(
                (avg_pixel_acc_test[0],
                 np.sum(totalclasswise_pixel_acc_test[0], axis=1) /
                 np.sum(totalclasswise_gtpixels_test[0], axis=1)))
            mean_class_acc_test[0] = np.vstack(
                (mean_class_acc_test[0],
                 np.mean(totalclasswise_pixel_acc_test[0] /
                         totalclasswise_gtpixels_test[0],
                         axis=1)))
            mIoU_test[0] = np.vstack(
                (mIoU_test[0],
                 np.mean(totalclasswise_pixel_acc_test[0] /
                         (totalclasswise_gtpixels_test[0] +
                          totalclasswise_predpixels_test[0] -
                          totalclasswise_pixel_acc_test[0]),
                         axis=1)))
            avg_pixel_acc_test[1] = np.vstack(
                (avg_pixel_acc_test[1],
                 np.sum(totalclasswise_pixel_acc_test[1], axis=1) /
                 np.sum(totalclasswise_gtpixels_test[1], axis=1)))
            mean_class_acc_test[1] = np.vstack(
                (mean_class_acc_test[1],
                 np.mean(totalclasswise_pixel_acc_test[1] /
                         totalclasswise_gtpixels_test[1],
                         axis=1)))
            mIoU_test[1] = np.vstack(
                (mIoU_test[1],
                 np.mean(totalclasswise_pixel_acc_test[1] /
                         (totalclasswise_gtpixels_test[1] +
                          totalclasswise_predpixels_test[1] -
                          totalclasswise_pixel_acc_test[1]),
                         axis=1)))
        else:
            avg_pixel_acc = []
            mean_class_acc = []
            mIoU = []
            avg_pixel_acc.append(
                np.sum(totalclasswise_pixel_acc[0], axis=1) /
                np.sum(totalclasswise_gtpixels[0], axis=1))
            mean_class_acc.append(
                np.mean(totalclasswise_pixel_acc[0] /
                        totalclasswise_gtpixels[0],
                        axis=1))
            mIoU.append(
                np.mean(
                    totalclasswise_pixel_acc[0] /
                    (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0]
                     - totalclasswise_pixel_acc[0]),
                    axis=1))
            avg_pixel_acc.append(
                np.sum(totalclasswise_pixel_acc[1], axis=1) /
                np.sum(totalclasswise_gtpixels[1], axis=1))
            mean_class_acc.append(
                np.mean(totalclasswise_pixel_acc[1] /
                        totalclasswise_gtpixels[1],
                        axis=1))
            mIoU.append(
                np.mean(
                    totalclasswise_pixel_acc[1] /
                    (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1]
                     - totalclasswise_pixel_acc[1]),
                    axis=1))

            avg_pixel_acc_test = []
            mean_class_acc_test = []
            mIoU_test = []
            avg_pixel_acc_test.append(
                np.sum(totalclasswise_pixel_acc_test[0], axis=1) /
                np.sum(totalclasswise_gtpixels_test[0], axis=1))
            mean_class_acc_test.append(
                np.mean(totalclasswise_pixel_acc_test[0] /
                        totalclasswise_gtpixels_test[0],
                        axis=1))
            mIoU_test.append(
                np.mean(totalclasswise_pixel_acc_test[0] /
                        (totalclasswise_gtpixels_test[0] +
                         totalclasswise_predpixels_test[0] -
                         totalclasswise_pixel_acc_test[0]),
                        axis=1))
            avg_pixel_acc_test.append(
                np.sum(totalclasswise_pixel_acc_test[1], axis=1) /
                np.sum(totalclasswise_gtpixels_test[1], axis=1))
            mean_class_acc_test.append(
                np.mean(totalclasswise_pixel_acc_test[1] /
                        totalclasswise_gtpixels_test[1],
                        axis=1))
            mIoU_test.append(
                np.mean(totalclasswise_pixel_acc_test[1] /
                        (totalclasswise_gtpixels_test[1] +
                         totalclasswise_predpixels_test[1] -
                         totalclasswise_pixel_acc_test[1]),
                        axis=1))

        saved_accuracy = {
            "X": X,
            "P1": avg_pixel_acc[0],
            "P2": avg_pixel_acc[1],
            "M1": mean_class_acc[0],
            "M2": mean_class_acc[1],
            "I1": mIoU[0],
            "I2": mIoU[1],
            "P1_test": avg_pixel_acc_test[0],
            "P2_test": avg_pixel_acc_test[1],
            "M1_test": mean_class_acc_test[0],
            "M2_test": mean_class_acc_test[1],
            "I1_test": mIoU_test[0],
            "I2_test": mIoU_test[1]
        }
        pickle.dump(saved_accuracy,
                    open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "wb"))

        # print validation mIoU of both tasks
        this_mIoU1 = np.mean(totalclasswise_pixel_acc_test[0] /
                             (totalclasswise_gtpixels_test[0] +
                              totalclasswise_predpixels_test[0] -
                              totalclasswise_pixel_acc_test[0]),
                             axis=1)[0]
        this_mIoU2 = np.mean(totalclasswise_pixel_acc_test[1] /
                             (totalclasswise_gtpixels_test[1] +
                              totalclasswise_predpixels_test[1] -
                              totalclasswise_pixel_acc_test[1]),
                             axis=1)[0]
        print('Val: mIoU_sbd = {}, mIoU_lip = {}'.format(
            this_mIoU1, this_mIoU2))
Example #8
    'checkpoint': '',
    'save_interval': 10000,
    'rotate_degree': 10,
    'deep_base': True,
    'aux_weight': 0.1,
    'print_aux': True,
}

ckpt_path = './ckpt'
exp_name = 'model'

img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.3598, 0.3653, 0.3662], [0.2573, 0.2663, 0.2756])
])
mask_transform = extend_transforms.MaskToTensor()

train_joint_transform = extend_transforms.Compose([
    extend_transforms.RandomScale(),
    extend_transforms.RandomSizedRatio(760, 842, 274, 304),
    extend_transforms.RandomRotate(args['rotate_degree']),
    extend_transforms.RandomCrop(args['train_crop_size']),
])

train_set = culane.CULANE('train',
                          joint_transform=train_joint_transform,
                          transform=img_transform,
                          mask_transform=mask_transform)
train_loader = DataLoader(train_set,
                          batch_size=args['train_batch_size'],
                          num_workers=10,
Example #9
def main(args):
    print('=' * 10, 'Starting', '=' * 10, '\n')
    print(device)

    # Set the seed for reproducing the results
    random.seed(args.manual_seed)
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.manual_seed)
        cudnn.benchmark = True

    # Set up results folder
    if not os.path.exists(
            os.path.join(ROOT_ADDRESS, 'results_parts/saved_val_images')):
        os.makedirs(
            os.path.join(ROOT_ADDRESS, 'results_parts/saved_val_images'))
    if not os.path.exists(
            os.path.join(ROOT_ADDRESS, 'results_parts/saved_train_images')):
        os.makedirs(
            os.path.join(ROOT_ADDRESS, 'results_parts/saved_train_images'))

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    # data_path = get_data_path(args.dataset)

    # traindata = data_loader(data_path, split=args.split, is_transform=True, img_size=(args.img_rows, args.img_cols))
    # trainloader = data.DataLoader(traindata, batch_size=args.batch_size, num_workers=7, shuffle=True)
    # valdata = data_loader(data_path, split="val", is_transform=False, img_size=(args.img_rows, args.img_cols))
    # valloader = data.DataLoader(valdata, batch_size=args.batch_size, num_workers=7, shuffle=False)

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    target_transform = extended_transforms.MaskToTensor()

    traindata = data_loader('train',
                            transform=input_transform,
                            target_transform=target_transform,
                            do_transform=True)
    trainloader = data.DataLoader(traindata,
                                  batch_size=args.batch_size,
                                  num_workers=1,
                                  shuffle=True)
    valdata = data_loader('val',
                          transform=input_transform,
                          target_transform=target_transform)
    valloader = data.DataLoader(valdata,
                                batch_size=args.batch_size,
                                num_workers=1,
                                shuffle=False)

    n_classes = traindata.n_classes
    n_trainsamples = len(traindata)
    n_iters_per_epoch = np.ceil(n_trainsamples /
                                float(args.batch_size * args.iter_size))
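    # iter_size presumably emulates a larger effective batch via gradient accumulation
    # inside train(), so one "iteration" here covers batch_size * iter_size samples.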

    # Setup Model
    model = get_model(args.arch,
                      n_classes,
                      ignore_index=traindata.ignore_index,
                      output_stride=args.output_stride).to(device)

    epochs_done = 0
    X = []
    Y = []
    Y_test = []
    avg_pixel_acc = 0
    mean_class_acc = 0
    mIoU = 0
    avg_pixel_acc_test = 0
    mean_class_acc_test = 0
    mIoU_test = 0
    best_mIoU = 0
    best_epoch = 0

    if args.model_path:
        model_name = args.model_path.split('.')
        checkpoint_name = model_name[0] + '_optimizer.pkl'
        checkpoint = torch.load(checkpoint_name)
        optm = checkpoint['optimizer']
        model.load_state_dict(checkpoint['state_dict'])
        split_str = model_name[0].split('_')
        epochs_done = int(split_str[-1])
        saved_loss = pickle.load(
            open(os.path.join(ROOT_ADDRESS, "results_parts/saved_loss.p"),
                 "rb"))
        saved_accuracy = pickle.load(
            open(os.path.join(ROOT_ADDRESS, "results_parts/saved_accuracy.p"),
                 "rb"))
        X = saved_loss["X"][:epochs_done]
        Y = saved_loss["Y"][:epochs_done]
        Y_test = saved_loss["Y_test"][:epochs_done]
        avg_pixel_acc = saved_accuracy["P"][:epochs_done, :]
        mean_class_acc = saved_accuracy["M"][:epochs_done, :]
        mIoU = saved_accuracy["I"][:epochs_done, :]
        avg_pixel_acc_test = saved_accuracy["P_test"][:epochs_done, :]
        mean_class_acc_test = saved_accuracy["M_test"][:epochs_done, :]
        mIoU_test = saved_accuracy["I_test"][:epochs_done, :]

    if args.best_model_path:
        best_model_name = args.best_model_path.split('.')[0].split('_')
        best_mIoU = float(best_model_name[-2])
        best_epoch = int(best_model_name[-3])
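        # assumes best-model filenames of the form
        # "{arch}_{dataset}_{epoch}_{mIoU}_best.pkl" (see the save calls below),
        # so fields [-3] and [-2] are the best epoch and its mIoU.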

    # Learning rates: For new layers (such as final layer), we set lr to be 10x the learning rate of layers already trained
    bias_10x_params = filter(
        lambda x: ('bias' in x[0]) and ('final' in x[0]) and ('conv' in x[0]),
        model.named_parameters())
    bias_10x_params = list(map(lambda x: x[1], bias_10x_params))

    bias_params = filter(lambda x: ('bias' in x[0]) and ('final' not in x[0]),
                         model.named_parameters())
    bias_params = list(map(lambda x: x[1], bias_params))

    nonbias_10x_params = filter(
        lambda x:
        (('bias' not in x[0]) or ('bn' in x[0])) and ('final' in x[0]),
        model.named_parameters())
    nonbias_10x_params = list(map(lambda x: x[1], nonbias_10x_params))

    nonbias_params = filter(
        lambda x: ('bias' not in x[0]) and ('final' not in x[0]),
        model.named_parameters())
    nonbias_params = list(map(lambda x: x[1], nonbias_params))

    optimizer = torch.optim.SGD(
        [
            {'params': bias_params, 'lr': args.lr},
            {'params': bias_10x_params, 'lr': args.lr * 10},  # was args.lr; the comment above says 10x for new layers
            {'params': nonbias_10x_params, 'lr': args.lr * 10},  # was args.lr
            {'params': nonbias_params, 'lr': args.lr},
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
        nesterov=(args.optim == 'Nesterov'))
    num_param_groups = 4
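    # Four groups (bias/weight x final/pretrained) so the scheduler below can apply
    # the same cosine multiplier to each group's own base lr.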

    # Setting up scheduler
    if args.model_path and args.restore:
        # Here we restore all states of optimizer
        optimizer.load_state_dict(optm)
        total_iters = n_iters_per_epoch * args.epochs
        lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
        scheduler = lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=[lambda1] * num_param_groups,
            # cast to int: n_iters_per_epoch comes from np.ceil and is a float
            last_epoch=int(epochs_done * n_iters_per_epoch))
    else:
        # Here we simply restart the training
        if args.T0:
            total_iters = args.T0 * n_iters_per_epoch
        else:
            total_iters = ((args.epochs - epochs_done) * n_iters_per_epoch)
        lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
        scheduler = lr_scheduler.LambdaLR(optimizer,
                                          lr_lambda=[lambda1] *
                                          num_param_groups)
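    # In both branches lambda1 implements half-cosine annealing:
    #   lr(step) = base_lr * (0.5 + 0.5 * cos(pi * step / total_iters)),
    # decaying each group's lr smoothly from base_lr toward 0 over total_iters steps.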

    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps, steps_test

    # One-time initial step so the first iteration sees the step-0 multiplier; note
    # that PyTorch >= 1.1 warns when scheduler.step() precedes optimizer.step(), and
    # the scheduler is presumably stepped per iteration inside train(), which receives it.
    scheduler.step()

    criterion = nn.CrossEntropyLoss(reduction='sum',  # was size_average=False
                                    ignore_index=traindata.ignore_index)

    print('=' * 10, 'Entering epoch loop', '=' * 10, '\n')
    for epoch in range(epochs_done, args.epochs):
        print('=' * 10, 'Epoch %d' % (epoch + 1), '=' * 10)
        l_avg = 0
        totalclasswise_pixel_acc = 0
        totalclasswise_gtpixels = 0
        totalclasswise_predpixels = 0
        l_avg_test = 0
        totalclasswise_pixel_acc_test = 0
        totalclasswise_gtpixels_test = 0
        totalclasswise_predpixels_test = 0
        steps = 0
        steps_test = 0

        train(model, optimizer, criterion, trainloader, epoch, scheduler,
              traindata)
        val(model, criterion, valloader, epoch, valdata)

        # save the model every 5 epochs (and at the final epoch)
        if (epoch + 1) % 5 == 0 or epoch == args.epochs - 1:
            if (epoch + 1) > 5:
                old_model = os.path.join(
                    ROOT_ADDRESS, "results_parts/{}_{}_{}.pkl".format(
                        args.arch, args.dataset, epoch - 4))
                old_optim = os.path.join(
                    ROOT_ADDRESS,
                    "results_parts/{}_{}_{}_optimizer.pkl".format(
                        args.arch, args.dataset, epoch - 4))
                # guard the removals: when the final epoch is not a multiple of 5,
                # the (epoch - 4) checkpoint may not exist
                if os.path.isfile(old_model):
                    os.remove(old_model)
                if os.path.isfile(old_optim):
                    os.remove(old_optim)
            torch.save(
                model,
                os.path.join(
                    ROOT_ADDRESS, "results_parts/{}_{}_{}.pkl".format(
                        args.arch, args.dataset, epoch + 1)))
            torch.save(
                {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                os.path.join(
                    ROOT_ADDRESS,
                    "results_parts/{}_{}_{}_optimizer.pkl".format(
                        args.arch, args.dataset, epoch + 1)))

        # remove old loss & accuracy files
        if os.path.isfile(
                os.path.join(ROOT_ADDRESS, "results_parts/saved_loss.p")):
            os.remove(os.path.join(ROOT_ADDRESS, "results_parts/saved_loss.p"))
        if os.path.isfile(
                os.path.join(ROOT_ADDRESS, "results_parts/saved_accuracy.p")):
            os.remove(
                os.path.join(ROOT_ADDRESS, "results_parts/saved_accuracy.p"))

        # save train and validation loss
        X.append(epoch + 1)
        Y.append(l_avg / steps)
        Y_test.append(l_avg_test / steps_test)
        saved_loss = {"X": X, "Y": Y, "Y_test": Y_test}
        pickle.dump(
            saved_loss,
            open(os.path.join(ROOT_ADDRESS, "results_parts/saved_loss.p"),
                 "wb"))

        # pixel accuracy
        totalclasswise_pixel_acc = totalclasswise_pixel_acc.reshape(
            (-1, n_classes)).astype(np.float32)
        totalclasswise_gtpixels = totalclasswise_gtpixels.reshape(
            (-1, n_classes))
        totalclasswise_predpixels = totalclasswise_predpixels.reshape(
            (-1, n_classes))
        totalclasswise_pixel_acc_test = totalclasswise_pixel_acc_test.reshape(
            (-1, n_classes)).astype(np.float32)
        totalclasswise_gtpixels_test = totalclasswise_gtpixels_test.reshape(
            (-1, n_classes))
        totalclasswise_predpixels_test = totalclasswise_predpixels_test.reshape(
            (-1, n_classes))

        if isinstance(avg_pixel_acc, np.ndarray):
            avg_pixel_acc = np.vstack(
                (avg_pixel_acc, np.sum(totalclasswise_pixel_acc, axis=1) /
                 np.sum(totalclasswise_gtpixels, axis=1)))
            mean_class_acc = np.vstack(
                (mean_class_acc,
                 np.mean(totalclasswise_pixel_acc / totalclasswise_gtpixels,
                         axis=1)))
            mIoU = np.vstack(
                (mIoU,
                 np.mean(totalclasswise_pixel_acc /
                         (totalclasswise_gtpixels + totalclasswise_predpixels -
                          totalclasswise_pixel_acc),
                         axis=1)))

            avg_pixel_acc_test = np.vstack(
                (avg_pixel_acc_test,
                 np.sum(totalclasswise_pixel_acc_test, axis=1) /
                 np.sum(totalclasswise_gtpixels_test, axis=1)))
            mean_class_acc_test = np.vstack(
                (mean_class_acc_test,
                 np.mean(totalclasswise_pixel_acc_test /
                         totalclasswise_gtpixels_test,
                         axis=1)))
            mIoU_test = np.vstack((mIoU_test,
                                   np.mean(totalclasswise_pixel_acc_test /
                                           (totalclasswise_gtpixels_test +
                                            totalclasswise_predpixels_test -
                                            totalclasswise_pixel_acc_test),
                                           axis=1)))
        else:
            avg_pixel_acc = np.sum(totalclasswise_pixel_acc, axis=1) / np.sum(
                totalclasswise_gtpixels, axis=1)
            mean_class_acc = np.mean(totalclasswise_pixel_acc /
                                     totalclasswise_gtpixels,
                                     axis=1)
            mIoU = np.mean(
                totalclasswise_pixel_acc /
                (totalclasswise_gtpixels + totalclasswise_predpixels -
                 totalclasswise_pixel_acc),
                axis=1)

            avg_pixel_acc_test = np.sum(
                totalclasswise_pixel_acc_test, axis=1) / np.sum(
                    totalclasswise_gtpixels_test, axis=1)
            mean_class_acc_test = np.mean(totalclasswise_pixel_acc_test /
                                          totalclasswise_gtpixels_test,
                                          axis=1)
            mIoU_test = np.mean(
                totalclasswise_pixel_acc_test /
                (totalclasswise_gtpixels_test + totalclasswise_predpixels_test
                 - totalclasswise_pixel_acc_test),
                axis=1)

        saved_accuracy = {
            "X": X,
            "P": avg_pixel_acc,
            "M": mean_class_acc,
            "I": mIoU,
            "P_test": avg_pixel_acc_test,
            "M_test": mean_class_acc_test,
            "I_test": mIoU_test
        }
        pickle.dump(
            saved_accuracy,
            open(os.path.join(ROOT_ADDRESS, "results_parts/saved_accuracy.p"),
                 "wb"))

        # save the best model
        this_mIoU = np.mean(
            totalclasswise_pixel_acc_test /
            (totalclasswise_gtpixels_test + totalclasswise_predpixels_test -
             totalclasswise_pixel_acc_test),
            axis=1)[0]
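        # ([0] above selects the first row of the reshaped (rows, n_classes)
        # accumulator; presumably the model's primary output scale)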
        if this_mIoU > best_mIoU:
            if best_mIoU > 0:
                os.remove(
                    os.path.join(
                        ROOT_ADDRESS,
                        "results_parts/{}_{}_{}_{}_best.pkl".format(
                            args.arch, args.dataset, best_epoch,
                            float2str(best_mIoU))))
                os.remove(
                    os.path.join(
                        ROOT_ADDRESS,
                        "results_parts/{}_{}_{}_{}_optimizer_best.pkl".format(
                            args.arch, args.dataset, best_epoch,
                            float2str(best_mIoU))))
            best_mIoU = this_mIoU
            best_epoch = epoch + 1
            torch.save(
                model,
                os.path.join(
                    ROOT_ADDRESS, "results_parts/{}_{}_{}_{}_best.pkl".format(
                        args.arch, args.dataset, best_epoch,
                        float2str(best_mIoU))))
            torch.save(
                {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                os.path.join(
                    ROOT_ADDRESS,
                    "results_parts/{}_{}_{}_{}_optimizer_best.pkl".format(
                        args.arch, args.dataset, best_epoch,
                        float2str(best_mIoU))))