Example #1
0
# trainloader = data.DataLoader(CSDataSet(args.data_dir, './dataset/list/cityscapes/train.lst', max_iters=args.num_steps*args.batch_size, crop_size=(h, w),
#                 scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
#                 batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)


class DataLoaderX(data.DataLoader):
    """DataLoader variant that prefetches batches on a background thread."""

    def __iter__(self):
        # Wrap the parent iterator so batch loading overlaps with compute.
        base_iterator = super().__iter__()
        return BackgroundGenerator(base_iterator)


# Prefetching loader; assign data.DataLoader here instead to disable prefetch.
dataloader = DataLoaderX

# Full-resolution Cityscapes validation set, no augmentation.
valset = CSDataSet(
    args.data_dir,
    './dataset/list/cityscapes/val.lst',
    crop_size=(1024, 2048),
    mean=IMG_MEAN,
    scale=False,
    mirror=False,
)

valloader = dataloader(
    valset,
    num_workers=4,
    batch_size=1,
    shuffle=False,
    pin_memory=True,
)

# Training set — note: also built without scale/mirror augmentation here.
trainset = CSDataSet(
    args.data_dir,
    './dataset/list/cityscapes/train.lst',
    crop_size=(1024, 2048),
    mean=IMG_MEAN,
    scale=False,
    mirror=False,
)
Example #2
0
def main():
    """Create the model and start the (optionally distributed) training.

    Reads all configuration from the command line via ``get_parser``;
    writes snapshots to ``args.snapshot_dir`` every ``save_pred_every``
    iterations and stops after ``args.num_steps`` iterations.
    """
    parser = get_parser()

    with Engine(custom_parser=parser) as engine:
        args = parser.parse_args()

        cudnn.benchmark = True
        # Under distributed training, seed per rank so each worker draws a
        # different augmentation stream; otherwise use the user seed.
        seed = args.random_seed
        if engine.distributed:
            seed = engine.local_rank
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)

        # Data loader: crop size is parsed from the 'H,W' CLI string.
        h, w = map(int, args.input_size.split(','))
        input_size = (h, w)
        dataset = CSDataSet(args.data_dir,
                            args.data_list,
                            max_iters=None,
                            crop_size=input_size,
                            scale=args.random_scale,
                            mirror=args.random_mirror,
                            mean=IMG_MEAN)
        train_loader, train_sampler = engine.get_train_loader(dataset)

        # Criterion: the OHEM variant mines hard pixels; both are deeply
        # supervised (DSN) losses.
        if args.ohem:
            criterion = CriterionOhemDSN(thresh=args.ohem_thres,
                                         min_kept=args.ohem_keep)
        else:
            criterion = CriterionDSN()

        # Resolve the model class by attribute lookup on the `networks`
        # package instead of eval() on a user-supplied string (same effect,
        # no arbitrary code execution).
        seg_model = getattr(networks, args.model).Seg_Model(
            num_classes=args.num_classes,
            criterion=criterion,
            pretrained_model=args.restore_from,
            recurrence=args.recurrence)

        # Single parameter group; 'lr' appears both in the group and as the
        # optimizer default so adjust_learning_rate can rescale it later.
        optimizer = optim.SGD(
            [{
                'params': filter(lambda p: p.requires_grad,
                                 seg_model.parameters()),
                'lr': args.learning_rate
            }],
            lr=args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
        optimizer.zero_grad()

        seg_model.cuda()

        model = torch.nn.DataParallel(seg_model)
        model.train()

        if not os.path.exists(args.snapshot_dir):
            os.makedirs(args.snapshot_dir)

        run = True
        global_iteration = args.start_iters

        while run:
            # Derive the epoch from the global step so resumed runs keep the
            # sampler shuffling consistent.
            epoch = global_iteration // len(train_loader)
            if engine.distributed:
                train_sampler.set_epoch(epoch)

            bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
            pbar = tqdm(range(len(train_loader)),
                        file=sys.stdout,
                        bar_format=bar_format)
            dataloader = iter(train_loader)
            print('start')

            for idx in pbar:
                global_iteration += 1

                # Python 3: iterator objects have no .next(); use next().
                images, labels, _, _ = next(dataloader)
                images = images.cuda()
                labels = labels.long().cuda()

                optimizer.zero_grad()
                lr = adjust_learning_rate(optimizer, args.learning_rate,
                                          global_iteration - 1, args.num_steps,
                                          args.power)
                loss = model(images, labels).mean()

                reduce_loss = loss.data
                loss.backward()
                optimizer.step()

                print_str = 'Epoch{}/Iters{}'.format(epoch, global_iteration) \
                        + ' Iter{}/{}:'.format(idx + 1, len(train_loader)) \
                        + ' lr=%.2e' % lr \
                        + ' loss=%.2f' % reduce_loss.item()

                pbar.set_description(print_str, refresh=False)

                # Only rank 0 writes snapshots under distributed training.
                if (not engine.distributed) or (engine.distributed
                                                and engine.local_rank == 0):
                    if global_iteration % args.save_pred_every == 0 or global_iteration >= args.num_steps:
                        print('taking snapshot ...')
                        torch.save(
                            seg_model.state_dict(),
                            osp.join(
                                args.snapshot_dir,
                                'CS_scenes_' + str(global_iteration) + '.pth'))

                if global_iteration >= args.num_steps:
                    run = False
                    break
Example #3
0
def main():
    """Create the model and start the (optionally distributed) evaluation.

    Restores weights from ``args.restore_from``, predicts every image in the
    test split, saves colorized PNGs next to the checkpoint, and writes the
    mean IoU plus per-class IoU to ``result.txt``.
    """
    parser = get_parser()

    with Engine(custom_parser=parser) as engine:
        args = parser.parse_args()

        cudnn.benchmark = True

        h, w = map(int, args.input_size.split(','))
        # --whole evaluates on full-resolution Cityscapes frames.
        if args.whole:
            input_size = (1024, 2048)
        else:
            input_size = (h, w)

        # Resolve the model class by attribute lookup on the `networks`
        # package instead of eval() on a user-supplied string.
        seg_model = getattr(networks, args.model).Seg_Model(
            num_classes=args.num_classes, recurrence=args.recurrence)

        load_model(seg_model, args.restore_from)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        seg_model.to(device)

        model = engine.data_parallel(seg_model)
        model.eval()

        dataset = CSDataSet(args.data_dir,
                            args.data_list,
                            crop_size=(1024, 2048),
                            mean=IMG_MEAN,
                            scale=False,
                            mirror=False)
        test_loader, test_sampler = engine.get_test_loader(dataset)

        if engine.distributed:
            test_sampler.set_epoch(0)

        confusion_matrix = np.zeros((args.num_classes, args.num_classes))
        palette = get_palette(256)

        # Predictions are saved in an 'outputs' dir next to the checkpoint.
        save_path = os.path.join(os.path.dirname(args.restore_from), 'outputs')
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
        pbar = tqdm(range(len(test_loader)),
                    file=sys.stdout,
                    bar_format=bar_format)
        dataloader = iter(test_loader)

        for idx in pbar:
            # Python 3: iterator objects have no .next(); use next().
            image, label, size, name = next(dataloader)
            size = size[0].numpy()
            with torch.no_grad():
                output = predict_multiscale(model, image, input_size, [1.0],
                                            args.num_classes, False, 0)

            seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
            # np.int was removed in NumPy 1.24; use the explicit np.int64.
            seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]],
                                dtype=np.int64)

            for i in range(image.size(0)):
                output_im = PILImage.fromarray(seg_pred[i])
                output_im.putpalette(palette)
                output_im.save(os.path.join(save_path, name[i] + '.png'))

            # 255 marks 'ignore' pixels in the Cityscapes ground truth.
            ignore_index = seg_gt != 255
            seg_gt = seg_gt[ignore_index]
            seg_pred = seg_pred[ignore_index]
            confusion_matrix += get_confusion_matrix(seg_gt, seg_pred,
                                                     args.num_classes)

            print_str = ' Iter{}/{}'.format(idx + 1, len(test_loader))
            pbar.set_description(print_str, refresh=False)

        # Aggregate per-rank confusion matrices before computing IoU.
        confusion_matrix = torch.from_numpy(
            confusion_matrix).contiguous().cuda()
        confusion_matrix = engine.all_reduce_tensor(confusion_matrix,
                                                    norm=False).cpu().numpy()
        pos = confusion_matrix.sum(1)
        res = confusion_matrix.sum(0)
        tp = np.diag(confusion_matrix)

        IU_array = (tp / np.maximum(1.0, pos + res - tp))
        mean_IU = IU_array.mean()

        # Report on rank 0, and also when running single-process — the
        # original condition skipped reporting entirely when not distributed.
        if (not engine.distributed) or engine.local_rank == 0:
            print({'meanIU': mean_IU, 'IU_array': IU_array})
            model_path = os.path.dirname(args.restore_from)
            with open(os.path.join(model_path, 'result.txt'), 'w') as f:
                f.write(
                    json.dumps({
                        'meanIU': mean_IU,
                        'IU_array': IU_array.tolist()
                    }))
Example #4
0
#         # except StopIteration:
#         #     self.next_input = None
#         #     self.next_target = None
#         #     return
#         # with torch.cuda.stream(self.stream):
#         #     self.next_input = self.next_input.cuda(non_blocking=True)
#         #     self.next_target = self.next_target.cuda(non_blocking=True)
#         #     # With Amp, it isn't necessary to manually convert data to half.
#         #     # if args.fp16:
#         #     #     self.next_input = self.next_input.half()
#         #     # else:

# Training set: length is stretched to num_steps * batch_size so one pass of
# the loader covers the whole schedule; crops and flips per CLI flags.
trainset = CSDataSet(
    args.data_dir,
    './dataset/list/cityscapes/train.lst',
    max_iters=args.num_steps * args.batch_size,
    crop_size=(h, w),
    mean=IMG_MEAN,
    scale=args.random_scale,
    mirror=args.random_mirror,
)

# Validation set: full-resolution frames, no augmentation.
valset = CSDataSet(
    args.data_dir,
    './dataset/list/cityscapes/val.lst',
    crop_size=(1024, 2048),
    mean=IMG_MEAN,
    scale=False,
    mirror=False,
)


def _init_fn(worker_id):
    """Seed NumPy inside each DataLoader worker process.

    Offsets the module-level ``seed`` (presumably set during startup — TODO
    confirm) by ``worker_id`` so every worker draws a distinct augmentation
    stream; seeding all workers identically would make them produce the same
    random crops/flips.
    """
    np.random.seed(seed + worker_id)

Example #5
0
def main():
    """Create the model and start the single-GPU evaluation process.

    Restores weights from ``args.restore_from``, predicts every test image
    (multi-scale when --whole, sliding-window otherwise), saves colorized
    PNGs under ./outputs, and writes mean/per-class IoU to result.txt.
    """
    args = get_arguments()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    h, w = map(int, args.input_size.split(','))
    # --whole evaluates on full-resolution Cityscapes frames.
    if args.whole:
        input_size = (1024, 2048)
    else:
        input_size = (h, w)

    model = Res_Deeplab(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda()

    testloader = data.DataLoader(CSDataSet(args.data_dir,
                                           args.data_list,
                                           crop_size=(1024, 2048),
                                           mean=IMG_MEAN,
                                           scale=False,
                                           mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    confusion_matrix = np.zeros((args.num_classes, args.num_classes))
    palette = get_palette(256)

    if not os.path.exists('outputs'):
        os.makedirs('outputs')

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % (index))
        image, label, size, name = batch
        size = size[0].numpy()
        with torch.no_grad():
            if args.whole:
                output = predict_multiscale(model, image, input_size,
                                            [0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
                                            args.num_classes, True,
                                            args.recurrence)
            else:
                output = predict_sliding(model, image.numpy(), input_size,
                                         args.num_classes, True,
                                         args.recurrence)
        seg_pred = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        output_im = PILImage.fromarray(seg_pred)
        output_im.putpalette(palette)
        output_im.save('outputs/' + name[0] + '.png')

        # np.int was removed in NumPy 1.24; use the explicit np.int64.
        seg_gt = np.asarray(label[0].numpy()[:size[0], :size[1]],
                            dtype=np.int64)

        # 255 marks 'ignore' pixels in the Cityscapes ground truth.
        ignore_index = seg_gt != 255
        seg_gt = seg_gt[ignore_index]
        seg_pred = seg_pred[ignore_index]
        confusion_matrix += get_confusion_matrix(seg_gt, seg_pred,
                                                 args.num_classes)

    pos = confusion_matrix.sum(1)
    res = confusion_matrix.sum(0)
    tp = np.diag(confusion_matrix)

    IU_array = (tp / np.maximum(1.0, pos + res - tp))
    mean_IU = IU_array.mean()

    print({'meanIU': mean_IU, 'IU_array': IU_array})
    with open('result.txt', 'w') as f:
        f.write(json.dumps({'meanIU': mean_IU, 'IU_array': IU_array.tolist()}))
def main():
    """Create the model and start the training (TensorBoard-logged variant).

    Fine-tunes Res_Deeplab from ``args.restore_from`` (dropping the 'fc'
    head), logs lr/loss to ``args.snapshot_dir`` every 100 iterations, and
    snapshots weights every ``args.save_pred_every`` iterations.

    NOTE(review): this module defines main() twice; this later definition
    shadows the earlier one — confirm which entry point is intended.
    """
    writer = SummaryWriter(args.snapshot_dir)

    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    # Create network.
    deeplab = Res_Deeplab(num_classes=args.num_classes)
    print(deeplab)

    # Copy pretrained weights into the model, skipping the classifier
    # head (keys whose first component is 'fc').
    saved_state_dict = torch.load(args.restore_from)
    new_params = deeplab.state_dict().copy()
    for key in saved_state_dict:
        if key.split('.')[0] != 'fc':
            new_params[key] = saved_state_dict[key]

    deeplab.load_state_dict(new_params)

    model = DataParallelModel(deeplab)
    model.train()
    model.float()
    model.cuda()

    # OHEM variant mines hard pixels; both are deeply supervised losses.
    if args.ohem:
        criterion = CriterionOhemDSN(thresh=args.ohem_thres,
                                     min_kept=args.ohem_keep)
    else:
        criterion = CriterionDSN()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    # Dataset length is stretched to num_steps * batch_size so one pass of
    # the loader covers the whole training schedule.
    trainloader = data.DataLoader(
        CSDataSet(args.data_dir, args.data_list,
                  max_iters=args.num_steps * args.batch_size,
                  crop_size=input_size,
                  scale=args.random_scale,
                  mirror=args.random_mirror,
                  mean=IMG_MEAN),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True)

    optimizer = optim.SGD(
        [{'params': filter(lambda p: p.requires_grad, deeplab.parameters()),
          'lr': args.learning_rate}],
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    optimizer.zero_grad()

    for i_iter, batch in enumerate(trainloader):
        i_iter += args.start_iters
        images, labels, _, _ = batch
        images = images.cuda()
        labels = labels.long().cuda()
        # Legacy PyTorch 0.3 required wrapping tensors in Variable.
        if torch_ver == "0.3":
            images = Variable(images)
            labels = Variable(labels)

        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter)
        preds = model(images)

        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        if i_iter % 100 == 0:
            writer.add_scalar('learning_rate', lr, i_iter)
            writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

        print('iter = {} of {} completed, loss = {}'.format(
            i_iter, args.num_steps, loss.data.cpu().numpy()))

        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(
                deeplab.state_dict(),
                osp.join(args.snapshot_dir,
                         'CS_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(
                deeplab.state_dict(),
                osp.join(args.snapshot_dir,
                         'CS_scenes_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    # NOTE(review): `start` is presumably captured at module import time —
    # confirm it is defined elsewhere in the original file.
    print(end - start, 'seconds')