Esempio n. 1
0
def worker(cfg, gpu_id, start_idx, end_idx, result_queue):
    """Evaluate one shard [start_idx, end_idx) of the validation list on a single GPU.

    Per-sample results are pushed onto ``result_queue`` by ``evaluate``.
    """
    torch.cuda.set_device(gpu_id)

    # Network builders
    enc_query = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_enc_query)
    enc_memory = ModelBuilder.build_encoder_memory(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_enc_memory,
        num_class=cfg.DATASET.num_class)
    att_query = ModelBuilder.build_encoder(
        arch='attention',
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_att_query)
    att_memory = ModelBuilder.build_encoder(
        arch='attention',
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_att_memory)
    decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)

    criterion = nn.NLLLoss(ignore_index=-1)

    seg_module = SegmentationAttentionModule(
        enc_query,
        enc_memory,
        att_query,
        att_memory,
        decoder,
        criterion,
        normalize_key=cfg.MODEL.normalize_key,
        p_scalar=cfg.MODEL.p_scalar)
    seg_module.cuda()

    # Dataset and loader for this worker's slice
    val_set = ValDataset(cfg.DATASET.root_dataset,
                         cfg.DATASET.list_val,
                         cfg.DATASET,
                         start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=cfg.VAL.batch_size,
                                             shuffle=False,
                                             collate_fn=user_scattered_collate,
                                             num_workers=2)

    # Main loop
    evaluate(seg_module, val_loader, cfg, gpu_id, result_queue)
Esempio n. 2
0
def main(cfg, gpu, args, progress):
    """Run semantic-segmentation inference over ``cfg.list_test``.

    Args:
        cfg: config node with MODEL/DATASET/TEST sub-configs.
        gpu: GPU device index, used only when CUDA is actually available.
        args: command-line namespace; ``args.gpu_flag`` requests GPU usage.
        progress: progress handle forwarded to ``test``.
    """
    # Bug fix: the original checked ``torch.cuda.is_available()`` only for
    # the device selection, then still built GPU-mode models and called
    # ``.cuda()`` whenever gpu_flag was set — crashing on CUDA-less hosts.
    # Combine the user request with actual availability once, up front.
    gpu_flag = args.gpu_flag and torch.cuda.is_available()
    if gpu_flag:
        torch.cuda.set_device(gpu)
        print('使用GPU进行语义分割')
    else:
        print('未开启GPU或未安装CUDA环境,设置使用CPU进行语义分割')

    # Network Builders (CPU path passes gpu_flag=False through to the builder)
    if gpu_flag:
        net_encoder = ModelBuilder.build_encoder(
            arch=cfg.MODEL.arch_encoder,
            fc_dim=cfg.MODEL.fc_dim,
            weights=cfg.MODEL.weights_encoder)
        net_decoder = ModelBuilder.build_decoder(
            arch=cfg.MODEL.arch_decoder,
            fc_dim=cfg.MODEL.fc_dim,
            num_class=cfg.DATASET.num_class,
            weights=cfg.MODEL.weights_decoder,
            use_softmax=True)
    else:
        net_encoder = ModelBuilder.build_encoder(
            arch=cfg.MODEL.arch_encoder,
            fc_dim=cfg.MODEL.fc_dim,
            weights=cfg.MODEL.weights_encoder,
            gpu_flag=False)
        net_decoder = ModelBuilder.build_decoder(
            arch=cfg.MODEL.arch_decoder,
            fc_dim=cfg.MODEL.fc_dim,
            num_class=cfg.DATASET.num_class,
            weights=cfg.MODEL.weights_decoder,
            use_softmax=True,
            gpu_flag=False)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader
    dataset_test = TestDataset(cfg.list_test, cfg.DATASET)
    loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=cfg.TEST.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)
    if gpu_flag:
        segmentation_module.cuda()

    # Main loop
    test(segmentation_module, loader_test, gpu, gpu_flag, args, progress)

    print('语义分割处理完成!')
Esempio n. 3
0
def main(cfg, gpu):
    """Run inference on GPU ``gpu`` with the encoder/decoder described in ``cfg``."""
    torch.cuda.set_device(gpu)

    # Assemble the segmentation model from its pretrained parts.
    encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         num_class=cfg.DATASET.num_class,
                                         weights=cfg.MODEL.weights_decoder,
                                         use_softmax=True)
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()

    # Test data
    test_set = TestDataset(cfg.list_test, cfg.DATASET)
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=cfg.TEST.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    # Main loop
    test(seg_module, test_loader, gpu)

    print('Inference done!')
Esempio n. 4
0
def worker(args, dev_id, start_idx, end_idx, result_queue):
    """Evaluate records [start_idx, end_idx) of the Broden validation split on one device."""
    torch.cuda.set_device(dev_id)

    # Validation data restricted to this worker's slice.
    val_set = ValDataset(
        broden_dataset.record_list['validation'], args,
        max_sample=args.num_val, start_idx=start_idx,
        end_idx=end_idx)
    val_loader = torchdata.DataLoader(
        val_set,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=2)

    # Model
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    decoder = model_builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        nr_classes=args.nr_classes,
        weights=args.weights_decoder,
        use_softmax=True)

    seg_module = SegmentationModule(encoder, decoder)
    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args, dev_id, result_queue)
def worker(args, dev_id, start_idx, end_idx, result_queue):
    """Per-GPU evaluation worker: scores its slice of the validation records."""
    torch.cuda.set_device(dev_id)

    # Loader over the assigned validation range.
    dataset = ValDataset(
        broden_dataset.record_list['validation'], args,
        max_sample=args.num_val, start_idx=start_idx,
        end_idx=end_idx)
    loader = torchdata.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=2)

    # Build encoder/decoder from pretrained weights.
    builder = ModelBuilder()
    enc = builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    dec = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        nr_classes=args.nr_classes,
        weights=args.weights_decoder,
        use_softmax=True)

    module = SegmentationModule(enc, dec)
    module.cuda()

    # Main loop
    evaluate(module, loader, args, dev_id, result_queue)
Esempio n. 6
0
def main(args):
    """Evaluate the model described by ``args`` on the validation list."""
    torch.cuda.set_device(args.gpu_id)

    # Model
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_decoder,
                                    use_softmax=True)
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # Data
    val_set = ValDataset(args.list_val, args, max_sample=args.num_val)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
Esempio n. 7
0
def main(args):
    """Run single-image inference on ``args.test_img``."""
    torch.cuda.set_device(args.gpu_id)

    # Model
    builder = ModelBuilder()
    enc = builder.build_encoder(arch=args.arch_encoder,
                                fc_dim=args.fc_dim,
                                weights=args.weights_encoder)
    dec = builder.build_decoder(arch=args.arch_decoder,
                                fc_dim=args.fc_dim,
                                nr_classes=args.nr_classes,
                                weights=args.weights_decoder,
                                use_softmax=True)
    module = SegmentationModule(enc, dec)
    module.cuda()

    # Wrap the single test image in the list-of-records format TestDataset expects.
    records = [{'fpath_img': args.test_img}]
    dataset = TestDataset(records, args, max_sample=args.num_val)
    loader = torchdata.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  collate_fn=user_scattered_collate,
                                  num_workers=5,
                                  drop_last=True)

    # Main loop
    test(module, loader, args)

    print('Inference done!')
Esempio n. 8
0
def segment(args, h, w):
    """Segment the content/style images from ``args`` and return ``test``'s result.

    Args:
        args: namespace with model path/arch settings plus ``content``/``style``
            image paths.
        h, w: output height and width forwarded to ``test``.

    Returns:
        The value returned by ``test`` for the loaded data.
    """
    # absolute paths of model weights
    weights_encoder = os.path.join(args.model_path, 'encoder' + args.suffix)
    weights_decoder = os.path.join(args.model_path, 'decoder' + args.suffix)

    # Bug fix: the original asserted the encoder path twice and never checked
    # the decoder checkpoint (and misspelled "exist" in the message).
    assert os.path.exists(weights_encoder) and \
        os.path.exists(weights_decoder), 'checkpoint does not exist!'

    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=weights_decoder,
                                        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    segmentation_module.eval()
    segmentation_module.cuda()

    # Dataset and Loader
    data_dict = {"in": args.content, "tar": args.style}
    data = load_data(data_dict, args)

    # Main loop: inference only, so no gradients are needed.
    with torch.no_grad():
        res = test(segmentation_module, data, (h, w), args)
    print('Inference done!')
    return res
def worker(args, gpu_id, start_idx, end_idx, result_queue):
    """Evaluate validation samples [start_idx, end_idx) on GPU ``gpu_id``."""
    torch.cuda.set_device(gpu_id)

    # Data for this worker's slice.
    val_set = ValDataset(args.list_val,
                         args,
                         max_sample=args.num_val,
                         start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=2)

    # Model
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    num_class=args.num_class,
                                    weights=args.weights_decoder,
                                    use_softmax=True)
    criterion = nn.NLLLoss(ignore_index=-1)

    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args, gpu_id, result_queue)
Esempio n. 10
0
def main(args):
    """Evaluate encoder/decoder nets (legacy tuple API) on the validation set."""
    # Model parts
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    segSize=args.segSize,
                                    weights=args.weights_decoder,
                                    use_softmax=True)
    criterion = nn.NLLLoss2d(ignore_index=-1)

    # Validation data
    val_set = Dataset(args.list_val,
                      args,
                      max_sample=args.num_val,
                      is_train=0)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=2,
                                             drop_last=True)

    # Move each net (and the criterion) onto the GPU.
    nets = (encoder, decoder, criterion)
    for net in nets:
        net.cuda()

    # Main loop
    evaluate(nets, val_loader, args)

    print('Evaluation Done!')
Esempio n. 11
0
def main(cfg, gpu):
    """Evaluate the configured model on the validation split using one GPU."""
    torch.cuda.set_device(gpu)

    # Model
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=cfg.MODEL.arch_encoder.lower(),
                                    fc_dim=cfg.MODEL.fc_dim,
                                    weights=cfg.MODEL.weights_encoder)
    decoder = builder.build_decoder(arch=cfg.MODEL.arch_decoder.lower(),
                                    fc_dim=cfg.MODEL.fc_dim,
                                    num_class=cfg.DATASET.num_class,
                                    weights=cfg.MODEL.weights_decoder,
                                    use_softmax=True)
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # Data
    val_set = ValDataset(cfg.DATASET.root_dataset, cfg.DATASET.list_val,
                         cfg.DATASET)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=cfg.VAL.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, cfg, gpu)

    print('Evaluation Done!')
def load_model(args):
    """Build, load and return an eval-mode SegmentationModule from ``args``.

    Mutates ``args`` in place (lower-cased arch names, resolved weight paths),
    prints the effective arguments, and returns the ready-to-use module.
    """
    args.arch_encoder = args.arch_encoder.lower()
    args.arch_decoder = args.arch_decoder.lower()
    print("Input arguments:")
    for key, val in vars(args).items():
        print("{:16} {}".format(key, val))

    # absolute paths of model weights
    args.weights_encoder = os.path.join(DIR_PATH, 'models', args.model_path,
                                        'encoder' + args.suffix)
    args.weights_decoder = os.path.join(DIR_PATH, 'models', args.model_path,
                                        'decoder' + args.suffix)

    # Bug fix: the original tested the encoder checkpoint twice and never the
    # decoder one (and misspelled "exist" in the message).
    assert os.path.exists(args.weights_encoder) and \
        os.path.exists(args.weights_decoder), 'checkpoint does not exist!'

    # Fix: only select a CUDA device when CUDA exists, matching the guarded
    # ``.cuda()`` call below — the original's unconditional set_device would
    # fail on CPU-only machines that the guard was clearly meant to support.
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=args.weights_decoder,
                                        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    segmentation_module.eval()
    if torch.cuda.is_available():
        segmentation_module.cuda()
    return segmentation_module
Esempio n. 13
0
def main(args):
    """Run live camera segmentation on GPU ``args.gpu``."""
    torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        num_class=args.num_class,
        weights=args.weights_decoder,
        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    segmentation_module.cuda()

    # Main loop: stream frames from the default camera (device 0).
    cap = cv2.VideoCapture(0)
    try:
        cam_test(segmentation_module, cap, args)
    finally:
        # Fix: release the capture device even if cam_test raises;
        # the original leaked the camera handle.
        cap.release()

    print('Inference done!')
Esempio n. 14
0
def resave():
    """Rebuild a ResNet-50 encoder and save the full module object to disk.

    NOTE(review): this reads like a one-off conversion script — the
    builder-created encoder is immediately overwritten below and the
    decoder is never used.
    """
    arch_encoder = "resnet50"
    arch_decoder = "ppm_bilinear_deepsup"

    # weights_encoder="./ckpt/MacNetV2_mobilev2_scse_ppm/encoder_epoch_19.pth"
    # Empty weight paths: the builder constructs randomly-initialized nets.
    weights_encoder = ""
    weights_decoder = ""

    fc_dim = 2048

    builder = ModelBuilder()
    # NOTE(review): use_softmax is normally a decoder option; passing it to
    # build_encoder (and omitting it on build_decoder) looks swapped —
    # confirm against ModelBuilder's signatures.
    net_encoder = builder.build_encoder(arch=arch_encoder,
                                        fc_dim=fc_dim,
                                        weights=weights_encoder,
                                        use_softmax=True)
    net_decoder = builder.build_decoder(arch=arch_decoder,
                                        fc_dim=fc_dim,
                                        num_class=14,
                                        weights=weights_decoder)

    #net_encoder = resnet.__dict__['resnet50'](pretrained=False)
    # The builder result above is discarded; a plain ResNet-50 with random
    # weights is what actually gets saved below.
    net_encoder = resnet.resnet50(pretrained=False)

    print(net_encoder)

    # Saves the entire module object (not just a state_dict).
    save_path = "./ckpt/MacNetV2_mobilev2_scse_ppm/encoder_full_epoch_19.pth"
    torch.save(net_encoder, save_path)
Esempio n. 15
0
def setup_test(gpu_id=0,
               encoder_type="resnet50dilated",
               decoder_type="ppm_deepsup",
               fc_dim=2048,
               model_path="baseline-resnet50dilated-ppm_deepsup",
               num_class=150,
               suffix="_epoch_20.pth"):
    """Build and return an eval-mode SegmentationModule on GPU ``gpu_id``."""
    torch.cuda.set_device(gpu_id)

    # Resolve checkpoint paths under model_path.
    enc_weights = os.path.join(model_path, 'encoder' + suffix)
    dec_weights = os.path.join(model_path, 'decoder' + suffix)

    # Build the two halves of the network from those checkpoints.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=encoder_type,
                                    fc_dim=fc_dim,
                                    weights=enc_weights)
    decoder = builder.build_decoder(arch=decoder_type,
                                    fc_dim=fc_dim,
                                    num_class=num_class,
                                    weights=dec_weights,
                                    use_softmax=True)
    criterion = nn.NLLLoss(ignore_index=-1)

    module = SegmentationModule(encoder, decoder, criterion)
    module.cuda()
    module.eval()
    return module
Esempio n. 16
0
def main(args):
    """Segment the single image ``args.test_img`` via ``test``."""
    torch.cuda.set_device(args.gpu_id)

    # Model
    builder = ModelBuilder()
    encoder = builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    decoder = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        nr_classes=args.nr_classes,
        weights=args.weights_decoder,
        use_softmax=True)
    module = SegmentationModule(encoder, decoder)
    module.cuda()

    # Single-record dataset around the test image.
    records = [{'fpath_img': args.test_img}]
    dataset = TestDataset(records, args, max_sample=args.num_val)
    loader = torchdata.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    # Main loop
    test(module, loader, args)

    print('Inference done!')
def main(args):
    """Train encoder/decoder on ConceptDataset for ``args.num_epoch`` epochs."""
    # Model
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        segSize=args.segSize,
                                        num_class=args.num_class,
                                        weights=args.weights_decoder)

    # Label 0 is treated as "ignore" here.
    crit = nn.NLLLoss2d(ignore_index=0)

    # Training data
    dataset_train = ConceptDataset(args)
    loader_train = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=int(args.workers),
        drop_last=True)
    args.epoch_iters = int(len(dataset_train) / args.batch_size)
    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # Move nets onto the GPU(s), wrapping in DataParallel when several are used.
    if args.num_gpus > 1:
        net_encoder = nn.DataParallel(net_encoder,
                                      device_ids=range(args.num_gpus))
        net_decoder = nn.DataParallel(net_decoder,
                                      device_ids=range(args.num_gpus))
    nets = (net_encoder, net_decoder, crit)
    if args.num_gpus > 0:
        for net in nets:
            net.cuda()

    # Optimizers
    optimizers = create_optimizers(nets, args)

    # Training loop with per-split history bookkeeping.
    history = {split: {'epoch': [], 'err': [], 'acc': []}
               for split in ('train', 'val')}
    for epoch in range(1, args.num_epoch + 1):
        train(nets, loader_train, optimizers, history, epoch, args)

        # Periodic evaluation was disabled in the original; checkpoint and
        # adjust the learning rate each epoch instead.
        checkpoint(nets, history, args)
        adjust_learning_rate(optimizers, epoch, args)

    print('Training Done!')
Esempio n. 18
0
def fetch_teacher_outputs(args):
    """Run the teacher segmentation model over the training list and collect
    its argmax predictions.

    Returns:
        list of per-image predicted class-index arrays, one entry per batch.
    """
    teacher_outputs = []
    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=args.weights_decoder)
    crit = nn.NLLLoss(ignore_index=-1)
    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and loader; the training list is used, in its original order.
    dataset_train = TrainDataset(args.list_train,
                                 args,
                                 batch_per_gpu=args.batch_size_per_gpu)

    loader_train = torchdata.DataLoader(
        dataset_train,
        batch_size=args.num_gpus,  # we have modified data_parallel
        shuffle=False,  # we do not use this param
        collate_fn=user_scattered_collate,
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=True)
    segmentation_module.cuda()

    for i, batch_data in enumerate(loader_train):

        batch_data = batch_data[0]  # get data list
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            # Accumulator for the multi-scale predictions, averaged below.
            pred = torch.zeros(1, args.num_class, segSize[0], segSize[1])

            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, args.gpu_id)

                # forward pass; average over the list of scales
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                pred = pred + pred_tmp.cpu() / len(args.imgSize)

            _, preds = torch.max(pred, dim=1)
            # Bug fix: the original called ``preds.as_numpy(...)`` — Tensors
            # have no such method (AttributeError) and the result was thrown
            # away. Use the module-level ``as_numpy`` helper (as done for
            # seg_label above) and keep its result.
            preds = as_numpy(preds.squeeze(0))
            teacher_outputs.append(preds)
    return teacher_outputs
def main(args):
    """Evaluate a shared encoder with two decoder heads on CityScapes and BDD."""
    # Model: one shared encoder, two decoder heads (default arch + 'c1').
    builder = ModelBuilder()
    encoder = builder.build_encoder(weights=args.weights_encoder)
    decoder_1 = builder.build_decoder(weights=args.weights_decoder_1)
    decoder_2 = builder.build_decoder(arch='c1', weights=args.weights_decoder_2)

    # Optionally weight the classes in the loss.
    if args.weighted_class:
        crit = nn.NLLLoss(ignore_index=-1, weight=args.class_weight)
    else:
        crit = nn.NLLLoss(ignore_index=-1)

    # Datasets: GTA (train split, used only for epoch_iters), two val sets.
    dataset_train = GTA(root=args.root_gta, cropSize=args.imgSize, is_train=1)
    dataset_val = CityScapes('val', root=args.root_cityscapes, cropSize=args.imgSize,
                             max_sample=args.num_val, is_train=0)
    dataset_val_2 = BDD('val', root=args.root_bdd, cropSize=args.imgSize,
                        max_sample=args.num_val, is_train=0)

    loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_eval,
        shuffle=False,
        num_workers=int(args.workers),
        drop_last=True)
    loader_val_2 = torch.utils.data.DataLoader(
        dataset_val_2,
        batch_size=args.batch_size_eval,
        shuffle=False,
        num_workers=int(args.workers),
        drop_last=True)
    args.epoch_iters = int(len(dataset_train) / args.batch_size)
    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # Move nets onto the GPU(s).
    if args.num_gpus > 1:
        encoder = nn.DataParallel(encoder,
                                  device_ids=range(args.num_gpus))
        decoder_1 = nn.DataParallel(decoder_1,
                                    device_ids=range(args.num_gpus))
        decoder_2 = nn.DataParallel(decoder_2,
                                    device_ids=range(args.num_gpus))

    nets = (encoder, decoder_1, decoder_2, crit)
    for net in nets:
        net.cuda()

    history = {split: {'epoch': [], 'err': [], 'acc': [], 'mIoU': []}
               for split in ('train', 'val', 'val_2')}

    # Single evaluation pass over both validation loaders.
    evaluate(nets, loader_val, loader_val_2, history, 0, args)
    print('Evaluation Done!')
Esempio n. 20
0
def main(args):
    """Train the multi-source Broden segmentation model."""
    # Model
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    # Decoder head sizes per task; 'part' counts parts across all objects.
    nr_classes = broden_dataset.nr.copy()
    nr_classes['part'] = sum(
        len(parts) for obj, parts in broden_dataset.object_part.items())
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        nr_classes=nr_classes,
                                        weights=args.weights_decoder)

    # TODO(LYC):: move criterion outside model.

    # Deep-supervision decoders take an extra loss-scale argument.
    if args.arch_decoder.endswith('deepsup'):
        segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                                 args.deep_sup_scale)
    else:
        segmentation_module = SegmentationModule(net_encoder, net_decoder)

    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # create loader iterator
    iterator_train = create_multi_source_train_data_loader(args=args)

    # Multi-GPU: scatter user-collated batches and patch sync-BN callbacks.
    if args.num_gpus > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=range(args.num_gpus))
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Optimizers operate on the raw (unwrapped) nets.
    nets = (net_encoder, net_decoder)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': []}}
    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(segmentation_module, iterator_train, optimizers, history, epoch,
              args)
        checkpoint(nets, history, args, epoch)

    print('Training Done!')
Esempio n. 21
0
def main(args):
    """Training entry point: multi-source loader plus encoder/decoder model."""
    # Build encoder and decoder from (optional) pretrained weights.
    builder = ModelBuilder()
    enc = builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    dec = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        nr_classes=args.nr_classes,
        weights=args.weights_decoder)

    # TODO(LYC):: move criterion outside model.

    # Deep-supervision decoders need the extra loss-scale argument.
    if args.arch_decoder.endswith('deepsup'):
        module = SegmentationModule(enc, dec, args.deep_sup_scale)
    else:
        module = SegmentationModule(enc, dec)

    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # Data iterator over all training sources.
    iterator_train = create_multi_source_train_data_loader(args=args)

    # Multi-GPU wrapping with the sync-BN replication patch.
    if args.num_gpus > 1:
        module = UserScatteredDataParallel(
            module,
            device_ids=range(args.num_gpus))
        patch_replication_callback(module)
    module.cuda()

    # Optimizers operate on the raw (unwrapped) nets.
    nets = (enc, dec)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': []}}
    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(module, iterator_train, optimizers, history, epoch, args)
        checkpoint(nets, history, args, epoch)

    print('Training Done!')
Esempio n. 22
0
def main(args):
    """Run single-GPU inference with a pretrained encoder/decoder pair.

    Builds the segmentation model from ``args``, wraps the test images in
    a DataLoader, and times a single ``test`` pass over them.
    """
    torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        num_class=args.num_class,
        weights=args.weights_decoder,
        use_softmax=True)  # inference mode: decoder emits probabilities

    # ignore_index=-1 skips unlabeled pixels in the loss.
    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader.  Use the images supplied via args when available;
    # previously a hard-coded debug list always overrode args.test_imgs.
    if getattr(args, 'test_imgs', None):
        test_imgs = args.test_imgs
    else:
        test_imgs = ['./05b07068-373666cb.jpg']  # bundled sample frame
    list_test = [{'fpath_img': x} for x in test_imgs]
    dataset_test = TestDataset(
        list_test, args, max_sample=args.num_val)
    loader_test = torchdata.DataLoader(
        dataset_test,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Main loop, timed end to end (the timing was previously discarded).
    start = time.time()
    test(segmentation_module, loader_test, args)
    end = time.time()
    print('Inference took {:.2f}s'.format(end - start))
# Esempio n. 23
# 0
    def main(self):
        """Build the segmentation model and serve ROS image requests.

        Sets up the publisher/subscriber pair on the configured topics,
        then polls the loader queue and runs inference on each entry
        until ROS shuts down.
        """

        torch.cuda.set_device(self.gpu)

        # Network Builders
        net_encoder = ModelBuilder.build_encoder(
            arch=self.cfg.MODEL.arch_encoder,
            fc_dim=self.cfg.MODEL.fc_dim,
            weights=self.cfg.MODEL.weights_encoder)
        net_decoder = ModelBuilder.build_decoder(
            arch=self.cfg.MODEL.arch_decoder,
            fc_dim=self.cfg.MODEL.fc_dim,
            num_class=self.cfg.DATASET.num_class,
            weights=self.cfg.MODEL.weights_decoder,
            use_softmax=True)  # inference mode: decoder outputs probabilities

        # ignore_index=-1 skips unlabeled pixels in the loss.
        crit = nn.NLLLoss(ignore_index=-1)

        self.segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                                      crit)
        self.segmentation_module.cuda()

        # Publish segmented frames; subscribe to incoming camera frames.
        self.seg_pub = rospy.Publisher(self.img_out,
                                       sensor_msgs.msg.Image,
                                       queue_size=1)
        rospy.Subscriber(self.img_in, sensor_msgs.msg.Image,
                         self.image_callback)

        rospy.loginfo("Listening for image messages on topic %s..." %
                      self.img_in)
        rospy.loginfo("Publishing segmented images to topic %s..." %
                      self.img_out)

        rospy.loginfo("Waiting for loader from queue...")
        # Poll the queue; presumably image_callback (running on another
        # thread) enqueues loaders — confirm against the callback.
        while not rospy.is_shutdown():
            rospy.sleep(0.01)
            try:
                loader = self.loader_q.get_nowait()
                # self.ready appears to gate new work while inference is
                # in flight — verify how image_callback uses it.
                self.ready = False
                self.run_inference(loader)
                self.ready = True
            except queue.Empty:
                pass

        # NOTE(review): the while-loop above already runs until shutdown,
        # so this spin() returns immediately; likely redundant.
        rospy.spin()
# Esempio n. 24
# 0
def main(args):
    """Build the encoder/decoder pair and run a single test pass."""
    # Instantiate both sub-networks from pretrained weights.
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(arch=args.arch_encoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_encoder)
    decoder = model_builder.build_decoder(arch=args.arch_decoder,
                                          fc_dim=args.fc_dim,
                                          segSize=args.segSize,
                                          weights=args.weights_decoder,
                                          use_softmax=True)

    # Push every sub-network onto the GPU.
    nets = (encoder, decoder)
    for subnet in nets:
        subnet.cuda()

    # single pass
    test(nets, args)
def inference(cfg, image_path, gpu=0):
    """Run inference with a pretrained encoder/decoder on the given GPU.

    Args:
        cfg: config node with MODEL, DATASET, TRAIN and TEST sections.
        image_path: path forwarded to ``test`` (the input to segment).
        gpu: CUDA device index to run on (default 0).
    """
    torch.cuda.set_device(gpu)

    # Network Builders
    net_encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             num_class=cfg.DATASET.num_class,
                                             weights=cfg.MODEL.weights_decoder,
                                             use_softmax=True)  # inference mode

    # ignore_index=-1 skips unlabeled pixels in the loss.
    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader
    '''
    dataset_test = TestDataset(
        cfg.list_test,
        cfg.DATASET)
    '''
    dataset_test = TestROPRidgeDataset(
        root_dataset=cfg.DATASET.root_dataset,
        opt=cfg.DATASET,
        img_folder=cfg.DATASET.img_folder_val,
        annotation_folder="annotations",
        anno_filename=cfg.DATASET.list_val,
        batch_per_gpu=cfg.TRAIN.batch_size_per_gpu)
    loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=cfg.TEST.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Main loop
    # NOTE(review): loader_test is built above but never used here —
    # test() is handed image_path instead. Confirm whether test() is
    # supposed to receive the loader.
    test(segmentation_module, image_path, gpu)

    print('Inference done!')
# Esempio n. 26
# 0
def main(args):
    """Download the demo image and model weights from GCS, then run inference.

    Mutates ``args`` in place: ``test_img``, ``weights_encoder`` and
    ``weights_decoder`` are rewritten to point at the local copies.
    """
    # Fetch the demo image from the bucket and point args at the local copy.
    bucket_name = "ubiquity-kube-mlengine-pytorch_trial"
    source_blob_name = "ade20k/davidgarrett.jpg"
    destination_file_name = "/davidgarrett.jpg"
    download_blob(bucket_name, source_blob_name, destination_file_name)
    args.test_img = destination_file_name

    # Fetch encoder and decoder weights; same download pattern for both
    # (previously duplicated inline).
    for attr, destination_folder in (('weights_encoder', 'model_path_en'),
                                     ('weights_decoder', 'model_path_de')):
        bucket_url = getattr(args, attr)
        print(bucket_url, type(bucket_url))
        setattr(args, attr,
                download_from_buckets(bucket_url, destination_folder))
    print('gsutil works')

    # Network Builders.  (An unused SummaryWriter instance was removed here.)
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        segSize=args.segSize,
                                        weights=args.weights_decoder,
                                        use_softmax=True)  # inference mode

    nets = (net_encoder, net_decoder)
    # Use the GPU when present; otherwise stay on CPU.
    if torch.cuda.is_available():
        for net in nets:
            net.cuda()

    # single pass
    test(nets, args)

    print('Done! Output is saved in {}'.format(args.result))
    def _prepare_module(self):
        """Construct and return a CUDA-resident SegmentationModule."""
        cfg = self.cfg
        # Build both sub-networks from the configured pretrained weights.
        encoder = ModelBuilder.build_encoder(
            arch=cfg.MODEL.arch_encoder,
            fc_dim=cfg.MODEL.fc_dim,
            weights=cfg.MODEL.weights_encoder)
        decoder = ModelBuilder.build_decoder(
            arch=cfg.MODEL.arch_decoder,
            fc_dim=cfg.MODEL.fc_dim,
            num_class=cfg.DATASET.num_class,
            weights=cfg.MODEL.weights_decoder,
            use_softmax=True)  # inference mode

        # ignore_index=-1 skips unlabeled pixels in the loss.
        criterion = nn.NLLLoss(ignore_index=-1)

        module = SegmentationModule(encoder, decoder, criterion)
        module.cuda()

        return module
# Esempio n. 28
# 0
def module_init(cfg):
    """Build a CUDA-resident segmentation module in evaluation mode.

    Args:
        cfg: config node with MODEL (arch/fc_dim/weights) and
            DATASET.num_class settings.

    Returns:
        A SegmentationModule on the GPU with ``eval()`` applied.
    """
    # Network Builders (backslash continuations are redundant inside
    # parentheses and were removed).
    net_encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             num_class=cfg.DATASET.num_class,
                                             weights=cfg.MODEL.weights_decoder,
                                             use_softmax=True)
    # NLLLoss; ignore_index=-1 skips unlabeled pixels.
    crit = nn.NLLLoss(ignore_index=-1)
    # Instantiate segmentation module on the GPU.
    segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                             crit).cuda()
    # Evaluation mode (disables dropout / batch-norm updates).
    segmentation_module.eval()

    return segmentation_module
# Esempio n. 29
# 0
def loadModel(model):
    """Assemble a CUDA-resident SegmentationModule from a model-spec dict."""
    # Resolve weight files relative to the working directory.
    encoder_weights = os.path.join(cwd, model["encoder_weights"])
    decoder_weights = os.path.join(cwd, model["decoder_weights"])

    encoder = ModelBuilder.build_encoder(arch=model["encoder_arch"],
                                         fc_dim=model["fc_dim"],
                                         weights=encoder_weights)
    decoder = ModelBuilder.build_decoder(arch=model["decoder_arch"],
                                         fc_dim=model["fc_dim"],
                                         num_class=model["num_class"],
                                         weights=decoder_weights,
                                         use_softmax=True)  # inference mode

    # ignore_index=-1 skips unlabeled pixels in the loss.
    criterion = nn.NLLLoss(ignore_index=-1)

    module = SegmentationModule(encoder, decoder, criterion)
    module.cuda()
    return module
# Esempio n. 30
# 0
def main(args):
    """Run GPU inference over images given explicitly or found in a directory.

    Exactly one of ``args.test_imgs`` (list of image paths) or
    ``args.test_img_path`` (directory to scan) should be provided.

    Raises:
        ValueError: if neither image source is given.
    """
    torch.cuda.set_device(args.gpu_id)

    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=args.weights_decoder,
                                        use_softmax=True)  # inference mode

    # ignore_index=-1 skips unlabeled pixels in the loss.
    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Build the test list from either an explicit image list or a directory.
    list_test = None
    if args.test_imgs is not None:
        list_test = [{'fpath_img': x} for x in args.test_imgs]
    if args.test_img_path is not None:
        list_test = [{
            'fpath_img': os.path.join(args.test_img_path, x)
        } for x in os.listdir(args.test_img_path)]
        # Cap directory scans at 20 images.
        list_test = list_test[:20]
    if list_test is None:
        # Previously this fell through to a NameError; fail explicitly.
        raise ValueError('either test_imgs or test_img_path must be set')

    dataset_test = TestDataset(list_test, args, max_sample=args.num_val)
    loader_test = torchdata.DataLoader(dataset_test,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       collate_fn=user_scattered_collate,
                                       num_workers=5,
                                       drop_last=True)

    segmentation_module.cuda()

    # Main loop
    test(segmentation_module, loader_test, args)

    print('Inference done!')
# Esempio n. 31
# 0
def main(args):
    """Evaluate a trained model on a validation split and save the metrics."""
    torch.cuda.set_device(args.gpu)

    criterion = nn.NLLLoss(ignore_index=-1)  # -1 marks unlabeled pixels

    # Validation dataset and loader.
    val_set = Dataset(args,
                      split_name=args.split_name,
                      batch_per_gpu=args.batchsize)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batchsize,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    # Build the network from the pretrained weights.
    model_builder = ModelBuilder()
    print('Loading encoder from: %s' % (args.weights_encoder))
    print('Loading decoder from: %s' % (args.weights_decoder))
    encoder = model_builder.build_encoder(arch=args.arch_encoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_encoder)
    decoder = model_builder.build_decoder(arch=args.arch_decoder,
                                          fc_dim=args.fc_dim,
                                          num_class=val_set.num_classes,
                                          weights=args.weights_decoder,
                                          use_softmax=True)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    seg_module.cuda()

    # Surface-normal datasets use a dedicated evaluation routine.
    if args.dataset.startswith('nyuv2sn'):
        metrics = evaluate_surface_normals(seg_module, val_loader, args)
    else:
        metrics = evaluate_segmentation(seg_module, val_loader, args)

    save_pickle(args.result_file, metrics)

    print('Evaluation Done!')
# Esempio n. 32
# 0
def main(args):
    """Segment every image given on the command line (or in a directory)."""
    torch.cuda.set_device(args.gpu)

    # Build encoder/decoder from pretrained weights.
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(arch=args.arch_encoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_encoder)
    decoder = model_builder.build_decoder(arch=args.arch_decoder,
                                          fc_dim=args.fc_dim,
                                          num_class=args.num_class,
                                          weights=args.weights_decoder,
                                          use_softmax=True)  # inference mode

    criterion = nn.NLLLoss(ignore_index=-1)  # -1 marks unlabeled pixels

    seg_module = SegmentationModule(encoder, decoder, criterion)

    # A single directory argument means "segment everything under it".
    if len(args.test_imgs) == 1 and os.path.isdir(args.test_imgs[0]):
        test_imgs = find_recursive(args.test_imgs[0])
    else:
        test_imgs = args.test_imgs
    list_test = [{'fpath_img': x} for x in test_imgs]
    test_set = TestDataset(list_test, args, max_sample=args.num_val)
    test_loader = torchdata.DataLoader(test_set,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       collate_fn=user_scattered_collate,
                                       num_workers=5,
                                       drop_last=True)

    seg_module.cuda()

    # Main loop
    test(seg_module, test_loader, args)

    print('Inference done!')
def main(args):
    """Build the encoder/decoder pair, run one test pass, report the output."""
    # Instantiate both sub-networks from pretrained weights.
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(arch=args.arch_encoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_encoder)
    decoder = model_builder.build_decoder(arch=args.arch_decoder,
                                          fc_dim=args.fc_dim,
                                          segSize=args.segSize,
                                          weights=args.weights_decoder,
                                          use_softmax=True)

    # Push both networks onto the GPU.
    nets = (encoder, decoder)
    for subnet in nets:
        subnet.cuda()

    # single pass
    test(nets, args)

    print('Done! Output is saved in {}'.format(args.result))