Example #1
0
def main(args):
    """Run single-image inference with the configured encoder/decoder pair."""
    torch.cuda.set_device(args.gpu_id)

    # Build the segmentation network from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    nr_classes=args.nr_classes,
                                    weights=args.weights_decoder,
                                    use_softmax=True)

    seg_module = SegmentationModule(encoder, decoder)
    seg_module.cuda()

    # Wrap the single test image in the record format TestDataset expects.
    records = [{'fpath_img': args.test_img}]
    test_set = TestDataset(records, args, max_sample=args.num_val)
    test_loader = torchdata.DataLoader(test_set,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       collate_fn=user_scattered_collate,
                                       num_workers=5,
                                       drop_last=True)

    # Main loop
    test(seg_module, test_loader, args)

    print('Inference done!')
Example #2
0
def main(args):
    """Evaluate a UNet segmentation model on the AC17 dataset."""
    torch.cuda.set_device(args.gpu)

    # Build the UNet from pretrained weights.
    builder = ModelBuilder()
    net = builder.build_unet(num_class=args.num_class,
                             arch=args.arch_unet,
                             weights=args.weights_unet1)

    # Validation-mode dual loss drives the evaluation metric.
    criterion = DualLoss(mode='val')
    seg_module = SegmentationModule(criterion, net)

    # Fixed 256x256 center-crop padding for test-time inputs.
    augs = ComposeTest([PaddingCenterCropTest(256)])
    val_set = AC17(root=args.data_root,
                   augmentations=augs,
                   img_norm=args.img_norm)
    val_loader = data.DataLoader(val_set,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=5,
                                 drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
Example #3
0
def main(args):
    """Run live camera-feed segmentation on the selected GPU."""
    torch.cuda.set_device(args.gpu)

    # Build encoder/decoder pair from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    num_class=args.num_class,
                                    weights=args.weights_decoder,
                                    use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()

    # Stream frames from the default camera (device 0).
    capture = cv2.VideoCapture(0)
    cam_test(seg_module, capture, args)

    print('Inference done!')
def worker(args, dev_id, start_idx, end_idx, result_queue):
    """Evaluate the validation slice [start_idx, end_idx) on one GPU."""
    torch.cuda.set_device(dev_id)

    # Loader over this worker's shard of the broden validation records.
    val_set = ValDataset(broden_dataset.record_list['validation'], args,
                         max_sample=args.num_val, start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=2)

    # Assemble the segmentation network from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    nr_classes=args.nr_classes,
                                    weights=args.weights_decoder,
                                    use_softmax=True)
    seg_module = SegmentationModule(encoder, decoder)
    seg_module.cuda()

    # evaluate() pushes per-image results onto result_queue.
    evaluate(seg_module, val_loader, args, dev_id, result_queue)
Example #5
0
def main(args):
    """Evaluate a UNet on the LungData test split."""
    torch.cuda.set_device(args.gpu)

    # Build the UNet from pretrained weights.
    builder = ModelBuilder()
    net = builder.build_unet(num_class=args.num_class,
                             arch=args.arch_unet,
                             weights=args.weights_unet)

    # No criterion is needed for evaluation.
    seg_module = SegmentationModule(None, net)

    # Fixed 256x256 center-crop padding for test images.
    augs = ComposeTest([PaddingCenterCropTest(256)])
    test_set = LungData(root=args.data_root,
                        split='test',
                        augmentations=augs)
    val_loader = data.DataLoader(test_set,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=0,
                                 drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
def worker(cfg, gpu_id, start_idx, end_idx, result_queue):
    """Evaluate the validation slice [start_idx, end_idx) on one GPU."""
    torch.cuda.set_device(gpu_id)

    # Loader over this worker's shard of the validation list.
    val_set = ValDataset(cfg.DATASET.root_dataset,
                         cfg.DATASET.list_val,
                         cfg.DATASET,
                         start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=cfg.VAL.batch_size,
                                             shuffle=False,
                                             collate_fn=user_scattered_collate,
                                             num_workers=2)

    # Assemble the segmentation network from pretrained weights.
    encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder.lower(),
                                         fc_dim=cfg.MODEL.fc_dim,
                                         weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder.lower(),
                                         fc_dim=cfg.MODEL.fc_dim,
                                         num_class=cfg.DATASET.num_class,
                                         weights=cfg.MODEL.weights_decoder,
                                         use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()

    # evaluate() pushes per-image results onto result_queue.
    evaluate(seg_module, val_loader, cfg, gpu_id, result_queue)
Example #7
0
def main(args):
    """Evaluate the encoder/decoder model on the validation list."""
    torch.cuda.set_device(args.gpu_id)

    # Build the segmentation network from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_decoder,
                                    use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # Validation loader (capped at args.num_val samples).
    val_set = ValDataset(args.list_val, args, max_sample=args.num_val)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
def worker(args, gpu_id, start_idx, end_idx, result_queue):
    """Evaluate the validation slice [start_idx, end_idx) on one GPU."""
    torch.cuda.set_device(gpu_id)

    # Loader over this worker's shard of the validation list.
    val_set = ValDataset(args.list_val,
                         args,
                         max_sample=args.num_val,
                         start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=2)

    # Assemble the segmentation network from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    num_class=args.num_class,
                                    weights=args.weights_decoder,
                                    use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()

    # evaluate() pushes per-image results onto result_queue.
    evaluate(seg_module, val_loader, args, gpu_id, result_queue)
Example #9
0
def worker(args, dev_id, start_idx, end_idx, result_queue):
    """Evaluate a shard of the broden validation records on one GPU."""
    torch.cuda.set_device(dev_id)

    # Loader restricted to records [start_idx, end_idx).
    val_set = ValDataset(broden_dataset.record_list['validation'], args,
                         max_sample=args.num_val, start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=2)

    # Build the segmentation network from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    nr_classes=args.nr_classes,
                                    weights=args.weights_decoder,
                                    use_softmax=True)
    seg_module = SegmentationModule(encoder, decoder)
    seg_module.cuda()

    # Per-image results are communicated back via result_queue.
    evaluate(seg_module, val_loader, args, dev_id, result_queue)
Example #10
0
def main(cfg, gpu):
    """Run inference over cfg.list_test on the given GPU."""
    torch.cuda.set_device(gpu)

    # Build the segmentation network from pretrained weights.
    encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         num_class=cfg.DATASET.num_class,
                                         weights=cfg.MODEL.weights_decoder,
                                         use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # Test loader.
    test_set = TestDataset(cfg.list_test, cfg.DATASET)
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=cfg.TEST.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)
    seg_module.cuda()

    # Main loop
    test(seg_module, test_loader, gpu)

    print('Inference done!')
Example #11
0
def main(args):
    """Run single-image inference with an nr_classes-style decoder."""
    torch.cuda.set_device(args.gpu_id)

    # Build the segmentation network from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    nr_classes=args.nr_classes,
                                    weights=args.weights_decoder,
                                    use_softmax=True)

    seg_module = SegmentationModule(encoder, decoder)
    seg_module.cuda()

    # Wrap the single test image in the record format TestDataset expects.
    records = [{'fpath_img': args.test_img}]
    test_set = TestDataset(records, args, max_sample=args.num_val)
    test_loader = torchdata.DataLoader(test_set,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       collate_fn=user_scattered_collate,
                                       num_workers=5,
                                       drop_last=True)

    # Main loop
    test(seg_module, test_loader, args)

    print('Inference done!')
Example #12
0
def main(cfg, gpu):
    """Evaluate on the validation split described by cfg."""
    torch.cuda.set_device(gpu)

    # Build the segmentation network; arch names are normalized to lowercase.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=cfg.MODEL.arch_encoder.lower(),
                                    fc_dim=cfg.MODEL.fc_dim,
                                    weights=cfg.MODEL.weights_encoder)
    decoder = builder.build_decoder(arch=cfg.MODEL.arch_decoder.lower(),
                                    fc_dim=cfg.MODEL.fc_dim,
                                    num_class=cfg.DATASET.num_class,
                                    weights=cfg.MODEL.weights_decoder,
                                    use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # Validation loader.
    val_set = ValDataset(cfg.DATASET.root_dataset, cfg.DATASET.list_val,
                         cfg.DATASET)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=cfg.VAL.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, cfg, gpu)

    print('Evaluation Done!')
Example #13
0
def fetch_teacher_outputs(args):
    """Run the teacher segmentation model over the training split and collect
    its hard predictions (argmax class maps), one entry per batch.

    Returns:
        list: per-batch predicted label maps as numpy arrays.
    """
    teacher_outputs = []

    # Build the teacher network from pretrained weights.
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=args.weights_decoder)
    crit = nn.NLLLoss(ignore_index=-1)
    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and loader: iterate the *training* split, unshuffled so the
    # collected outputs line up with the student's pass over the same data.
    dataset_train = TrainDataset(args.list_train,
                                 args,
                                 batch_per_gpu=args.batch_size_per_gpu)
    loader_train = torchdata.DataLoader(
        dataset_train,
        batch_size=args.num_gpus,  # we have modified data_parallel
        shuffle=False,  # we do not use this param
        collate_fn=user_scattered_collate,
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=True)
    segmentation_module.cuda()

    for i, batch_data in enumerate(loader_train):

        batch_data = batch_data[0]  # unwrap the user-scattered data list
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            # Accumulate predictions at full label resolution.
            segSize = (seg_label.shape[0], seg_label.shape[1])
            pred = torch.zeros(1, args.num_class, segSize[0], segSize[1])

            # Average the predictions over the multi-scale resized inputs.
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, args.gpu_id)

                # forward pass
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                pred = pred + pred_tmp.cpu() / len(args.imgSize)

            _, preds = torch.max(pred, dim=1)
            # BUG FIX: the original called `preds.as_numpy(...)` — an
            # AttributeError on a Tensor — and discarded the result. Convert
            # the prediction map to numpy and keep it.
            preds = as_numpy(preds.squeeze(0))
            teacher_outputs.append(preds)
    return teacher_outputs
Example #14
0
def main(cfg, gpu, args, progress):
    """Run semantic segmentation over cfg.list_test, on GPU when available.

    BUG FIX: the original tested `gpu_flag and torch.cuda.is_available()`
    only when selecting the device, then used the bare `gpu_flag` to build
    the model and call `.cuda()` — crashing when the flag was set but no
    CUDA runtime was present. Compute one effective flag and use it
    consistently (it is also what gets forwarded to test()).
    """
    gpu_flag = args.gpu_flag and torch.cuda.is_available()
    if gpu_flag:
        torch.cuda.set_device(gpu)
        print('使用GPU进行语义分割')
    else:
        print('未开启GPU或未安装CUDA环境,设置使用CPU进行语义分割')

    # Network Builders: the CPU path passes gpu_flag=False through to the
    # builders so weights are loaded onto the CPU.
    if gpu_flag:
        net_encoder = ModelBuilder.build_encoder(
            arch=cfg.MODEL.arch_encoder,
            fc_dim=cfg.MODEL.fc_dim,
            weights=cfg.MODEL.weights_encoder)
        net_decoder = ModelBuilder.build_decoder(
            arch=cfg.MODEL.arch_decoder,
            fc_dim=cfg.MODEL.fc_dim,
            num_class=cfg.DATASET.num_class,
            weights=cfg.MODEL.weights_decoder,
            use_softmax=True)
    else:
        net_encoder = ModelBuilder.build_encoder(
            arch=cfg.MODEL.arch_encoder,
            fc_dim=cfg.MODEL.fc_dim,
            weights=cfg.MODEL.weights_encoder,
            gpu_flag=False)
        net_decoder = ModelBuilder.build_decoder(
            arch=cfg.MODEL.arch_decoder,
            fc_dim=cfg.MODEL.fc_dim,
            num_class=cfg.DATASET.num_class,
            weights=cfg.MODEL.weights_decoder,
            use_softmax=True,
            gpu_flag=False)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader
    dataset_test = TestDataset(cfg.list_test, cfg.DATASET)
    loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=cfg.TEST.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)
    if gpu_flag:
        segmentation_module.cuda()

    # Main loop
    test(segmentation_module, loader_test, gpu, gpu_flag, args, progress)

    print('语义分割处理完成!')
Example #15
0
def main(args):
    """Train the multi-source segmentation model.

    Builds the encoder/decoder pair, wraps them (optionally with deep
    supervision and multi-GPU scatter), then runs the epoch loop with
    per-epoch checkpointing.
    """
    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        nr_classes=args.nr_classes,
        weights=args.weights_decoder)

    # TODO(LYC):: move criterion outside model.
    # crit = nn.NLLLoss(ignore_index=-1)

    # Deep-supervision decoders take an extra loss-scale argument.
    if args.arch_decoder.endswith('deepsup'):
        segmentation_module = SegmentationModule(
            net_encoder, net_decoder, args.deep_sup_scale)
    else:
        segmentation_module = SegmentationModule(
            net_encoder, net_decoder)

    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # create loader iterator
    iterator_train = create_multi_source_train_data_loader(args=args)

    # load nets into gpu
    if args.num_gpus > 1:
        # Scatter pre-collated batches across GPUs; the sync-BN patch must
        # be applied to the DataParallel wrapper, so order matters here.
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=range(args.num_gpus))
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers
    # NOTE(review): optimizers are built from the raw encoder/decoder, not
    # the (possibly wrapped) module — parameters are shared, so this works.
    nets = (net_encoder, net_decoder)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': []}}

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(segmentation_module, iterator_train, optimizers, history, epoch, args)

        # checkpointing
        checkpoint(nets, history, args, epoch)

    print('Training Done!')
Example #16
0
def main(args):
    """Time single-image inference for the hard-coded BDD100K demo image."""
    torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        num_class=args.num_class,
        weights=args.weights_decoder,
        use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader.
    # TODO: the image list is hard-coded; wire it to args.test_imgs instead.
    # (Removed dead commented-out directory-scan code and a leftover debug
    # print of type(args.test_imgs).)
    test_chk = ['./05b07068-373666cb.jpg']
    list_test = [{'fpath_img': x} for x in test_chk]
    dataset_test = TestDataset(
        list_test, args, max_sample=args.num_val)
    loader_test = torchdata.DataLoader(
        dataset_test,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Main loop, timed. BUG FIX: the original captured end = time.time()
    # but never reported the elapsed time.
    start = time.time()
    test(segmentation_module, loader_test, args)
    end = time.time()
    print('Inference done in {:.2f}s'.format(end - start))
Example #17
0
    def main(self):
        """ROS entry point: build the segmentation model, then serve requests.

        Subscribes to self.img_in, publishes segmented images on
        self.img_out, and processes loaders handed over via self.loader_q
        until ROS shutdown.
        """

        torch.cuda.set_device(self.gpu)

        # Network Builders
        net_encoder = ModelBuilder.build_encoder(
            arch=self.cfg.MODEL.arch_encoder,
            fc_dim=self.cfg.MODEL.fc_dim,
            weights=self.cfg.MODEL.weights_encoder)
        net_decoder = ModelBuilder.build_decoder(
            arch=self.cfg.MODEL.arch_decoder,
            fc_dim=self.cfg.MODEL.fc_dim,
            num_class=self.cfg.DATASET.num_class,
            weights=self.cfg.MODEL.weights_decoder,
            use_softmax=True)

        # NLL over log-softmax outputs; -1 marks unlabeled pixels.
        crit = nn.NLLLoss(ignore_index=-1)

        self.segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                                      crit)
        self.segmentation_module.cuda()

        # Publisher for segmented output; queue_size=1 keeps only the latest.
        self.seg_pub = rospy.Publisher(self.img_out,
                                       sensor_msgs.msg.Image,
                                       queue_size=1)
        rospy.Subscriber(self.img_in, sensor_msgs.msg.Image,
                         self.image_callback)

        rospy.loginfo("Listening for image messages on topic %s..." %
                      self.img_in)
        rospy.loginfo("Publishing segmented images to topic %s..." %
                      self.img_out)

        # Poll the loader queue; self.ready gates the image callback while
        # an inference pass is in flight.
        rospy.loginfo("Waiting for loader from queue...")
        while not rospy.is_shutdown():
            rospy.sleep(0.01)
            try:
                loader = self.loader_q.get_nowait()
                self.ready = False
                self.run_inference(loader)
                self.ready = True
            except queue.Empty:
                pass

        # NOTE(review): effectively unreachable — the loop above exits only
        # on shutdown, so spin() returns immediately; consider removing.
        rospy.spin()
def inference(cfg, image_path, gpu=0):
    """Run ROP-ridge segmentation inference on the given GPU."""
    torch.cuda.set_device(gpu)

    # Build the segmentation network from pretrained weights.
    encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         num_class=cfg.DATASET.num_class,
                                         weights=cfg.MODEL.weights_decoder,
                                         use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # ROP ridge dataset (replaces the generic TestDataset used elsewhere).
    test_set = TestROPRidgeDataset(
        root_dataset=cfg.DATASET.root_dataset,
        opt=cfg.DATASET,
        img_folder=cfg.DATASET.img_folder_val,
        annotation_folder="annotations",
        anno_filename=cfg.DATASET.list_val,
        batch_per_gpu=cfg.TRAIN.batch_size_per_gpu)
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=cfg.TEST.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    seg_module.cuda()

    # NOTE(review): test() receives image_path rather than the loader built
    # above, which therefore appears unused — confirm against test()'s
    # signature.
    test(seg_module, image_path, gpu)

    print('Inference done!')
    def _prepare_module(self):
        """Build the segmentation module on the GPU and return it."""
        encoder = ModelBuilder.build_encoder(
            arch=self.cfg.MODEL.arch_encoder,
            fc_dim=self.cfg.MODEL.fc_dim,
            weights=self.cfg.MODEL.weights_encoder)
        decoder = ModelBuilder.build_decoder(
            arch=self.cfg.MODEL.arch_decoder,
            fc_dim=self.cfg.MODEL.fc_dim,
            num_class=self.cfg.DATASET.num_class,
            weights=self.cfg.MODEL.weights_decoder,
            use_softmax=True)

        # NLL over log-softmax outputs; -1 marks unlabeled pixels.
        criterion = nn.NLLLoss(ignore_index=-1)

        module = SegmentationModule(encoder, decoder, criterion)
        module.cuda()
        return module
Example #20
0
def module_init(cfg):
    """Build the segmentation module on the GPU and switch it to eval mode."""
    encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         num_class=cfg.DATASET.num_class,
                                         weights=cfg.MODEL.weights_decoder,
                                         use_softmax=True)
    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion).cuda()
    # Inference only: disable dropout/batch-norm updates.
    seg_module.eval()
    return seg_module
Example #21
0
def main(args):
    """Run inference over images given via --test_imgs or --test_img_path."""
    torch.cuda.set_device(args.gpu_id)

    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=args.weights_decoder,
                                        use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Build the test-image list. A directory argument overrides the explicit
    # file list; only the first 20 directory entries are kept.
    list_test = None
    if args.test_imgs is not None:
        list_test = [{'fpath_img': x} for x in args.test_imgs]
    if args.test_img_path is not None:
        list_test = [{
            'fpath_img': os.path.join(args.test_img_path, x)
        } for x in os.listdir(args.test_img_path)]
        list_test = list_test[:20]
    # BUG FIX: the original raised a bare NameError on list_test when neither
    # option was given; fail with an actionable message instead.
    if list_test is None:
        raise ValueError('one of --test_imgs or --test_img_path is required')

    dataset_test = TestDataset(list_test, args, max_sample=args.num_val)
    loader_test = torchdata.DataLoader(dataset_test,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       collate_fn=user_scattered_collate,
                                       num_workers=5,
                                       drop_last=True)

    segmentation_module.cuda()

    # Main loop
    test(segmentation_module, loader_test, args)

    print('Inference done!')
Example #22
0
def loadModel(model):
    """Build a CUDA segmentation module from a model-spec dict.

    `model` supplies architecture names, fc_dim, the class count, and weight
    paths (resolved relative to the module-level `cwd`).
    """
    enc_weights = os.path.join(cwd, model["encoder_weights"])
    dec_weights = os.path.join(cwd, model["decoder_weights"])

    encoder = ModelBuilder.build_encoder(arch=model["encoder_arch"],
                                         fc_dim=model["fc_dim"],
                                         weights=enc_weights)
    decoder = ModelBuilder.build_decoder(arch=model["decoder_arch"],
                                         fc_dim=model["fc_dim"],
                                         num_class=model["num_class"],
                                         weights=dec_weights,
                                         use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)

    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()
    return seg_module
Example #23
0
def main(args):
    """Evaluate segmentation (or surface normals) and pickle the metrics."""
    torch.cuda.set_device(args.gpu)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)

    # Dataset first: the decoder below needs the dataset's class count.
    val_set = Dataset(args,
                      split_name=args.split_name,
                      batch_per_gpu=args.batchsize)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batchsize,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    # Build the segmentation network from pretrained weights.
    builder = ModelBuilder()
    print('Loading encoder from: %s' % (args.weights_encoder))
    print('Loading decoder from: %s' % (args.weights_decoder))
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    num_class=val_set.num_classes,
                                    weights=args.weights_decoder,
                                    use_softmax=True)
    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()

    # Surface-normal datasets use a dedicated evaluation routine.
    if args.dataset.startswith('nyuv2sn'):
        metrics = evaluate_surface_normals(seg_module, val_loader,
                                           args)
    else:
        metrics = evaluate_segmentation(seg_module, val_loader, args)

    save_pickle(args.result_file, metrics)

    print('Evaluation Done!')
Example #24
0
def main(args):
    """Run inference over args.test_imgs (explicit files or one directory)."""
    torch.cuda.set_device(args.gpu)

    # Build the segmentation network from pretrained weights.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=args.arch_encoder,
                                    fc_dim=args.fc_dim,
                                    weights=args.weights_encoder)
    decoder = builder.build_decoder(arch=args.arch_decoder,
                                    fc_dim=args.fc_dim,
                                    num_class=args.num_class,
                                    weights=args.weights_decoder,
                                    use_softmax=True)

    # NLL over log-softmax outputs; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # A single directory argument is expanded recursively into image paths.
    if len(args.test_imgs) == 1 and os.path.isdir(args.test_imgs[0]):
        test_imgs = find_recursive(args.test_imgs[0])
    else:
        test_imgs = args.test_imgs
    records = [{'fpath_img': x} for x in test_imgs]
    test_set = TestDataset(records, args, max_sample=args.num_val)
    test_loader = torchdata.DataLoader(test_set,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       collate_fn=user_scattered_collate,
                                       num_workers=5,
                                       drop_last=True)

    seg_module.cuda()

    # Main loop
    test(seg_module, test_loader, args)

    print('Inference done!')
def main(cfg, gpu):
    """Run inference on one GPU, attaching class names and colors to results.

    Builds the encoder/decoder described by ``cfg``, streams ``cfg.list_test``
    through a DataLoader, loads the color palette and the human-readable class
    names, then delegates the per-image work to ``test()``.
    """
    torch.cuda.set_device(gpu)

    # Network Builders
    net_encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             num_class=cfg.DATASET.num_class,
                                             weights=cfg.MODEL.weights_decoder,
                                             use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader
    dataset_test = TestDataset(cfg.list_test, cfg.DATASET)
    loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=cfg.TEST.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Map class index -> display name. Column 5 holds ';'-separated synonyms;
    # the first one is used. (Removed a leftover debug print(row) that logged
    # every CSV row to stdout.)
    colors = loadmat(cfg.DATASET.color_mat)['colors']
    names = {}
    with open(cfg.DATASET.object_info) as f:
        reader = csv.reader(f)
        next(reader)  # skip header row
        for row in reader:
            names[int(row[0])] = row[5].split(";")[0]

    # Main loop
    test(segmentation_module, loader_test, gpu, names, colors)

    print('Inference done!')
def setup_model(cfg_path, root, gpu=0):
    """Load and return a CUDA segmentation module described by a config file.

    Args:
        cfg_path: path to a YACS config file merged into the global ``cfg``.
        root: directory prepended to ``cfg.DIR`` when resolving checkpoints.
        gpu: CUDA device index to bind before building the model.

    Returns:
        A ``SegmentationModule`` placed on the selected GPU.

    Raises:
        AssertionError: if either checkpoint file is missing.
    """
    cfg.merge_from_file(cfg_path)

    # cfg.freeze()
    logger = setup_logger(distributed_rank=0)   # TODO
    logger.info("Loaded configuration file {}".format(cfg_path))
    logger.info("Running with config:\n{}".format(cfg))

    # Builder lookup is case-sensitive, so normalize the arch names.
    cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()
    cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()

    # absolute paths of model weights
    cfg.MODEL.weights_encoder = os.path.join(
        root, cfg.DIR, 'encoder_' + cfg.TEST.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(
        root, cfg.DIR, 'decoder_' + cfg.TEST.checkpoint)

    # Fixed typo in the failure message ("exitst" -> "exist").
    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"

    torch.cuda.set_device(gpu)

    # Network Builders
    net_encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder,
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder,
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    segmentation_module.cuda()

    return segmentation_module
Example #27
0
def setup_test(gpu_id=0,
               encoder_type="resnet50dilated",
               decoder_type="ppm_deepsup",
               fc_dim=2048,
               model_path="baseline-resnet50dilated-ppm_deepsup",
               num_class=150,
               suffix="_epoch_20.pth"):
    """Build an eval-mode segmentation module from checkpoint files.

    Encoder and decoder weights are expected side by side under
    ``model_path`` as ``encoder<suffix>`` and ``decoder<suffix>``.
    """
    torch.cuda.set_device(gpu_id)

    # absolute paths of model weights
    enc_weights = os.path.join(model_path, 'encoder' + suffix)
    dec_weights = os.path.join(model_path, 'decoder' + suffix)

    # Assemble the encoder/decoder pair in inference (softmax) mode.
    builder = ModelBuilder()
    encoder = builder.build_encoder(arch=encoder_type,
                                    fc_dim=fc_dim,
                                    weights=enc_weights)
    decoder = builder.build_decoder(arch=decoder_type,
                                    fc_dim=fc_dim,
                                    num_class=num_class,
                                    weights=dec_weights,
                                    use_softmax=True)

    criterion = nn.NLLLoss(ignore_index=-1)

    module = SegmentationModule(encoder, decoder, criterion)
    module.cuda()
    module.eval()
    return module
def load_model(args):
    """Build an eval-mode segmentation module from checkpoints named in args.

    Mutates ``args`` in place (lower-cases arch names, resolves absolute
    weight paths) and returns the module, on CUDA when available.

    Raises:
        AssertionError: if either checkpoint file is missing.
    """
    args.arch_encoder = args.arch_encoder.lower()
    args.arch_decoder = args.arch_decoder.lower()
    print("Input arguments:")
    for key, val in vars(args).items():
        print("{:16} {}".format(key, val))

    # absolute paths of model weights
    args.weights_encoder = os.path.join(DIR_PATH, 'models', args.model_path,
                                        'encoder' + args.suffix)
    args.weights_decoder = os.path.join(DIR_PATH, 'models', args.model_path,
                                        'decoder' + args.suffix)

    # BUG FIX: the second existence check previously re-tested the encoder
    # weights, so a missing decoder checkpoint went undetected. Also fixed
    # the "exitst" typo in the message.
    assert os.path.exists(args.weights_encoder) and \
        os.path.exists(args.weights_decoder), 'checkpoint does not exist!'

    torch.cuda.set_device(args.gpu)
    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=args.weights_decoder,
                                        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    segmentation_module.eval()
    if torch.cuda.is_available():
        segmentation_module.cuda()
    return segmentation_module
Example #29
0
def segment(args, h, w):
    """Segment the content/style image pair and return the test() result.

    Args:
        args: namespace carrying model paths, arch names, and the
            ``content``/``style`` image references.
        h, w: output height and width passed through to ``test()``.

    Returns:
        Whatever ``test()`` produces for the loaded data.

    Raises:
        AssertionError: if either checkpoint file is missing.
    """
    # absolute paths of model weights
    weights_encoder = os.path.join(args.model_path, 'encoder' + args.suffix)
    weights_decoder = os.path.join(args.model_path, 'decoder' + args.suffix)

    # BUG FIX: the second existence check previously re-tested the encoder
    # weights, so a missing decoder checkpoint went undetected. Also fixed
    # the "exitst" typo in the message.
    assert os.path.exists(weights_encoder) and \
        os.path.exists(weights_decoder), 'checkpoint does not exist!'

    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        num_class=args.num_class,
                                        weights=weights_decoder,
                                        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    segmentation_module.eval()
    segmentation_module.cuda()

    # Dataset and Loader
    data_dict = {"in": args.content, "tar": args.style}
    data = load_data(data_dict, args)

    # Main loop — no gradients needed at inference time.
    with torch.no_grad():
        res = test(segmentation_module, data, (h, w), args)
    print('Inference done!')
    return res
def main(cfg, gpu):
    """Run inference over the configured test list on a single GPU."""
    torch.cuda.set_device(gpu)

    # Build the encoder/decoder pair in inference (softmax) mode.
    encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                         fc_dim=cfg.MODEL.fc_dim,
                                         num_class=cfg.DATASET.num_class,
                                         weights=cfg.MODEL.weights_decoder,
                                         use_softmax=True)

    criterion = nn.NLLLoss(ignore_index=-1)
    segmentation_module = SegmentationModule(encoder, decoder, criterion)

    # Dataset and Loader
    dataset = TestDataset(cfg.list_test, cfg.DATASET)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=cfg.TEST.batch_size,
                                         shuffle=False,
                                         collate_fn=user_scattered_collate,
                                         num_workers=5,
                                         drop_last=True)

    segmentation_module.cuda()

    # Main loop
    test(segmentation_module, loader, gpu)

    print('Inference done!')
def main(args):
    """Evaluate a trained U-Net on the AC17 validation split."""
    torch.cuda.set_device(args.gpu)

    # Build the U-Net from pretrained weights.
    builder = ModelBuilder()
    unet = builder.build_unet(num_class=args.num_class,
                              arch=args.arch_unet,
                              weights=args.weights_unet)

    criterion = Loss()
    segmentation_module = SegmentationModule(unet, criterion)

    # Validation-time augmentation: deterministic center crop only.
    val_augs = Compose([PaddingCenterCrop(224)])

    dataset_val = AC17(root=args.data_root,
                       split='val',
                       k_split=args.k_split,
                       augmentations=val_augs,
                       img_norm=args.img_norm)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(ac17_val,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=5,
                                 drop_last=True)

    segmentation_module.cuda()

    # Main loop
    evaluate(segmentation_module, loader_val, args)

    print('Evaluation Done!')
Example #32
0
def main(args):
    """Train the multi-source segmentation model, checkpointing each epoch."""
    # Build the encoder; the decoder needs per-task class counts.
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                        fc_dim=args.fc_dim,
                                        weights=args.weights_encoder)

    # The part head spans the parts of every object category combined.
    nr_classes = broden_dataset.nr.copy()
    nr_classes['part'] = sum(
        len(parts) for obj, parts in broden_dataset.object_part.items())
    net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                        fc_dim=args.fc_dim,
                                        nr_classes=nr_classes,
                                        weights=args.weights_decoder)

    # TODO(LYC):: move criterion outside model.
    # crit = nn.NLLLoss(ignore_index=-1)

    # Deep-supervision decoders take an extra loss-scale argument.
    if args.arch_decoder.endswith('deepsup'):
        segmentation_module = SegmentationModule(
            net_encoder, net_decoder, args.deep_sup_scale)
    else:
        segmentation_module = SegmentationModule(net_encoder, net_decoder)

    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # create loader iterator
    iterator_train = create_multi_source_train_data_loader(args=args)

    # load nets into gpu
    if args.num_gpus > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module, device_ids=range(args.num_gpus))
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers
    nets = (net_encoder, net_decoder)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': []}}

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(segmentation_module, iterator_train, optimizers, history,
              epoch, args)

        # checkpointing
        checkpoint(nets, history, args, epoch)

    print('Training Done!')
Example #33
0
def main(cfg, gpu, args):
    """Evaluate per-video and global segmentation metrics over a split.

    Every video listed in ``<dataroot>/<split>.txt`` is run through
    ``test()``; per-video mIoU/fwIoU are accumulated alongside a global
    confusion matrix, per-video results are pickled to ``vmiou_hr.pkl``,
    and aggregate metrics are printed.
    """
    num_class = args.num_class
    torch.cuda.set_device(gpu)

    # Network Builders
    net_encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder,
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder,
        fc_dim=cfg.MODEL.fc_dim,
        num_class=num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # NOTE(review): the device set above is `gpu` but the module is placed on
    # `args.start_gpu` — confirm these are meant to be the same device.
    segmentation_module.cuda(args.start_gpu)

    # One video name per line in the split file (trailing newline stripped).
    with open(os.path.join(args.dataroot, args.split + '.txt')) as f:
        videolists = [line[:-1] for line in f.readlines()]

    # Dataset and Loader
    evaluator = Evaluator(num_class)   # global confusion matrix over all videos
    eval_video = Evaluator(num_class)  # reset before each video
    evaluator.reset()
    eval_video.reset()
    total_vmIOU = 0.0
    total_vfwIOU = 0.0
    total_video = len(videolists)
    per_video_miou = []
    video_names = []
    for video in videolists:
        eval_video.reset()
        dataset_test = TestDataset(args.dataroot, video, args)
        loader_test = torch.utils.data.DataLoader(
            dataset_test,
            batch_size=args.batchsize,
            shuffle=False,
            num_workers=5,
            drop_last=False)
        # Main loop
        test(segmentation_module, loader_test, gpu, args, evaluator,
             eval_video, video)
        v_mIOU = eval_video.Mean_Intersection_over_Union()
        # BUG FIX: the original did `v.append(v)`, appending the list to
        # itself instead of recording the per-video score.
        per_video_miou.append(v_mIOU)
        video_names.append(video)
        print(video, v_mIOU)
        total_vmIOU += v_mIOU
        v_fwIOU = eval_video.Frequency_Weighted_Intersection_over_Union()
        total_vfwIOU += v_fwIOU

    with open("vmiou_hr.pkl", 'wb') as f:
        pkl.dump([per_video_miou, video_names], f)

    total_vmIOU = total_vmIOU / total_video
    total_vfwIOU = total_vfwIOU / total_video

    Acc = evaluator.Pixel_Accuracy()
    Acc_class = evaluator.Pixel_Accuracy_Class()
    mIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
    print("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}, video mIOU: {}, video fwIOU: {}".format(Acc, Acc_class, mIoU, FWIoU, total_vmIOU, total_vfwIOU))

    print('Inference done!')