def __init__(self, dataset_name, cls_type="duck"):
        """Linemod dataset for one object class.

        Args:
            dataset_name: 'train' selects training data (noise augmentation
                plus optional rendered/fused image lists); any other value
                selects the test split.
            cls_type: Linemod object class name, a key of config.lm_obj_dict.
        """
        self.config = Config(dataset_name='linemod', cls_type=cls_type)
        self.bs_utils = Basic_Utils(self.config)
        self.dataset_name = dataset_name
        # Per-pixel coordinate grids for a 640x480 image:
        # xmap[r, c] == r (row index), ymap[r, c] == c (column index).
        self.xmap = np.array([[j for i in range(640)] for j in range(480)])
        self.ymap = np.array([[i for i in range(640)] for j in range(480)])

        self.trancolor = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)
        # Standard ImageNet normalization. Fixed: the blue-channel std is
        # 0.225 (the original carried a 0.224 typo).
        self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        self.obj_dict = self.config.lm_obj_dict

        self.cls_type = cls_type
        self.cls_id = self.obj_dict[cls_type]
        print("cls_id in lm_dataset.py", self.cls_id)
        self.root = os.path.join(self.config.lm_root, 'Linemod_preprocessed')
        self.cls_root = os.path.join(self.root, "data/%02d/" % self.cls_id)
        self.rng = np.random
        # Close the ground-truth file deterministically and parse it with
        # safe_load: gt.yml holds plain data, and yaml.load without an
        # explicit Loader is deprecated in modern PyYAML.
        with open(os.path.join(self.cls_root, 'gt.yml'), "r") as meta_file:
            self.meta_lst = yaml.safe_load(meta_file)
        if dataset_name == 'train':
            self.add_noise = True
            real_img_pth = os.path.join(self.cls_root, "train.txt")
            self.real_lst = self.bs_utils.read_lines(real_img_pth)

            rnd_img_pth = os.path.join(
                self.root, "renders/{}/file_list.txt".format(cls_type))
            # Rendered and fused file lists are optional extras: a missing
            # list file just means training proceeds without that source.
            try:
                self.rnd_lst = self.bs_utils.read_lines(rnd_img_pth)
            except OSError:  # No synthetic rendered data.
                print(
                    "Train without rendered data from https://github.com/ethnhe/raster_triangle"
                )
                self.rnd_lst = []

            fuse_img_pth = os.path.join(
                self.root, "fuse/{}/file_list.txt".format(cls_type))
            try:
                self.fuse_lst = self.bs_utils.read_lines(fuse_img_pth)
            except OSError:  # No fuse dataset
                print(
                    "Train without fuse data from https://github.com/ethnhe/raster_triangle"
                )
                self.fuse_lst = []
            self.all_lst = self.real_lst + self.rnd_lst + self.fuse_lst
        else:
            self.add_noise = False
            self.pp_data = None
            if os.path.exists(self.config.preprocessed_testset_pth
                              ) and self.config.use_preprocess:
                print('Loading valtestset.')
                with open(self.config.preprocessed_testset_pth, 'rb') as f:
                    # NOTE(review): pickle.load on a locally generated cache
                    # file — never point this at untrusted data.
                    self.pp_data = pkl.load(f)
                # Indices into the preprocessed sample list.
                self.all_lst = list(range(len(self.pp_data)))
                print('Finish loading valtestset.')
            else:
                tst_img_pth = os.path.join(self.cls_root, "test.txt")
                self.tst_lst = self.bs_utils.read_lines(tst_img_pth)
                self.all_lst = self.tst_lst
        print("{}_dataset_size: ".format(dataset_name), len(self.all_lst))
Example #2
0
 def __init__(self, weights_path):
     """Set up config/utils, load the network, and clear per-frame buffers.

     Args:
         weights_path: path to the trained model weights handed to
             define_network.
     """
     self.config = Config(dataset_name='ycb')
     self.bs_utils = Basic_Utils(self.config)
     self.model = self.define_network(weights_path)
     # Nothing processed yet: all per-frame inputs start out empty.
     self.rgb = self.cld = self.cld_rgb_nrm = None
     self.choose = self.cls_id_lst = None
Example #3
0
from lib import PVN3D
from datasets.CrankSlider.CrankSlider_dataset import CrankSlider_Dataset
from lib.loss import OFLoss, FocalLoss
from common import Config
from lib.utils.sync_batchnorm import convert_model
from lib.utils.warmup_scheduler import CyclicLR
from lib.utils.pvn3d_eval_utils import TorchEval
import lib.utils.etw_pytorch_utils as pt_utils
import resource
from collections import namedtuple
from lib.utils.basic_utils import Basic_Utils

# Raise the soft limit on open file descriptors to 30000 (data-loader
# workers hold many files open); the hard limit is left unchanged.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (30000, rlimit[1]))
# Module-level config and helpers for the CrankSlider dataset.
config = Config(dataset_name='CrankSlider')
bs_utl = Basic_Utils(config)

# Command-line options for training.
# NOTE(review): argparse is used here but not imported in the visible
# import block — presumably imported earlier in the full file; confirm.
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument(
    "-weight_decay",
    type=float,
    default=0,
    help="L2 regularization coeff [default: 0.0]",
)
parser.add_argument("-lr",
                    type=float,
                    default=1e-2,
                    help="Initial learning rate [default: 1e-2]")
parser.add_argument(
    "-lr_decay",
    type=float,
Example #4
0
import cv2
import pcl
import torch
import os.path
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
from common import Config
import pickle as pkl
from lib.utils.basic_utils import Basic_Utils
import scipy.io as scio
import scipy.misc
# from neupeak.utils.webcv2 import imshow, waitKey

# Module-level YCB configuration and helpers shared by the dataset class
# defined below.
config = Config(dataset_name='ycb')
bs_utils = Basic_Utils(config)
# Toggle for extra debug/visualization output.
DEBUG = False


class LM_Dataset():
    # NOTE(review): this snippet is truncated — the body of the enumerate
    # loop at the end of __init__ (and the rest of the class) is missing
    # from this chunk.
    def __init__(self, dataset_name):
        """YCB dataset loader; dataset_name selects train/test behavior."""
        self.dataset_name = dataset_name
        # Per-pixel coordinate grids for a 640x480 image:
        # xmap[r, c] == r, ymap[r, c] == c.
        self.xmap = np.array([[j for i in range(640)] for j in range(480)])
        self.ymap = np.array([[i for i in range(640)] for j in range(480)])
        # Per-class object diameters, filled elsewhere.
        self.diameters = {}
        self.trancolor = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)
        # NOTE(review): std[2] is normally 0.225 for ImageNet — looks like a
        # typo; confirm against the training pipeline before changing.
        self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.224])
        self.cls_lst = bs_utils.read_lines(config.ycb_cls_lst_p)
        self.obj_dict = {}
        # Class ids are 1-based per YCB convention.
        for cls_id, cls in enumerate(self.cls_lst, start=1):
import sys
# Drop ROS kinetic's Python 2.7 packages from the import path: they shadow
# the Python 3 packages this project needs. Guarded with a membership test
# because list.remove raises ValueError on machines where ROS is not
# installed, which would crash the script at import time.
_ros_py2_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if _ros_py2_path in sys.path:
    sys.path.remove(_ros_py2_path)
from lib.utils.basic_utils import Basic_Utils
from common import Config
import numpy as np
from PIL import Image
import pcl
from os import listdir
''' This script writes cloud, normals and sampled-points for all the training data so it does not have to be calculated from training images during the training '''

# openDR-specific config and helpers used by the preprocessing routines
# below.
cfg = Config(dataset_name='openDR')
bs_utils = Basic_Utils(cfg)


def get_normal(cld):
    """Estimate per-point surface normals for a point cloud.

    Args:
        cld: (N, 3) array of points; converted to float32 for PCL.

    Returns:
        numpy array of the estimated normals, one row per input point.
    """
    pts = cld.astype(np.float32)
    cloud = pcl.PointCloud()
    cloud.from_array(pts)
    # K-nearest-neighbour normal estimation over a kd-tree (K = 50).
    estimator = cloud.make_NormalEstimation()
    estimator.set_SearchMethod(cloud.make_kdtree())
    estimator.set_KSearch(50)
    return estimator.compute().to_array()


## This script is only for openDR dataset ##
Example #6
0
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import concurrent.futures
import numpy as np
import pickle as pkl
from common import Config
from lib.utils.basic_utils import Basic_Utils
from lib.utils.meanshift_pytorch import MeanShiftTorch

# Per-dataset configs and utility helpers; the evaluation code picks the
# pair matching the dataset being evaluated.
config = Config(dataset_name='ycb')
bs_utils = Basic_Utils(config)
config_lm = Config(dataset_name="linemod")
bs_utils_lm = Basic_Utils(config_lm)
# YCB class-name list used for per-class reporting.
cls_lst = config.ycb_cls_lst
config_od = Config(dataset_name='openDR')
bs_utils_od = Basic_Utils(config_od)
config_cs = Config(dataset_name='CrankSlider')
bs_utils_cs = Basic_Utils(config_cs)


class VotingType:
    # Enum-like ids for keypoint/voting schemes. Value 4 is unassigned in
    # this snippet; NOTE(review): the list may be truncated here — a fuller
    # copy elsewhere continues with Farthest4/12/16/20.
    BB8 = 0
    BB8C = 1
    BB8S = 2
    VanPts = 3
    Farthest = 5
Example #7
0
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import concurrent.futures
import numpy as np
import pickle as pkl
from common import Config
from lib.utils.basic_utils import Basic_Utils
from lib.utils.meanshift_pytorch import MeanShiftTorch

# Configs and utility helpers for the YCB and Linemod datasets; evaluation
# code selects the pair matching the dataset under test.
config = Config(dataset_name='ycb')
bs_utils = Basic_Utils(config)
config_lm = Config(dataset_name="linemod")
bs_utils_lm = Basic_Utils(config_lm)
# YCB class-name list used for per-class reporting.
cls_lst = config.ycb_cls_lst


class VotingType:
    BB8 = 0
    BB8C = 1
    BB8S = 2
    VanPts = 3
    Farthest = 5
    Farthest4 = 6
    Farthest12 = 7
    Farthest16 = 8
    Farthest20 = 9