# cfg: configuration dict loaded earlier in the script (not shown in this fragment)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

dataset = sys.argv[3]
vis = False
output_dir = cfg['path_to_output']
output_img = output_dir + "/" + dataset
if not os.path.exists(output_img):
    os.makedirs(output_img)

bop_dir, test_dir, model_plys,\
model_info, model_ids, rgb_files,\
depth_files, mask_files, gts,\
cam_param_global, scene_cam = bop_io.get_dataset(cfg, dataset, incl_param=True, train=False)

im_width, im_height = cam_param_global['im_size']
cam_K = cam_param_global['K']
model_params = inout.load_json(os.path.join(bop_dir + "/models_xyz/", cfg['norm_factor_fn']))

# itodd provides grayscale images; the other datasets provide rgb
if dataset == 'itodd':
    img_type = 'gray'
else:
    img_type = 'rgb'

if "target_obj" in cfg.keys():
    target_obj = cfg['target_obj']
    remove_obj_id = []
    incl_obj_id = []
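# Hedged sketch (not from the repository): the block above reads several keys from the config
# dict `cfg`, which is loaded from a JSON file. The example below covers only the keys used
# above; the file name, values, and the save call are illustrative assumptions.
example_cfg = {
    "path_to_output": "./output",            # root folder for per-dataset result images
    "norm_factor_fn": "norm_factor.json",    # written into models_xyz by the model-conversion step
    "target_obj": [1, 2, 3],                 # optional: restrict processing to these object ids
}
# inout.save_json("cfg/cfg_example.json", example_cfg)  # bop_toolkit_lib helper, shown for illustration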
def rmfield(a, *fieldnames_to_remove):
    # return a view of the structured array without the given fields
    return a[[name for name in a.dtype.names if name not in fieldnames_to_remove]]

if len(sys.argv) < 3:
    print("python3 tools/2_1_ply_file_to_3d_coord_model.py [cfg_fn] [dataset_name]")
    sys.exit(1)

cfg_fn = sys.argv[1]
cfg = inout.load_json(cfg_fn)
dataset = sys.argv[2]

bop_dir, source_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files,\
gts, cam_param_global = bop_io.get_dataset(cfg, dataset)

if not os.path.exists(bop_dir + "/models_xyz/"):
    os.makedirs(bop_dir + "/models_xyz/")
norm_factor = bop_dir + "/models_xyz/" + "norm_factor.json"
param = {}

for m_id, model_ply in enumerate(model_plys):
    model_id = model_ids[m_id]
    m_info = model_info['{}'.format(model_id)]
    keys = m_info.keys()
    sym_continous = [0, 0, 0, 0, 0, 0]
    center_x = center_y = center_z = True
    if 'symmetries_discrete' in keys:
        center_x = center_y = center_z = False
        print("keep the origin of the object when it has symmetric poses")
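# Hedged usage sketch of rmfield() defined above: it drops named fields from a structured
# numpy array, e.g. stripping color fields from PLY vertex data. Self-contained example with
# made-up vertex data (not taken from the repository):
import numpy as np

verts = np.array([(0.0, 1.0, 2.0, 255), (3.0, 4.0, 5.0, 128)],
                 dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1')])
xyz_only = rmfield(verts, 'red')    # keeps only the x/y/z fields
print(xyz_only.dtype.names)         # -> ('x', 'y', 'z')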
    # (fragment: tail of a helper that prepares the discriminator batch)
    if p > 0:
        y_disc[:] = 0
    return X_disc, y_disc

loss_weights = [100, 1]
train_gen_first = False
load_recent_weight = True

dataset = sys.argv[3]
cfg_fn = sys.argv[2]  # e.g. "cfg/cfg_bop2019.json"
cfg = inout.load_json(cfg_fn)

bop_dir, source_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files,\
gts, cam_param_global, scene_cam = bop_io.get_dataset(cfg, dataset, incl_param=True)
im_width, im_height = cam_param_global['im_size']

weight_prefix = "pix2pose"
obj_id = int(sys.argv[4])  # identical to the number of the ply file
weight_dir = bop_dir + "/pix2pose_weights/{:02d}".format(obj_id)
if not os.path.exists(weight_dir):
    os.makedirs(weight_dir)

back_dir = sys.argv[5]
data_dir = bop_dir + "/train_xyz/{:02d}".format(obj_id)
batch_size = 50
datagenerator = dataio.data_generator(data_dir, back_dir, batch_size=batch_size,
                                      res_x=im_width, res_y=im_height)
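# Hedged note on the command-line layout implied by the argv indices above. The script name
# and the role of sys.argv[1] (commonly a GPU id in these tools) are assumptions; the other
# positions follow directly from the code above:
#   sys.argv[2] -> cfg_fn    e.g. "cfg/cfg_bop2019.json"
#   sys.argv[3] -> dataset   e.g. "tless"
#   sys.argv[4] -> obj_id    integer matching the number of the object's ply file
#   sys.argv[5] -> back_dir  folder of background images passed to the data generator
USAGE = ("python3 tools/3_train_pix2pose.py "  # script name is an assumption
         "<gpu_id> <cfg_fn> <dataset> <obj_id> <background_img_dir>")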
augment_inplane = 30
if len(sys.argv) < 3:
    print("rendering 3D coordinate images using a converted ply file; "
          "the format of the 6D pose challenge (http://cmp.felk.cvut.cz/sixd/challenge_2017/) can be used")
    print("python3 tools/2_2_render_pix2pose_training.py [cfg_fn] [dataset_name]")
else:
    cfg_fn = sys.argv[1]  # e.g. "cfg/cfg_bop2019.json"
    cfg = inout.load_json(cfg_fn)
    dataset = sys.argv[2]
    bop_dir,source_dir,model_plys,model_info,model_ids,rgb_files,\
    depth_files,mask_files,gts,cam_param_global,scene_cam =\
        bop_io.get_dataset(cfg, dataset, incl_param=True)
    xyz_target_dir = bop_dir + "/train_xyz"
    im_width, im_height = cam_param_global['im_size']
    cam_K = cam_param_global['K']

    # check that the training images have the same dimensions as the test images
    rgb_fn = rgb_files[0]
    img_temp = inout.load_im(rgb_fn)
    if img_temp.shape[0] != im_height or img_temp.shape[1] != im_width:
        print("the size of the training images differs from the test images")
        im_height = img_temp.shape[0]
        im_width = img_temp.shape[1]

    ren = Renderer((im_width, im_height), cam_K)
    t_model = -1
    if len(sys.argv) == 3:
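# Hedged aside on cam_K used above: in the BOP camera parameters, 'K' is the 3x3 pinhole
# intrinsic matrix, so a point (x, y, z) in camera coordinates projects to pixels as
# K @ [x, y, z] followed by division by the third component. Self-contained sketch with an
# illustrative, made-up intrinsic matrix:
import numpy as np

def project(K, X_c):
    # project a 3D point in camera coordinates to (u, v) pixel coordinates
    uvw = np.asarray(K, dtype=np.float64) @ np.asarray(X_c, dtype=np.float64)
    return uvw[:2] / uvw[2]

K_demo = np.array([[572.0,   0.0, 320.0],
                   [  0.0, 572.0, 240.0],
                   [  0.0,   0.0,   1.0]])
print(project(K_demo, [0.1, -0.05, 1.0]))  # -> approx. [377.2, 211.4]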
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
from imgaug import augmenters as iaa
from tools.mask_rcnn_util import BopDetectConfig, BopDataset
import skimage

if len(sys.argv) != 3:
    print("python3 tools/1_2_train_maskrcnn.py [cfg_fn] [dataset]")

cfg_fn = sys.argv[1]  # e.g. "cfg/cfg_bop2019.json"
cfg = inout.load_json(cfg_fn)
dataset = sys.argv[2]

bop_dir, _, _, _, model_ids, _, _, _, _, cam_param_global = bop_io.get_dataset(cfg, dataset, train=True)
im_width, im_height = cam_param_global['im_size']

MODEL_DIR = os.path.join(bop_dir, "weight_detection")
config = BopDetectConfig(dataset=dataset,
                         num_classes=model_ids.shape[0] + 1,  # 1 + len(model_plys): background + one class per object
                         im_width=im_width, im_height=im_height)
config.display()

dataset_train = BopDataset()
dataset_train.set_dataset(dataset, model_ids, os.path.join(bop_dir, "train_detect"))
dataset_train.load_dataset()
dataset_train.prepare()

model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=MODEL_DIR)
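# Hedged sketch of the kind of imgaug pipeline that Matterport Mask R-CNN accepts via
# model.train(..., augmentation=...). The imgaug import above suggests such a pipeline is
# built later in the script; the specific augmenters and parameters here are illustrative,
# not the repository's exact choice.
augmentation = iaa.Sometimes(0.5, iaa.Sequential([
    iaa.Fliplr(0.5),                     # random horizontal flips
    iaa.Affine(rotate=(-15, 15)),        # small in-plane rotations
    iaa.GaussianBlur(sigma=(0.0, 2.0)),  # mild blur
]))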
from bop_toolkit_lib import inout
from tools import bop_io
import copy

# YCB (have to check) -> LMO
# HB, ITODD -> T-LESS
ref_gt = inout.load_scene_gt(
    os.path.join("/home/kiru/media/hdd/bop/tless/train_render_reconst/000001",
                 "scene_gt.json"))
ref_camera = inout.load_scene_camera(
    os.path.join("/home/kiru/media/hdd/bop/tless/train_render_reconst/000001",
                 "scene_camera.json"))

# bop_dir,source_dir,model_plys,model_info,model_ids,rgb_files,depth_files,mask_files,gts,cam_param_global = bop_io.get_dataset('hb',train=True)
# bop_dir,source_dir,model_plys,model_info,model_ids,rgb_files,depth_files,mask_files,gts,cam_param_global = bop_io.get_dataset('itodd',train=True)
bop_dir, source_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files,\
gts, cam_param_global = bop_io.get_dataset('ycbv', train=True)

im_width, im_height = cam_param_global['im_size']
camK = cam_param_global['K']
cam_K_list = np.array(camK).reshape(-1)
ren = Renderer((im_width, im_height), camK)

source_dir = bop_dir + "/train"
if not os.path.exists(source_dir):
    os.makedirs(source_dir)

for i in range(len(model_plys)):
    target_dir = source_dir + "/{:06d}".format(model_ids[i])
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    if not os.path.exists(target_dir + "/rgb"):
        os.makedirs(target_dir + "/rgb")
    if not os.path.exists(target_dir + "/depth"):
        os.makedirs(target_dir + "/depth")
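# Hedged sketch of reading back the reference annotations loaded above. Both files follow the
# BOP convention: dictionaries keyed by image id, with per-object rotation/translation in
# scene_gt and per-image intrinsics in scene_camera. Field names are the BOP standard; treat
# the exact array shapes as whatever bop_toolkit_lib's loaders return.
for im_id in sorted(ref_gt.keys())[:3]:
    K_ref = ref_camera[im_id]['cam_K']          # per-image intrinsics (3x3 in the BOP format)
    for gt in ref_gt[im_id]:
        R_m2c = gt['cam_R_m2c']                 # rotation, model -> camera
        t_m2c = gt['cam_t_m2c']                 # translation (mm), model -> camera
        print(im_id, gt['obj_id'],
              np.asarray(R_m2c).shape, np.asarray(t_m2c).ravel(), np.asarray(K_ref).ravel()[:3])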