def __init__(self, args, init_train_info=None, sub_dir=None):
    # Use None instead of a mutable {} default to avoid sharing one dict
    # across instances.
    init_train_info = init_train_info if init_train_info is not None else {}
    self.args = args
    misc.ensure_dir(args.logdir)
    sub_dir = args.continue_from or sub_dir or misc.datetimestr()
    self.logdir = os.path.join(args.logdir, sub_dir)
    misc.ensure_dir(self.logdir)
    self._setup_log_file()
    self._create_train_info(args, init_train_info)

def transfer(data_16000_dir, data_dir, args):
    misc.ensure_dir(data_dir, erase_old=True)
    for file in os.listdir(data_16000_dir):
        in_folder = os.path.join(data_16000_dir, file)
        if not os.path.isdir(in_folder):
            continue
        out_folder = os.path.join(data_dir, file + '.pth')
        misc.ensure_dir(out_folder, erase_old=True)
        transfer_folder(in_folder, out_folder, args)

def transfer(raw_data_dir, data_dir, sample_rate):
    import misc
    misc.ensure_dir(data_dir, erase_old=True)
    for file in os.listdir(raw_data_dir):
        in_folder = os.path.join(raw_data_dir, file)
        if not os.path.isdir(in_folder):
            continue
        out_folder = os.path.join(data_dir, file)
        misc.ensure_dir(out_folder, erase_old=True)
        transfer_folder(in_folder, out_folder, sample_rate)

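# misc.ensure_dir is used throughout these snippets but its implementation is
# not shown. A minimal sketch consistent with the call sites above (a bare
# path, plus an optional erase_old flag) -- an assumption, not the actual
# helper:
import os
import shutil


def ensure_dir(path, erase_old=False):
    # Optionally wipe a previous run's output before recreating the folder.
    if erase_old and os.path.isdir(path):
        shutil.rmtree(path)
    # Create the directory and any missing parents; tolerate existing dirs.
    os.makedirs(path, exist_ok=True)
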
def save_errors(_error_sign, _scene_errs):
    # Save the calculated errors to a JSON file.
    errors_path = p['out_errors_tpath'].format(
        eval_path=p['eval_path'], result_name=result_name,
        error_sign=_error_sign, scene_id=scene_id)
    misc.ensure_dir(os.path.dirname(errors_path))
    misc.log('Saving errors to: {}'.format(errors_path))
    inout.save_json(errors_path, _scene_errs)

def end_log_game_action(self, game_name):
    import json
    if game_name not in self._game_info:
        return
    game = self._game_info.get(game_name)
    if 'actions' in game:
        file_path = os.path.join(self.logdir, 'game_info')
        misc.ensure_dir(file_path)
        file_path = os.path.join(file_path, game_name + '.actions.json')
        misc.save_file(file_path, json.dumps(game['actions']))
    del self._game_info[game_name]

import fnmatch
import io
import os
import random

from tqdm import tqdm

import misc


def create_manifest(manifest_file, data_files):
    # sort_files is defined elsewhere in this script; it pairs each path
    # with an extra value (d[0] is the path, d[1] the extra value).
    data = sort_files(data_files)
    print('dumping: "%s"...' % manifest_file)
    with io.FileIO(manifest_file, "w") as file:
        for d in tqdm(data):
            label = int(d[0].split('/')[-1].split('-')[1])
            line = (d[0] + ',{},{}\n').format(label, d[1])
            file.write(line.encode('utf-8'))


if __name__ == '__main__':
    from train import parse_arguments

    args = parse_arguments()
    wav_files = [
        os.path.join(dirpath, f)
        for dirpath, dirnames, files in os.walk(args.data_dir)
        for f in fnmatch.filter(files, '*.wav.pth')
    ]
    print('Number of files: {}'.format(len(wav_files)))
    random.shuffle(wav_files)

    train_size = len(wav_files) - args.eval_size - args.test_size
    train_data = wav_files[0:train_size]
    eval_data = wav_files[train_size:train_size + args.eval_size]
    test_data = wav_files[train_size + args.eval_size:]

    misc.ensure_dir(args.manifest_dir, erase_old=True)
    create_manifest(os.path.join(args.manifest_dir, 'train.csv'), train_data)
    create_manifest(os.path.join(args.manifest_dir, 'eval.csv'), eval_data)
    create_manifest(os.path.join(args.manifest_dir, 'test.csv'), test_data)

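# The rows written above have the form '<path>,<label>,<extra>', where <extra>
# is whatever sort_files() paired with each path. A minimal reader for these
# manifests -- a hypothetical sketch of the format, not part of the repo:
def read_manifest(manifest_file):
    samples = []
    with open(manifest_file, 'r', encoding='utf-8') as f:
        for row in f:
            path, label, extra = row.rstrip('\n').split(',')
            samples.append((path, int(label), extra))
    return samples
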
parser.add_argument(
    '--local_model',
    default="C:\\Users\\fcalcagno\\Documents\\pytorch-playground_local"
            "\\svhn\\log\\best-90.pth",
    help='Where the local model is located')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info

# select gpu
args.gpu = 1
args.ngpu = 1

# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")

# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# data loader and model
train_loader, test_loader = dataset_digits.get(
    batch_size=args.batch_size, csv_path=args.csv_path,
    data_root=args.data_root,

scene_ids = dataset_params.get_present_scene_ids(dp_split)
for scene_id in scene_ids:

    # Load scene GT.
    scene_gt_path = dp_split['scene_gt_tpath'].format(scene_id=scene_id)
    scene_gt = inout.load_scene_gt(scene_gt_path)

    # Load scene camera.
    scene_camera_path = dp_split['scene_camera_tpath'].format(
        scene_id=scene_id)
    scene_camera = inout.load_scene_camera(scene_camera_path)

    # Create folders for the output masks (if they do not exist yet).
    mask_dir_path = os.path.dirname(dp_split['mask_tpath'].format(
        scene_id=scene_id, im_id=0, gt_id=0))
    misc.ensure_dir(mask_dir_path)

    mask_visib_dir_path = os.path.dirname(dp_split['mask_visib_tpath'].format(
        scene_id=scene_id, im_id=0, gt_id=0))
    misc.ensure_dir(mask_visib_dir_path)

    # Initialize a renderer.
    misc.log('Initializing renderer...')
    width, height = dp_split['im_size']
    ren = renderer.create_renderer(
        width, height, renderer_type=p['renderer_type'], mode='depth')

    # Add object models.
    for obj_id in dp_model['obj_ids']:

    'meshlab_script_path': os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'meshlab_scripts',
        r'remesh_for_eval_cell=0.25.mlx'),
}

################################################################################

# Load dataset parameters.
dp_model_in = dataset_params.get_model_params(
    p['datasets_path'], p['dataset'], p['model_in_type'])
dp_model_out = dataset_params.get_model_params(
    p['datasets_path'], p['dataset'], p['model_out_type'])

# Attributes to save for the output models.
attrs_to_save = []

# Process models of all objects in the selected dataset.
for obj_id in dp_model_in['obj_ids']:
    misc.log('\n\n\nProcessing model of object {}...\n'.format(obj_id))

    model_in_path = dp_model_in['model_tpath'].format(obj_id=obj_id)
    model_out_path = dp_model_out['model_tpath'].format(obj_id=obj_id)
    misc.ensure_dir(os.path.dirname(model_out_path))

    misc.run_meshlab_script(p['meshlab_server_path'], p['meshlab_script_path'],
                            model_in_path, model_out_path, attrs_to_save)

misc.log('Done.')

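# misc.run_meshlab_script is not shown here. A rough sketch of what such a
# wrapper might do with meshlabserver's standard CLI flags (-i input,
# -o output, -s script, -m attributes to save) -- an assumption, not the
# actual implementation:
import subprocess


def run_meshlab_script(server_path, script_path, model_in, model_out, attrs):
    cmd = [server_path, '-i', model_in, '-o', model_out, '-s', script_path]
    if attrs:
        cmd += ['-m'] + list(attrs)
    subprocess.check_call(cmd)
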
# Output path templates.
out_rgb_tpath = \
    os.path.join('{out_path}', '{obj_id:06d}', 'rgb', '{im_id:06d}.png')
out_depth_tpath = \
    os.path.join('{out_path}', '{obj_id:06d}', 'depth', '{im_id:06d}.png')
out_scene_camera_tpath = \
    os.path.join('{out_path}', '{obj_id:06d}', 'scene_camera.json')
out_scene_gt_tpath = \
    os.path.join('{out_path}', '{obj_id:06d}', 'scene_gt.json')
out_views_vis_tpath = \
    os.path.join('{out_path}', '{obj_id:06d}', 'views_radius={radius}.ply')

################################################################################

out_path = out_tpath.format(dataset=dataset)
misc.ensure_dir(out_path)

# Load dataset parameters.
dp_split_test = dataset_params.get_split_params(datasets_path, dataset, 'test')
dp_model = dataset_params.get_model_params(datasets_path, dataset, model_type)
dp_camera = dataset_params.get_camera_params(datasets_path, dataset, cam_type)

if not obj_ids:
    obj_ids = dp_model['obj_ids']

# Image size and K for the RGB image (potentially with SSAA).
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in dp_camera['im_size']]
K_rgb = dp_camera['K'] * ssaa_fact

# Intrinsic parameters for RGB rendering.
fx_rgb, fy_rgb, cx_rgb, cy_rgb = \

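# Illustration of how the path templates defined at the top of the previous
# snippet expand; the values below are made up:
example_rgb_path = out_rgb_tpath.format(out_path='out/lm', obj_id=1, im_id=0)
# -> 'out/lm/000001/rgb/000000.png' (with the platform's path separator)
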
    })

    # Visualization of the visibility mask.
    if p['vis_visibility_masks']:
        depth_im_vis = visualization.depth_for_vis(depth, 0.2, 1.0)
        depth_im_vis = np.dstack([depth_im_vis] * 3)

        # np.float was removed in NumPy 1.24+; use the explicit dtype.
        visib_gt_vis = visib_gt.astype(np.float64)
        zero_ch = np.zeros(visib_gt_vis.shape)
        visib_gt_vis = np.dstack([zero_ch, visib_gt_vis, zero_ch])

        vis = 0.5 * depth_im_vis + 0.5 * visib_gt_vis
        vis[vis > 1] = 1

        vis_path = p['vis_mask_visib_tpath'].format(
            delta=p['delta'], dataset=p['dataset'], split=p['dataset_split'],
            scene_id=scene_id, im_id=im_id, gt_id=gt_id)
        misc.ensure_dir(os.path.dirname(vis_path))
        inout.save_im(vis_path, vis)

# Save the info for the current scene.
scene_gt_info_path = dp_split['scene_gt_info_tpath'].format(
    scene_id=scene_id)
misc.ensure_dir(os.path.dirname(scene_gt_info_path))
inout.save_json(scene_gt_info_path, scene_gt_info)

for obj_id in dp_model['obj_ids']:

    # Load object model.
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    ren.add_object(obj_id, model_path)

    poses = misc.get_symmetry_transformations(
        models_info[obj_id], p['max_sym_disc_step'])

    for pose_id, pose in enumerate(poses):
        for view_id, view in enumerate(p['views']):

            # Compose the symmetry transformation with the view:
            # x_cam = R_view * (R_sym * x + t_sym) + t_view.
            R = view['R'].dot(pose['R'])
            t = view['R'].dot(pose['t']) + view['t']

            vis_rgb = ren.render_object(obj_id, R, t, fx, fy, cx, cy)['rgb']

            # Path to the output RGB visualization.
            vis_rgb_path = p['vis_rgb_tpath'].format(
                vis_path=p['vis_path'], dataset=p['dataset'], obj_id=obj_id,
                view_id=view_id, pose_id=pose_id)
            misc.ensure_dir(os.path.dirname(vis_rgb_path))
            inout.save_im(vis_rgb_path, vis_rgb)

misc.log('Done.')

def vis_object_poses(
      poses, K, renderer, rgb=None, depth=None, vis_rgb_path=None,
      vis_depth_diff_path=None, vis_rgb_resolve_visib=False):
    """Visualizes 3D object models in specified poses in a single image.

    Two visualizations are created:
    1. An RGB visualization (if vis_rgb_path is not None).
    2. A depth-difference visualization (if vis_depth_diff_path is not None).

    :param poses: List of dictionaries, each with info about one pose:
      - 'obj_id': Object ID.
      - 'R': 3x3 ndarray with a rotation matrix.
      - 't': 3x1 ndarray with a translation vector.
      - 'text_info': Info to write at the object (see write_text_on_image).
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :param renderer: Instance of the Renderer class (see renderer.py).
    :param rgb: ndarray with the RGB input image.
    :param depth: ndarray with the depth input image.
    :param vis_rgb_path: Path to the output RGB visualization.
    :param vis_depth_diff_path: Path to the output depth-difference
      visualization.
    :param vis_rgb_resolve_visib: Whether to resolve visibility of the objects
      (i.e. only the closest object is visualized at each pixel).
    """
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]

    # Indicators of visualization types.
    vis_rgb = vis_rgb_path is not None
    vis_depth_diff = vis_depth_diff_path is not None

    if vis_rgb and rgb is None:
        raise ValueError(
            'RGB visualization triggered but RGB image not provided.')

    if (vis_depth_diff or (vis_rgb and vis_rgb_resolve_visib)) \
          and depth is None:
        raise ValueError(
            'Depth visualization triggered but D image not provided.')

    # Prepare images for rendering.
    im_size = None
    ren_rgb = None
    ren_rgb_info = None
    ren_depth = None

    if vis_rgb:
        im_size = (rgb.shape[1], rgb.shape[0])
        ren_rgb = np.zeros(rgb.shape, np.uint8)
        ren_rgb_info = np.zeros(rgb.shape, np.uint8)

    if vis_depth_diff:
        if im_size and im_size != (depth.shape[1], depth.shape[0]):
            raise ValueError('The RGB and D images must have the same size.')
        else:
            im_size = (depth.shape[1], depth.shape[0])

    if vis_depth_diff or (vis_rgb and vis_rgb_resolve_visib):
        ren_depth = np.zeros((im_size[1], im_size[0]), np.float32)

    # Render the pose estimates one by one.
    for pose in poses:

        # Rendering.
        ren_out = renderer.render_object(
            pose['obj_id'], pose['R'], pose['t'], fx, fy, cx, cy)

        m_rgb = None
        if vis_rgb:
            m_rgb = ren_out['rgb']

        m_mask = None
        if vis_depth_diff or (vis_rgb and vis_rgb_resolve_visib):
            m_depth = ren_out['depth']

            # Get mask of the surface parts that are closer than the surfaces
            # rendered before.
            visible_mask = np.logical_or(ren_depth == 0, m_depth < ren_depth)
            m_mask = np.logical_and(m_depth != 0, visible_mask)

            ren_depth[m_mask] = m_depth[m_mask].astype(ren_depth.dtype)

        # Combine the RGB renderings.
        if vis_rgb:
            if vis_rgb_resolve_visib:
                ren_rgb[m_mask] = m_rgb[m_mask].astype(ren_rgb.dtype)
            else:
                ren_rgb_f = \
                    ren_rgb.astype(np.float32) + m_rgb.astype(np.float32)
                ren_rgb_f[ren_rgb_f > 255] = 255
                ren_rgb = ren_rgb_f.astype(np.uint8)

            # Draw 2D bounding box and write text info.
            obj_mask = np.sum(m_rgb > 0, axis=2)
            ys, xs = obj_mask.nonzero()
            if len(ys):
                # bbox_color = model_color
                # text_color = model_color
                bbox_color = (0.3, 0.3, 0.3)
                text_color = (1.0, 1.0, 1.0)
                text_size = 11

                bbox = misc.calc_2d_bbox(xs, ys, im_size)
                im_size = (obj_mask.shape[1], obj_mask.shape[0])
                ren_rgb_info = draw_rect(ren_rgb_info, bbox, bbox_color)

                if 'text_info' in pose:
                    text_loc = (bbox[0] + 2, bbox[1])
                    ren_rgb_info = write_text_on_image(
                        ren_rgb_info, pose['text_info'], text_loc,
                        color=text_color, size=text_size)

    # Blend and save the RGB visualization.
    if vis_rgb:
        misc.ensure_dir(os.path.dirname(vis_rgb_path))

        vis_im_rgb = 0.5 * rgb.astype(np.float32) + \
                     0.5 * ren_rgb.astype(np.float32) + \
                     1.0 * ren_rgb_info.astype(np.float32)
        vis_im_rgb[vis_im_rgb > 255] = 255
        inout.save_im(
            vis_rgb_path, vis_im_rgb.astype(np.uint8), jpg_quality=95)

    # Save the image of depth differences.
    if vis_depth_diff:
        misc.ensure_dir(os.path.dirname(vis_depth_diff_path))

        # Calculate the depth difference at pixels where both depth maps
        # are valid.
        valid_mask = (depth > 0) * (ren_depth > 0)
        depth_diff = valid_mask * (ren_depth.astype(np.float32) - depth)

        delta = 15
        below_delta = valid_mask * (depth_diff < delta)
        below_delta_vis = (255 * below_delta).astype(np.uint8)

        depth_diff_vis = 255 * depth_for_vis(depth_diff - depth_diff.min())
        depth_diff_vis = np.dstack(
            [below_delta_vis, depth_diff_vis, depth_diff_vis]).astype(np.uint8)
        depth_diff_vis[np.logical_not(valid_mask)] = 0

        depth_diff_valid = depth_diff[valid_mask]
        depth_info = [
            {'name': 'min diff', 'fmt': ':.3f',
             'val': np.min(depth_diff_valid)},
            {'name': 'max diff', 'fmt': ':.3f',
             'val': np.max(depth_diff_valid)},
            {'name': 'mean diff', 'fmt': ':.3f',
             'val': np.mean(depth_diff_valid)},
        ]
        depth_diff_vis = write_text_on_image(depth_diff_vis, depth_info)
        inout.save_im(vis_depth_diff_path, depth_diff_vis)

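# A hypothetical invocation of vis_object_poses(), assuming the intrinsic
# matrix K, a renderer instance ren, and the input rgb/depth images were
# loaded elsewhere (the variable names and output paths are placeholders):
poses = [{
    'obj_id': 1,                             # ID of a model added to ren
    'R': np.eye(3),                          # 3x3 rotation (illustrative)
    't': np.array([[0.0], [0.0], [500.0]]),  # 3x1 translation
}]
vis_object_poses(
    poses, K, ren, rgb=rgb, depth=depth,
    vis_rgb_path='vis/000000_rgb.jpg',
    vis_depth_diff_path='vis/000000_depth_diff.jpg',
    vis_rgb_resolve_visib=True)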