def query_remove_logdir(logdir):
    """Interactively offer to delete *logdir*; remove it only on a 'y' answer.

    The prompt is always shown (even if the directory no longer exists);
    deletion happens only when the user confirms and the path is present.
    """
    from instanceseg.utils import misc
    import os
    import shutil
    # Short-circuit keeps the prompt-first behavior: the question is asked
    # before the existence check, exactly as before.
    user_said_yes = misc.y_or_n_input('Remove {}?'.format(logdir), default='n') == 'y'
    if user_said_yes and os.path.exists(logdir):
        shutil.rmtree(logdir)
def run(my_trainer: trainer.Trainer, watching_validator_gpu=None, write_val_to_stdout=False):
    """Run training to completion, then evaluate the final model on the validation split.

    Parameters
    ----------
    my_trainer : trainer.Trainer
        Fully-configured trainer; its ``train()`` and ``validate_split()`` are invoked here.
    watching_validator_gpu : optional
        If not None, validation is offloaded to a watcher process on this GPU
        before training starts.
    write_val_to_stdout : bool
        Forwarded to the watcher; presumably echoes validation output — TODO confirm.

    Returns
    -------
    eval_metrics scaled by 100, or None if ``validate_split`` produced none.
    """
    if watching_validator_gpu is not None:
        # Register a cleanup to kill the watcher if spawning it fails mid-way;
        # once the watcher is up, swap that for a handler bound to the real pid.
        atexit.register(find_and_kill_watcher, my_trainer.exporter.out_dir)
        pid, pidout_filename, writer = validator.offload_validation_to_watcher(
            my_trainer, watching_validator_gpu, as_subprocess=not DEBUG_WATCHER,
            write_val_to_stdout=write_val_to_stdout)
        atexit.unregister(find_and_kill_watcher)
        atexit.register(terminate_watcher, pid, writer)
    try:
        my_trainer.train()
        misc.color_text('Training is complete!', 'OKGREEN')
        # Training succeeded, so stop offering to delete the log directory at exit.
        # NOTE(review): query_remove_logdir is presumably registered by the caller
        # before run() — not visible here; confirm.
        atexit.unregister(query_remove_logdir)
    except KeyboardInterrupt:
        # Ctrl-C stops training but still offers to run the final evaluation below.
        if y_or_n_input('I\'ve stopped training. Finish script?', default='y') == 'n':
            raise
    print('Evaluating final model')
    val_loss, eval_metrics, (segmentation_visualizations, score_visualizations) = \
        my_trainer.validate_split(should_export_visualizations=False)
    if eval_metrics is not None:
        # Convert to percentages for reporting.
        eval_metrics = np.array(eval_metrics)
        eval_metrics *= 100
    viz = visualization_utils.get_tile_image(segmentation_visualizations)
    write_np_array_as_img(os.path.join(here, 'viz_evaluate.png'), viz)
    return eval_metrics
def get_tiled_pred_gt_images(collated_stats_dict, img_types, labels_table, sorted_perf_outdir,
                             overwrite=None, list_of_original_input_images=None):
    """Tile per-image prediction/GT visualizations side by side and save them, ordered by image id.

    Parameters
    ----------
    collated_stats_dict : dict
        Collated stats; 'gt_json_file' is extracted from it to enumerate image names.
    img_types : list of str
        Image channels to tile (may include 'input_image').
    labels_table : labels table forwarded to ``get_image_data``.
    sorted_perf_outdir : str
        Root output directory; images land in ``<sorted_perf_outdir>/image_id``.
    overwrite : bool or None
        False: never overwrite; None: ask the user if everything already exists; True: overwrite.
    list_of_original_input_images : list or None
        Required when 'input_image' is in ``img_types``.

    Returns
    -------
    list of str
        Paths of the tiled images (existing or newly written).
    """
    data_type_as_str = 'image_name_id'
    print('Saving images in order of {}'.format(data_type_as_str))
    image_id_outdir = os.path.join(sorted_perf_outdir, '{}'.format('image_id'))
    # BUG FIX: str.rstrip('.png') strips any trailing '.', 'p', 'n', 'g' characters
    # (e.g. 'snapping.png' -> 'snappi'); remove the literal suffix instead.
    image_name_ids = [
        im_name[:-len('.png')] if im_name.endswith('.png') else im_name
        for im_name in get_image_file_list(extract_variable(collated_stats_dict, 'gt_json_file'))
    ]
    basename = '{}_'.format(data_type_as_str)
    # (removed no-op basename.format() — basename is already fully formatted)
    image_names = [
        os.path.join(image_id_outdir, basename + '{}_{}'.format(i, x_val) + '.png')
        for i, x_val in enumerate(image_name_ids)
    ]
    if os.path.exists(image_id_outdir) and all(os.path.exists(i) for i in image_names) and \
            (overwrite is False or (overwrite is None and y_or_n_input(
                # BUG FIX: the '{}' placeholder was never formatted and showed up
                # literally in the prompt.
                'All files already exist in {}. Would you like to overwrite? y/N'.format(
                    image_id_outdir), default='n') == 'n')):
        print('Using existing images from {}'.format(image_id_outdir))
    else:
        print('Loading image data')
        non_input_img_types = [i for i in img_types if i != 'input_image']
        img_d = get_image_data(collated_stats_dict, non_input_img_types, labels_table)
        if 'input_image' in img_types:
            assert list_of_original_input_images is not None
            img_d['input_image'] = load_image_list(list_of_original_input_images)
            # Repeat the input image once per non-input row so the tiles line up vertically.
            img_d['input_image'] = [
                np.concatenate([im for _ in range(len(non_input_img_types))], axis=0)
                for im in img_d['input_image']
            ]
        print('Tiling images')
        imgs_side_by_side = [
            get_tile_image(list(imgs), (1, len(imgs)), margin_color=(0, 0, 0), margin_size=2)
            for imgs in zip(*[img_d[img_type] for img_type in img_types])
        ]
        ids, image_names = show_images_in_order_of(
            imgs_side_by_side, image_name_ids, outdir=image_id_outdir,
            basename='{}_'.format(data_type_as_str))
        print('Images saved to {}'.format(image_id_outdir))
    return image_names
def write_images_and_confirm(dataloader, rule_as_string_to_user):
    """Dump every image/label in *dataloader* to a temp directory and ask the user to confirm
    that the images obey *rule_as_string_to_user*.

    Raises
    ------
    Exception
        If the user answers 'n' to the confirmation prompt.
    """
    img_dir = '/tmp/unittest/'
    if os.path.exists(img_dir):
        y_or_n = y_or_n_input('{} exists. Would you like to remove it? (y/n)'.format(img_dir))
        if y_or_n == 'y':
            shutil.rmtree(img_dir)
        else:
            msg = 'Specify a new directory:'
            new_dir = input(msg)
            while new_dir != '' and os.path.exists(new_dir):
                new_dir = input('Directory already exists. \n' + msg)
            # BUG FIX: new_dir was collected but never used, so makedirs() below
            # crashed on the still-existing default directory.
            if new_dir != '':
                img_dir = new_dir
    # exist_ok guards the case where the user kept the existing directory.
    os.makedirs(img_dir, exist_ok=True)
    for idx, (d, (sem_lbl, inst_lbl)) in tqdm.tqdm(enumerate(dataloader),
                                                   desc='Iterating through new dataloader',
                                                   total=len(dataloader)):
        batch_sz = sem_lbl.size(0)
        sem_lbl_rgb = label2rgb(sem_lbl.numpy())
        inst_lbl_rgb = label2rgb(inst_lbl.numpy())
        for img_idx in range(batch_sz):
            img_untransformed, _ = \
                TrainerExporter.untransform_data(dataloader, d[img_idx, ...], None)
            # BUG FIX: filenames previously used only the batch index, so images
            # within a batch (batch_sz > 1) overwrote one another.
            suffix = '{:06d}_{:02d}.png'.format(idx, img_idx)
            write_image(os.path.join(img_dir, 'inst_lbl_' + suffix),
                        inst_lbl_rgb[img_idx, ...])
            write_image(os.path.join(img_dir, 'sem_lbl_' + suffix),
                        sem_lbl_rgb[img_idx, ...])
            write_image(os.path.join(img_dir, 'img_' + suffix), img_untransformed)
    # BUG FIX: the format string had one placeholder but two arguments, so the
    # rule description was silently dropped from the prompt.
    msg_to_user = '******' \
                  '{} {} y/n:'.format(img_dir, rule_as_string_to_user)
    y_or_n = input(msg_to_user)
    if y_or_n == 'n':
        raise Exception('Test error according to user')
    y_or_n = y_or_n_input('Remove test directory {}?'.format(img_dir))
    if y_or_n == 'y':
        shutil.rmtree(img_dir)
def panoptic_converter_from_rgb_ids(
        out_folder, out_json_file, labels_file_list,
        problem_config: instance_utils.InstanceProblemConfig,
        labels_table=None, VOID_RGB=(255, 255, 255), VOID_INSTANCE_G=255, overwrite=None):
    """
    Takes predictions output from tester (special Trainer type) and outputs coco panoptic format

    Inputs should represent the different channels, where rgb2id creates the channel id
    (R + G*255 + B*255*255)

    Writes one '<name>_cocopano.png' per input label file into *out_folder* and a
    COCO-panoptic JSON to *out_json_file*.
    """
    # Replace split with file_list
    labels_table = labels_table or problem_config.labels_table
    if not os.path.isdir(out_folder):
        print("Creating folder {} for panoptic segmentation PNGs".format(out_folder))
        os.mkdir(out_folder)
    categories_dict = {cat.id: cat for cat in labels_table}
    images = []
    annotations = []
    n_files = len(labels_file_list)
    cocopano_ext = '_cocopano.png'

    # First pass: figure out which outputs already exist so we can skip or warn.
    all_files_already_exist = os.path.exists(out_json_file)
    file_exists = []
    for working_idx, label_f in tqdm.tqdm(enumerate(labels_file_list),
                                          desc='Finding files', total=n_files):
        file_name = label_f.split('/')[-1]
        out_file_name = file_name.replace('.png', cocopano_ext)
        file_exists.append(os.path.exists(os.path.join(out_folder, out_file_name)))
    all_files_already_exist = all_files_already_exist and all(file_exists)
    some_files_already_exist = any(file_exists)

    if all_files_already_exist:
        if overwrite is None:
            y_or_n = y_or_n_input('All files already exist. Overwrite?')
            if y_or_n == 'n':
                return
        elif overwrite is False:
            print('All files already exist.')
            return
    elif some_files_already_exist:
        print('Warning: some ({}/{}) files already existed. I may be overwriting them.'.format(
            sum(file_exists), len(file_exists)))

    for working_idx, label_f in tqdm.tqdm(enumerate(labels_file_list),
                                          desc='Converting to COCO panoptic format',
                                          total=n_files):
        rgb_format = np.array(Image.open(label_f), dtype=np.uint8)
        assert len(rgb_format.shape) == 3, 'Image should be in rgb format'
        file_name = label_f.split('/')[-1]
        out_file_name = file_name.replace('.png', cocopano_ext)
        # BUG FIX: previously checked os.path.exists(out_file_name) (relative to the
        # CWD) although outputs are written into out_folder, so the skip never fired.
        if os.path.exists(os.path.join(out_folder, out_file_name)):
            if not overwrite:
                continue
        assert file_name.rsplit('_', 2)[2] == 'sem255instid2rgb.png'
        image_id = file_name.rsplit('_', 2)[1]
        image_filename = '{}_image.png'.format(image_id)
        # image entry, id for image is its filename without extension
        images.append({
            "id": image_id,
            "width": rgb_format.shape[1],
            "height": rgb_format.shape[0],
            "file_name": image_filename
        })
        # (removed unused locals: id_generator = IdGenerator(categories_dict), idx = 0)
        present_channel_colors = np.unique(rgb_format.reshape(-1, rgb_format.shape[2]), axis=0)
        # Drop void colors: full-void RGB and the reserved void instance value in G.
        present_channel_colors = [
            c for c in present_channel_colors
            if rgb2id(c) != rgb2id(VOID_RGB) and c[1] != VOID_INSTANCE_G
        ]
        pan_format = np.zeros((rgb_format.shape[0], rgb_format.shape[1], 3), dtype=np.uint8)
        segm_info = []
        pan_ids = np.zeros((rgb_format.shape[0], rgb_format.shape[1]))
        semantic_ids_not_in_category_dict = []
        unique_segm_ids = []
        for color_val in present_channel_colors:
            # R channel encodes the semantic id, G the per-class instance count id.
            semantic_id = color_val[0]
            instance_count_id = color_val[1]
            is_crowd = instance_count_id < 1
            if semantic_id not in categories_dict:
                if semantic_id not in semantic_ids_not_in_category_dict:
                    semantic_ids_not_in_category_dict.append(semantic_id)
                continue
            # NOTE(review): subscript access here vs. attribute access (cat.id) above —
            # labels_table entries presumably support both; confirm.
            if categories_dict[semantic_id]['isthing'] == 0:
                is_crowd = 0
            mask = (rgb_format == color_val).all(axis=2)
            area, bbox = get_bounding_box(mask)
            segment_id = semantic_id * 1000 + instance_count_id
            pan_color = id2rgb(segment_id)
            pan_format[mask, :] = pan_color
            pan_ids[mask] = segment_id
            assert segment_id not in unique_segm_ids  # every segment should be unique
            unique_segm_ids.append(segment_id)
            segm_info.append({
                "id": int(segment_id),
                "category_id": int(semantic_id),
                "area": area,
                "bbox": bbox,
                # int() so the JSON dump doesn't choke on a numpy bool_.
                "iscrowd": int(is_crowd)
            })
        if len(semantic_ids_not_in_category_dict) > 0:
            print('The following semantic ids were present in the image, but not in the '
                  'categories dict ({catdict}): {semids}'.format(
                      catdict=categories_dict, semids=semantic_ids_not_in_category_dict))
        Image.fromarray(pan_format).save(os.path.join(out_folder, out_file_name))
        # Reverse the process and ensure we get the right id
        reloaded_pan_img = np.array(Image.open(os.path.join(out_folder, out_file_name)),
                                    dtype=np.uint32)
        reloaded_pan_id = rgb2id(reloaded_pan_img)
        assert np.all(reloaded_pan_id == pan_ids)
        # print('Max pan id: {}'.format(reloaded_pan_id.max()))
        if len(segm_info) == 0:
            raise Exception('No segments in this image')
        annotations.append({
            'image_id': image_id,
            'file_name': out_file_name,
            "segments_info": segm_info
        })
        # shutil.copy(label_f, os.path.join(out_folder, file_name))
        # BUG FIX: removed leftover debugger (import ipdb; ipdb.set_trace()) that
        # froze non-interactive runs; fail loudly with context instead.
        assert len(segm_info) == len(present_channel_colors), \
            'Expected one segment per present color: {} segments vs {} colors'.format(
                len(segm_info), len(present_channel_colors))

    d = {
        'images': images,
        'annotations': annotations,
        'categories': [l.__dict__ for l in labels_table],
    }
    # Only delete the JSON on failure if we created it in this call.
    rm_json_if_breaks = not os.path.exists(out_json_file)
    try:
        save_json(d, out_json_file)
    except BaseException:  # preserves original bare-except semantics; always re-raises
        # BUG FIX: guard removal — save_json may fail before the file exists.
        if rm_json_if_breaks and os.path.exists(out_json_file):
            os.remove(out_json_file)
        raise