def main():
    print("Processing the dataset...\n")
    if not os.path.isdir(IMAGE_DIR):
        raise ValueError(
            "Depth map and normal map directory doesn't exist or there is an error in reading it."
        )
    if not os.path.isdir(SDF_DIR):
        raise ValueError(
            "Points and SDF directory doesn't exist or there is an error in reading it."
        )

    # Read the training and validation image lists, usually .txt files.
    train_examples = dataset_util.read_examples_list(TRAIN_DATA_LIST_NAME)
    val_examples = dataset_util.read_examples_list(VAL_DATA_LIST_NAME)

    # Create the TFRecord file for the training data.
    print("Processing the training data...")
    create_tf_record(TRAIN_TF_RECORD_NAME, IMAGE_DIR, FOREGROUND_DIR, SDF_DIR,
                     train_examples)
    print("DONE!\n")

    if VALIDATION_EXISTS:
        # Create the TFRecord file for the validation data.
        print("Processing the validation data...\n")
        create_tf_record(VAL_TF_RECORD_NAME, IMAGE_DIR, FOREGROUND_DIR, SDF_DIR,
                         val_examples)
        print("DONE!\n")
def main(unused_argv):
    if not os.path.exists(FLAGS.output_path):
        os.makedirs(FLAGS.output_path)

    tf.logging.info("Reading from VOC dataset")
    image_dir = os.path.join(FLAGS.data_dir, FLAGS.image_data_dir)
    label_dir = os.path.join(FLAGS.data_dir, FLAGS.label_data_dir)
    print(image_dir)
    print(label_dir)
    if not os.path.isdir(label_dir):
        raise ValueError(
            "Missing Augmentation label directory. "
            "You may download the augmented labels from the link (Thanks to DrSleep): "
            "https://www.dropbox.com/s/oeu149j8qtbs1x0/SegmentationClassAug.zip")

    train_examples = dataset_util.read_examples_list(FLAGS.train_data_list)
    val_examples = dataset_util.read_examples_list(FLAGS.valid_data_list)

    train_output_path = os.path.join(FLAGS.output_path, 'voc_train.record')
    val_output_path = os.path.join(FLAGS.output_path, 'voc_val.record')

    create_tf_record(train_output_path, image_dir, label_dir, train_examples)
    create_tf_record(val_output_path, image_dir, label_dir, val_examples)
def main(_):
    # if FLAGS.set not in SETS:
    #     raise ValueError('set must be in : {}'.format(SETS))
    # if FLAGS.year not in YEARS:
    #     raise ValueError('year must be in : {}'.format(YEARS))
    data_dir = 'E:/computerscience/my projects/humanoid/VOCdevkit'
    years = ['VOC2012']
    # if FLAGS.year != 'merged':
    #     years = [FLAGS.year]

    writer = tf.python_io.TFRecordWriter('pascal_train.record')
    label_map_dict = label_map_util.get_label_map_dict('data/pascal_label_map.pbtxt')

    for year in years:
        logging.info('Reading from PASCAL %s dataset.', year)
        examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
                                     'aeroplane_' + 'train' + '.txt')
        annotations_dir = os.path.join(data_dir, year, 'Annotations')
        examples_list = dataset_util.read_examples_list(examples_path)
        for idx, example in enumerate(examples_list):
            if idx % 100 == 0:
                logging.info('On image %d of %d', idx, len(examples_list))
            path = os.path.join(annotations_dir, example + '.xml')
            with tf.gfile.GFile(path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str)
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            tf_example = dict_to_tf_example(data, data_dir, label_map_dict)
            print(tf_example)
            writer.write(tf_example.SerializeToString())
            # NOTE: this break stops after the first example, so only one
            # record is written; remove it to convert the whole split.
            break

    writer.close()
def create_record_file(data_dir, output_file, year, split_name):
    years = ['VOC2007', 'VOC2012']
    if year != 'merged':
        years = [year]

    # Create tf.Record writer
    writer = tf.python_io.TFRecordWriter(output_file)

    for year in years:
        print('Creating TFRecord file from PASCAL {} {} dataset'.format(
            year, split_name))
        examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
                                     split_name + '.txt')
        annotations_dir = os.path.join(data_dir, year, 'Annotations')
        examples_list = dataset_util.read_examples_list(examples_path)

        for idx in trange(0, len(examples_list)):
            example = examples_list[idx]

            # Find and parse annotation xml file
            path = os.path.join(annotations_dir, example + '.xml')
            with tf.gfile.GFile(path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str)
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            # Create tf.Example and add to tf.Record
            tf_example = _dict_to_tf_example(data, data_dir)
            writer.write(tf_example.SerializeToString())

    writer.close()
    print('Saved tf Record to {}\n'.format(output_file))
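A minimal usage sketch for create_record_file above; the VOCdevkit path and output filename are hypothetical. Passing year='merged' converts the chosen split of both VOC2007 and VOC2012 into a single TFRecord file:

# Hypothetical paths; 'merged' pulls examples from both VOC2007 and VOC2012.
create_record_file(data_dir='/data/VOCdevkit',
                   output_file='pascal_train.record',
                   year='merged',
                   split_name='train')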
def main(_):
    data_dir = FLAGS.data_dir
    print('running here')
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    logging.info('Reading from Pet dataset.')
    image_dir = data_dir
    # annotations_dir = os.path.join(data_dir, 'annotations')
    examples_path = os.path.join(data_dir, 'xml_random.txt')
    examples_list = dataset_util.read_examples_list(examples_path)

    # # Test images are not included in the downloaded data set, so we shall perform
    # # our own split.
    # random.seed(42)
    # random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(1 * num_examples)  # use all examples for training
    train_examples = examples_list[:num_train]
    # val_examples = examples_list[num_train:]
    # logging.info('%d training and %d validation examples.',
    #              len(train_examples), len(val_examples))

    train_output_path = os.path.join(FLAGS.output_dir, 'object_train.record')
    print('train_output_path: {}'.format(train_output_path))
    print('image_dir: {}'.format(image_dir))
    # val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')
    len_train_examples = 'train_examples number: {}'.format(len(train_examples))
    print(len_train_examples)
    create_tf_record(train_output_path, label_map_dict, image_dir, train_examples)
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    data_dir = FLAGS.data_dir
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    print("output_path is:")
    print(FLAGS.output_path)
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    logging.info('Reading from VID dataset.')
    examples_path = os.path.join(data_dir, 'ImageSets', 'VID', 'list',
                                 FLAGS.set + '_list.txt')
    annotations_dir = os.path.join(data_dir, FLAGS.annotations_dir, 'VID',
                                   FLAGS.set)
    examples_list = dataset_util.read_examples_list(examples_path)
    for idx, example in enumerate(examples_list):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(examples_list))
        path = os.path.join(annotations_dir, example)
        with tf.gfile.GFile(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
                                        FLAGS.set)
        writer.write(tf_example.SerializeToString())

    writer.close()
def main(_):
    data_dir = FLAGS.data_dir
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    logging.info('Reading from Pet dataset.')
    image_dir = os.path.join(data_dir, 'images')
    annotations_dir = os.path.join(data_dir, 'annotations')
    examples_path = os.path.join(annotations_dir, 'trainval.txt')
    examples_list = dataset_util.read_examples_list(examples_path)

    # Test images are not included in the downloaded data set, so we shall perform
    # our own split.
    random.seed(42)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(0.8 * num_examples)
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]
    logging.info('%d training and %d validation examples.',
                 len(train_examples), len(val_examples))

    train_output_path = os.path.join(FLAGS.output_dir, 'hand_train.record')
    val_output_path = os.path.join(FLAGS.output_dir, 'hand_val.record')
    create_tf_record(train_output_path, label_map_dict, annotations_dir,
                     image_dir, train_examples)
    create_tf_record(val_output_path, label_map_dict, annotations_dir,
                     image_dir, val_examples)
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))
    if FLAGS.year not in YEARS:
        raise ValueError('year must be in : {}'.format(YEARS))

    data_dir = FLAGS.data_dir
    years = ['VOC2007', 'VOC2012']
    if FLAGS.year != 'merged':
        years = [FLAGS.year]

    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)

    for year in years:
        logging.info('Reading from PASCAL %s dataset.', year)
        examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
                                     'aeroplane_' + FLAGS.set + '.txt')
        annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
        examples_list = dataset_util.read_examples_list(examples_path)
        for idx, example in enumerate(examples_list):
            if idx % 100 == 0:
                logging.info('On image %d of %d', idx, len(examples_list))
            path = os.path.join(annotations_dir, example + '.xml')
            with tf.gfile.GFile(path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str)
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            tf_example = dict_to_tf_example(data, FLAGS.data_dir,
                                            FLAGS.ignore_difficult_instances)
            writer.write(tf_example.SerializeToString())

    writer.close()
def main(_):
    logging.info('Reading from Pet dataset.')
    image_dir = os.path.join('annotations', 'images')
    annotations_dir = os.path.join('annotations', 'labels')
    examples_path = os.path.join(annotations_dir, 'trainval.txt')
    examples_list = dataset_util.read_examples_list(examples_path)
    label_map_dict = label_map_util.get_label_map_dict(
        os.path.join(annotations_dir, 'label_map.pbtxt'))

    # Test images are not included in the downloaded data set, so we shall perform
    # our own split.
    random.seed(42)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(0.7 * num_examples)
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]
    logging.info('%d training and %d validation examples.',
                 len(train_examples), len(val_examples))

    train_output_path = 'train.record'
    val_output_path = 'val.record'
    create_tf_record(train_output_path, label_map_dict, annotations_dir,
                     image_dir, train_examples)
    create_tf_record(val_output_path, label_map_dict, annotations_dir,
                     image_dir, val_examples)

    if not os.path.exists(os.path.join(MODEL_DIR, 'model.ckpt.index')):
        download_base_model()
def main(unused_argv):
    if not os.path.exists(FLAGS.output_path):
        os.makedirs(FLAGS.output_path)

    tf.logging.info("Reading from xView2 dataset")
    image_dir = os.path.join(FLAGS.data_dir, FLAGS.image_data_dir)
    label_dir = os.path.join(FLAGS.data_dir, FLAGS.label_data_dir)

    train_examples = dataset_util.read_examples_list(FLAGS.train_data_list)
    val_examples = dataset_util.read_examples_list(FLAGS.valid_data_list)

    train_output_path = os.path.join(FLAGS.output_path, 'xview2_train.record')
    val_output_path = os.path.join(FLAGS.output_path, 'xview2_val.record')

    create_tf_record(train_output_path, image_dir, label_dir, train_examples)
    create_tf_record(val_output_path, image_dir, label_dir, val_examples)
def load_pascal(data_dir, set, is_detect=True, is_seg=False):
    assert is_detect != is_seg
    annotations_dir = os.path.join(data_dir, "Annotations")
    image_dir = os.path.join(data_dir, "JPEGImages")
    pascal_ann_dict = dict()
    if is_detect:
        examples_path = os.path.join(data_dir, 'ImageSets', 'Main',
                                     set + '.txt')
    if is_seg:
        examples_path = os.path.join(data_dir, 'ImageSets', 'Segmentation',
                                     set + '.txt')
    examples_list = dataset_util.read_examples_list(examples_path)
    for example in examples_list:
        path = os.path.join(annotations_dir, example + '.xml')
        with open(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
        data["img_path"] = os.path.join(image_dir, data['filename'])
        pascal_ann_dict[data['filename']] = data
    return pascal_ann_dict
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    data_dir = FLAGS.data_dir
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    logging.info('Reading dataset.')
    examples_path = '/home/wangshiyao/Documents/data/imagenet/gen_list/combine_train_list.txt'
    annotations_dir = '/home/wangshiyao/Documents/data/imagenet/'
    examples_list = dataset_util.read_examples_list(examples_path)

    num_label = [0] * 31
    for idx, example in enumerate(examples_list):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(examples_list))
            print(idx, num_label)
        path = os.path.join(annotations_dir, example)
        with tf.gfile.GFile(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        tf_example = dict_to_tf_example(data, example, FLAGS.data_dir,
                                        label_map_dict, FLAGS.set, num_label)
        # NOTE: writing is disabled, so this pass only tallies num_label.
        # writer.write(tf_example.SerializeToString())

    writer.close()
def main(unused_argv):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    pred_hooks = None
    if FLAGS.debug:
        debug_hook = tf_debug.LocalCLIDebugHook()
        pred_hooks = [debug_hook]

    model = tf.estimator.Estimator(
        model_fn=deeplab_model.deeplabv3_model_fn,
        model_dir=FLAGS.model_dir,
        params={
            'output_stride': FLAGS.output_stride,
            'batch_size': 1,  # Batch size must be 1 because the images' size may differ
            'base_architecture': FLAGS.base_architecture,
            'pre_trained_model': None,
            'batch_norm_decay': None,
            'num_classes': _NUM_CLASSES,
        })

    examples = dataset_util.read_examples_list(FLAGS.infer_data_list)
    image_files = [os.path.join(FLAGS.data_dir, filename) for filename in examples]

    predictions = model.predict(
        input_fn=lambda: preprocessing.eval_input_fn(image_files),
        hooks=pred_hooks)

    output_dir = FLAGS.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for pred_dict, image_path in zip(predictions, image_files):
        image_basename = os.path.splitext(os.path.basename(image_path))[0]
        output_filename = image_basename + '_mask.png'
        path_to_output = os.path.join(output_dir, output_filename)
        img = Image.open(image_path)

        print("generating:", path_to_output)
        mask = pred_dict['decoded_labels']
        mask = Image.fromarray(mask)
        crf_result = crf(img, pred_dict['probabilities'], 10)
        crf_argmax = np.expand_dims(
            np.expand_dims(np.argmax(crf_result, axis=2), axis=0), axis=3)
        crf_decode = np.squeeze(
            preprocessing.decode_labels(crf_argmax)).transpose((1, 0, 2))

        cmap = plt.get_cmap('bwr')
        f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
        ax1.imshow(mask)
        ax1.set_title('Segmentation with Deeplab')
        ax2.imshow(crf_decode, cmap=cmap)
        ax2.set_title('Segmentation with CRF post-processing _ 1')
        f.savefig(path_to_output, bbox_inches='tight')
def test_read_examples_list(self):
    example_list_data = """example1 1\nexample2 2"""
    example_list_path = os.path.join(self.get_temp_dir(), 'examples.txt')
    with tf.gfile.Open(example_list_path, 'wb') as f:
        f.write(example_list_data)

    examples = dataset_util.read_examples_list(example_list_path)
    self.assertListEqual(['example1', 'example2'], examples)
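For reference, a minimal sketch of what dataset_util.read_examples_list does, consistent with the test above. This is an assumed implementation, not code taken from this corpus: each line of the list file holds an example name, optionally followed by extra fields, and only the first whitespace-separated token is kept.

import tensorflow as tf

def read_examples_list(path):
    # Read the example list file; each line looks like "example1 1",
    # i.e. an example name optionally followed by extra fields.
    with tf.gfile.GFile(path) as fid:
        lines = fid.readlines()
    # Keep only the first whitespace-separated token of each line.
    return [line.strip().split(' ')[0] for line in lines]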
def main(unused_argv):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    pred_hooks = None
    if FLAGS.debug:
        debug_hook = tf_debug.LocalCLIDebugHook()
        pred_hooks = [debug_hook]

    model = tf.estimator.Estimator(
        model_fn=deeplab_model.deeplabv3_model_fn,
        model_dir=FLAGS.model_dir,
        params={
            'output_stride': FLAGS.output_stride,
            'batch_size': 1,  # Batch size must be 1 because the images' size may differ
            'base_architecture': FLAGS.base_architecture,
            'pre_trained_model': None,
            'batch_norm_decay': None,
            'num_classes': _NUM_CLASSES,
        })

    examples = dataset_util.read_examples_list(FLAGS.infer_data_list)
    image_files = [os.path.join(FLAGS.data_dir, filename) + '.jpg'
                   for filename in examples]

    output_dir = FLAGS.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    CROP_HEIGHT = 500
    CROP_WIDTH = 500
    for img in image_files:
        width, height = Image.open(img).size
        full_mask = Image.new('RGBA', (width, height))
        print('SIZE', width, height)
        for i in range(0, int(math.ceil(width / CROP_WIDTH))):
            for j in range(0, int(math.ceil(height / CROP_HEIGHT))):
                # [top, left, crop height, crop width]; the last tile in each
                # direction is clipped to the image boundary.
                CROP = [
                    j * CROP_HEIGHT,
                    i * CROP_WIDTH,
                    CROP_HEIGHT if (j + 1) * CROP_HEIGHT <= height else height % CROP_HEIGHT,
                    CROP_WIDTH if (i + 1) * CROP_WIDTH <= width else width % CROP_WIDTH
                ]
                print(CROP)
                predictions = model.predict(
                    input_fn=lambda: preprocessing.infer_input_fn([img], CROP),
                    hooks=pred_hooks)
                mask = next(predictions)['decoded_labels']
                mask = Image.fromarray(mask)
                full_mask.paste(mask, (CROP[1], CROP[0]))
        print('saving')
        image_basename = img.split('.')[0]
        full_mask.save(image_basename + '_mask.png')
def main(unused_argv):
    if not os.path.exists(FLAGS.output_path):
        os.makedirs(FLAGS.output_path)

    tf.logging.info("Reading from VOC dataset")
    image_dir = os.path.join(FLAGS.data_dir, FLAGS.image_data_dir)
    label_dir = os.path.join(FLAGS.data_dir, FLAGS.label_data_dir)
    if not os.path.isdir(label_dir):
        raise ValueError("Missing Augmentation label directory.")

    train_examples = dataset_util.read_examples_list(FLAGS.train_data_list)
    val_examples = dataset_util.read_examples_list(FLAGS.valid_data_list)

    train_output_path = os.path.join(FLAGS.output_path, 'voc_train.record')
    val_output_path = os.path.join(FLAGS.output_path, 'voc_val.record')

    create_tf_record(train_output_path, image_dir, label_dir, train_examples)
    create_tf_record(val_output_path, image_dir, label_dir, val_examples)
def main(unused_argv):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    pred_hooks = None
    if FLAGS.debug:
        debug_hook = tf_debug.LocalCLIDebugHook()
        pred_hooks = [debug_hook]

    model = tf.estimator.Estimator(
        model_fn=deeplab_model.deeplabv3_plus_model_fn,
        model_dir=FLAGS.model_dir,
        params={
            'output_stride': FLAGS.output_stride,
            'batch_size': 1,  # Batch size must be 1 because the images' size may differ
            'base_architecture': FLAGS.base_architecture,
            'pre_trained_model': None,
            'batch_norm_decay': None,
            'num_classes': _NUM_CLASSES,
        })
    print("We are after estimator" + str(time.time() - startTime))

    examples = dataset_util.read_examples_list(FLAGS.infer_data_list)
    image_files = [os.path.join(FLAGS.data_dir, filename + ".jpg")
                   for filename in examples]

    while True:
        predictions = model.predict(
            input_fn=lambda: preprocessing.eval_input_fn(image_files),
            hooks=pred_hooks)
        # predictions = model.predict(
        #     input_fn=lambda: iter(cam),
        #     hooks=pred_hooks)
        print("We are after prediction" + str(time.time() - startTime))

        output_dir = FLAGS.output_dir
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        for pred_dict, image_path in zip(predictions, image_files):
            # ret, frame = cap.read()
            # cv2.imshow("camera", frame)
            # time.sleep(1)
            # cv2.waitKey(0)
            # cv2.imwrite("./img.jpg", frame)
            print(str(time.time() - startTime))
            image_basename = os.path.splitext(os.path.basename(image_path))[0]
            output_filename = image_basename + '_mask.png'
            path_to_output = os.path.join(output_dir, output_filename)

            print("generating:", path_to_output)
            mask = pred_dict['decoded_labels']
            mask = Image.fromarray(mask)
            plt.axis('off')
            plt.imshow(mask)
            # plt.show()
            plt.savefig(path_to_output, bbox_inches='tight')
def main(unused_argv):
    if not os.path.exists(FLAGS.output_path):
        os.makedirs(FLAGS.output_path)

    tf.logging.info("Reading from cityscapes dataset")
    # left_dir = os.path.join(FLAGS.data_dir, FLAGS.left_data_dir)
    # right_dir = os.path.join(FLAGS.data_dir, FLAGS.right_data_dir)
    # semantic_dir = os.path.join(FLAGS.data_dir, FLAGS.semantic_data_dir)
    # disparity_dir = os.path.join(FLAGS.data_dir, FLAGS.disparity_data_dir)

    train_examples = dataset_util.read_examples_list(FLAGS.train_data_list)
    val_examples = dataset_util.read_examples_list(FLAGS.valid_data_list)

    train_output_path = os.path.join(FLAGS.output_path, 'cityscapes_train.record')
    val_output_path = os.path.join(FLAGS.output_path, 'cityscapes_val.record')

    create_tf_record(train_output_path, train_examples)
    create_tf_record(val_output_path, val_examples)
def main(unused_argv):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    pred_hooks = None
    if FLAGS.debug:
        debug_hook = tf_debug.LocalCLIDebugHook()
        pred_hooks = [debug_hook]

    model = tf.estimator.Estimator(
        model_fn=fcn8_vgg_model.model_fn,
        model_dir=FLAGS.model_dir,
        params={
            'batch_size': 1,  # Batch size must be 1 because the images' size may differ
            'num_classes': _NUM_CLASSES,
            'weight_decay': FLAGS.weight_decay,
            'learning_rate_policy': FLAGS.learning_rate_policy,
            'num_train': _NUM_IMAGES['train'],
            'initial_learning_rate': FLAGS.initial_learning_rate,
            'max_iter': FLAGS.max_iter,
            'end_learning_rate': FLAGS.end_learning_rate,
            'power': _POWER,
            'momentum': _MOMENTUM,
            'initial_global_step': FLAGS.initial_global_step,
            'vgg16_ckpt_path': FLAGS.vgg16_ckpt_path,
            'vgg16_npy_path': FLAGS.vgg16_npy_path,
            'keep_prob': FLAGS.keep_prob,
        })

    examples = dataset_util.read_examples_list(FLAGS.infer_data_list)
    image_files = [os.path.join(FLAGS.data_dir, filename) for filename in examples]
    print(image_files)

    predictions = model.predict(
        input_fn=lambda: preprocessing.eval_input_fn(image_files),
        hooks=pred_hooks)

    output_dir = FLAGS.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for pred_dict, image_path in zip(predictions, image_files):
        image_basename = os.path.splitext(os.path.basename(image_path))[0]
        output_filename = image_basename + '_mask.png'
        path_to_output = os.path.join(output_dir, output_filename)

        print("generating:", path_to_output)
        mask = pred_dict['decoded_labels']
        print('mask', mask.shape)
        mask = Image.fromarray(mask)
        mask.save(path_to_output)
def main(_):
    """Main function for generating the WIDER FACE TFRecords."""
    train_data_dir = FLAGS.train_data_dir  # train_data_dir: ./WIDER_train/images
    val_data_dir = FLAGS.val_data_dir  # val_data_dir: ./WIDER_val/images

    train_examples_path = FLAGS.train_examples_path  # train_examples_path: ./examples_files/train_examples.txt
    val_examples_path = FLAGS.val_examples_path  # val_examples_path: ./examples_files/val_examples.txt
    # TODO: check read_examples_list here
    # train_examples: a list of all image names in the training set
    # (e.g. 0--Parade/0_Parade_marchingband_1_849.jpg)
    train_examples = dataset_util.read_examples_list(train_examples_path)
    # val_examples: a list of all image names in the validation set
    # (e.g. 0--Parade/0_Parade_marchingband_1_465.jpg)
    val_examples = dataset_util.read_examples_list(val_examples_path)

    train_anno_path = FLAGS.train_anno_path
    val_anno_path = FLAGS.val_anno_path
    train_anno_dict = ra.read_anno(train_anno_path)
    val_anno_dict = ra.read_anno(val_anno_path)

    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    random.seed(42)
    random.shuffle(train_examples)
    random.shuffle(val_examples)

    # output_dir: ./tfrecord_data
    train_output_path = os.path.join(FLAGS.output_dir, 'wider_faces_train.record')
    val_output_path = os.path.join(FLAGS.output_dir, 'wider_faces_val.record')
    create_tf_record(train_output_path, FLAGS.num_shards, label_map_dict,
                     train_anno_dict, train_data_dir, train_examples)
    create_tf_record(val_output_path, FLAGS.num_shards, label_map_dict,
                     val_anno_dict, val_data_dir, val_examples)
def main(unused_argv):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    pred_hooks = None
    if FLAGS.debug:
        debug_hook = tf_debug.LocalCLIDebugHook()
        pred_hooks = [debug_hook]

    model = tf.estimator.Estimator(
        model_fn=deeplab_model.deeplabv3_model_fn,
        model_dir=FLAGS.model_dir,
        params={
            'output_stride': FLAGS.output_stride,
            'batch_size': 1,  # Batch size must be 1 because the images' size may differ
            'base_architecture': FLAGS.base_architecture,
            'pre_trained_model': None,
            'batch_norm_decay': None,
            'num_classes': _NUM_CLASSES,
        })

    examples = dataset_util.read_examples_list(FLAGS.infer_data_list)
    image_files = [os.path.join(FLAGS.data_dir, filename) for filename in examples]

    predictions = model.predict(
        input_fn=lambda: preprocessing.eval_input_fn(image_files),
        hooks=pred_hooks)

    output_dir = FLAGS.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # The model's output `predictions` contains the classes image, the
    # probabilities map, and decoded_labels, the decoded segmentation image.
    for pred_dict, image_path in zip(predictions, image_files):  # pair each prediction with its image path
        image_basename = os.path.splitext(os.path.basename(image_path))[0]
        output_filename = image_basename + '_mask.png'
        path_to_output = os.path.join(output_dir, output_filename)

        print("generating:", path_to_output)
        mask = pred_dict['decoded_labels']  # take the decoded image as the result
        mask = Image.fromarray(mask)
        plt.axis('off')
        plt.imshow(mask)
        plt.savefig(path_to_output, bbox_inches='tight')
def main():
    '''Main routine for generating the TFRecords.'''
    if not os.path.exists(Config.tfrecord_path):
        os.makedirs(Config.tfrecord_path)

    # Works like print
    tf.logging.info('Reading the data')
    image_dir = os.path.join(Config.data_dir, Config.image_data_dir)
    label_dir = os.path.join(Config.data_dir, Config.label_data_dir)
    if not os.path.isdir(label_dir):
        raise ValueError('Data is missing; please download it first.')

    # Get the lists of training and validation image indices
    train_examples = dataset_util.read_examples_list(Config.train_data_list)
    val_examples = dataset_util.read_examples_list(Config.val_data_list)

    # Output paths for the training and validation TFRecords
    train_output_path = os.path.join(Config.tfrecord_path, 'train.record')
    val_output_path = os.path.join(Config.tfrecord_path, 'val.record')

    # Generate the TFRecords
    create_record(train_output_path, image_dir, label_dir, train_examples)
    create_record(val_output_path, image_dir, label_dir, val_examples)
def main():
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '0'

    # Load the data
    examples = dataset_util.read_examples_list(FLAGS.evaluation_data_list)
    image_files = [os.path.join(FLAGS.image_data_dir, filename) + '.jpg'
                   for filename in examples]
    label_files = [os.path.join(FLAGS.label_data_dir, filename) + '.png'
                   for filename in examples]
    features, labels = eval_or_test_input_fn.eval_input_fn(image_files, label_files)

    # Manually load the latest checkpoint
    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
        saver.restore(sess, ckpt.model_checkpoint_path)
def main(_):
    print(FLAGS.data_dir)
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))
    # if FLAGS.year not in YEARS:
    #     raise ValueError('year must be in : {}'.format(YEARS))

    data_dir = FLAGS.data_dir
    # years = ['VOC2007', 'VOC2012']
    # if FLAGS.year != 'merged':
    years = [FLAGS.year]

    ACTIONSET = ['tfrecord', 'imageset']
    if FLAGS.action not in ACTIONSET:
        raise ValueError('action must be in : {}'.format(ACTIONSET))
    if FLAGS.action == 'tfrecord':
        pass
    elif FLAGS.action == 'imageset':
        gen_image_set(FLAGS.data_dir, FLAGS.year, FLAGS.imageset)
        return

    writer = tf.io.TFRecordWriter(FLAGS.output_path)
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    for year in years:
        logging.info('Reading from PASCAL %s dataset.', year)
        examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
                                     FLAGS.imageset + '_' + FLAGS.set + '.txt')
        annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
        examples_list = dataset_util.read_examples_list(examples_path)
        for idx, example in enumerate(examples_list):
            if idx % 100 == 0:
                logging.info('On image %d of %d', idx, len(examples_list))
            path = os.path.join(annotations_dir, example + '.xml')
            with tf.io.gfile.GFile(path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str.encode('utf-8'))
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
                                            FLAGS.ignore_difficult_instances)
            writer.write(tf_example.SerializeToString())

    writer.close()
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    data_dir = FLAGS.data_dir
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    categories = [line.rstrip('\n') for line in open(FLAGS.categories_path)]

    logging.info('Reading from UECFOOD256 dataset.')
    list_set_path = os.path.join(data_dir, 'ImageSets', FLAGS.set + '.txt')
    images_dir = os.path.join(data_dir, FLAGS.images_dir)
    annotations_dir = os.path.join(data_dir, FLAGS.annotations_dir)
    list_set = dataset_util.read_examples_list(list_set_path)

    for idx, example in enumerate(list_set):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(list_set))

        # Annotation characteristics
        ann_path = os.path.join(annotations_dir, example + '.txt')

        # Read annotations: one whitespace-separated row per object,
        # with the label first and four bounding-box coordinates after it.
        data = [line.rstrip('\n').split() for line in open(ann_path)]
        bboxes = []
        labels = []
        for obj in data:
            labels.append(float(obj[0]))
            bboxes.append(
                [float(obj[1]), float(obj[2]), float(obj[3]), float(obj[4])])

        data = {
            'data_dir': images_dir,
            'filename': example + '.jpg',
            'boxes': bboxes,
            'labels': labels,
        }

        tf_example = dict_to_tf_example(data, FLAGS.data_dir, categories,
                                        FLAGS.ignore_difficult_instances)
        writer.write(tf_example.SerializeToString())

    writer.close()
def main(_):
    root_dir = FLAGS.root_dir
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    # Read example list files
    logging.info('Reading from VID 2015 dataset. ({})'.format(root_dir))
    list_file_pattern = 'ImageSets/VID/{}*.txt'.format(FLAGS.set)
    examples_paths = sorted(glob.glob(os.path.join(root_dir, list_file_pattern)))
    # print('examples_paths', examples_paths)
    examples_list = []
    for examples_path in examples_paths:
        examples_list.extend(dataset_util.read_examples_list(examples_path))
    if FLAGS.set != 'train':
        # Drop the last seven characters (the per-frame suffix) and
        # deduplicate so each snippet appears once.
        examples_list2 = [e[:-7] for e in examples_list]
        examples_list = sorted(list(set(examples_list2)))
    if FLAGS.num_examples > 0:
        examples_list = examples_list[:FLAGS.num_examples]
    # print('examples_list', examples_list)

    # Sharding
    start_shard = FLAGS.start_shard
    num_shards = FLAGS.num_shards
    num_digits = math.ceil(math.log10(max(num_shards - 1, 2)))
    shard_format = '%0' + ('%d' % num_digits) + 'd'
    examples_per_shard = int(math.ceil(len(examples_list) / float(num_shards)))

    annotations_dir = os.path.join(root_dir, 'Annotations/VID/{}'.format(FLAGS.set))
    print('annotations_dir', annotations_dir)

    # Generate each shard
    for i in range(start_shard, num_shards):
        start = i * examples_per_shard
        end = (i + 1) * examples_per_shard
        out_filename = os.path.join(FLAGS.output_path,
                                    'VID_2015-' + (shard_format % i) + '.tfrecord')
        if os.path.isfile(out_filename):  # Don't recreate data if restarting
            continue
        print('{} of {} [{}:{}] {}'.format(i, num_shards, start, end, out_filename))
        gen_shard(examples_list[start:end], annotations_dir, out_filename,
                  root_dir, FLAGS.set)
    return
def main(_):
    logging.info('Preparing to process samples in {}'.format(FLAGS.data_dir))
    data_dir = FLAGS.data_dir
    years = list(map(lambda x: x.strip(), str(FLAGS.year).split(',')))

    label_map_file = FLAGS.label_map_path
    if not os.path.exists(label_map_file):
        label_map_file = os.path.join(data_dir, 'label_map.pbtxt')
        if not os.path.exists(label_map_file):
            raise FileNotFoundError('label map file does not exist.')
    label_map_dict = label_map_util.get_label_map_dict(label_map_file)

    output_path = FLAGS.output_path
    if not output_path:
        output_path = os.path.basename(os.path.dirname(data_dir + os.sep)) + '.tfrecord'
    logging.info('Preparing to write samples to {}'.format(output_path))
    writer = tf.io.TFRecordWriter(output_path)

    for year in years:
        logging.info('Reading from PASCAL %s dataset.', year)
        examples_path = gen_image_set(FLAGS.data_dir, year)
        examples_list = dataset_util.read_examples_list(examples_path)
        annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
        for idx, example in enumerate(examples_list):
            if idx % 100 == 0:
                logging.info('On image %d of %d', idx, len(examples_list))
            path = os.path.join(annotations_dir, example + '.xml')
            with tf.io.gfile.GFile(path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str.encode('utf-8'))
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            tf_example = dict_to_tf_example(data, FLAGS.data_dir, year,
                                            label_map_dict,
                                            FLAGS.ignore_difficult_instances)
            writer.write(tf_example.SerializeToString())

    writer.close()
def main(_):
    data_dir = FLAGS.data_dir
    vis_dir = FLAGS.vis_dir
    image_dir = os.path.join(data_dir, 'grid_maps')
    examples_path = os.path.join(data_dir, 'trainval.txt')
    examples_list = dataset_util.read_examples_list(examples_path)

    num_examples = len(examples_list)
    num_train = int(0.7 * num_examples)
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]

    label_map_path = FLAGS.label_map_path
    graph_dir = FLAGS.graph_path
    train_output_path = os.path.join(FLAGS.output_dir, 'train')
    val_output_path = os.path.join(FLAGS.output_dir, 'val')
    create_kitti_labels(val_output_path, label_map_path, image_dir, vis_dir,
                        graph_dir, val_examples)
def main(_):
    data_dir = FLAGS.data_dir
    label_map_dict = label_map_util.get_label_map_dict(
        FLAGS.label_map_path)  # map the labels to indices

    logging.info('Reading from Pet dataset.')
    image_dir = os.path.join(data_dir, 'images')  # image directory
    annotations_dir = os.path.join(data_dir, 'annotations')  # annotation directory
    examples_path = os.path.join(annotations_dir,
                                 'trainval.txt')  # example names and conventions
    examples_list = dataset_util.read_examples_list(
        examples_path)  # read this as a list; it pairs each image with its index

    # Test images are not included in the downloaded data set, so we shall perform
    # our own split.
    random.seed(42)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(0.7 * num_examples)  # divide off the training portion
    train_examples = examples_list[:num_train]  # train
    val_examples = examples_list[num_train:]  # val
    logging.info('%d training and %d validation examples.',
                 len(train_examples), len(val_examples))

    train_output_path = os.path.join(
        FLAGS.output_dir, 'pet_train.record')  # output paths for the records
    val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')
    # Create the TFRecords
    create_tf_record(train_output_path, label_map_dict, annotations_dir,
                     image_dir, train_examples)
    create_tf_record(val_output_path, label_map_dict, annotations_dir,
                     image_dir, val_examples)
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    data_dir = FLAGS.data_dir
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

    print('Reading from PASCAL dataset.')
    examples_path = os.path.join(data_dir, 'ImageSets', 'Main',
                                 FLAGS.set + '.txt')
    if FLAGS.include_segment_class or FLAGS.include_segment_object:
        examples_path = os.path.join(data_dir, 'ImageSets', 'Segmentation',
                                     FLAGS.set + '.txt')
    annotations_dir = os.path.join(data_dir, FLAGS.annotations_dir)
    examples_list = dataset_util.read_examples_list(examples_path)
    for idx, example in enumerate(examples_list):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(examples_list))
        path = os.path.join(annotations_dir, example + '.xml')
        mask_filename = None
        if FLAGS.include_segment_class or FLAGS.include_segment_object:
            mask_filename = example + ".png"
        with tf.gfile.GFile(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
                                        FLAGS.ignore_difficult_instances,
                                        mask_filename=mask_filename,
                                        include_segment_class=FLAGS.include_segment_class,
                                        include_segment_object=FLAGS.include_segment_object)
        writer.write(tf_example.SerializeToString())

    writer.close()