def test_example_classification_config_yaml():
    """Test that the config and meta yaml exported from the example
    classification python config match the committed yaml files."""
    config_file = os.path.join("configs", "example", "classification.py")
    config = _load_py(config_file)

    config_yaml = os.path.join("configs", "example", "classification.yaml")
    config_meta = os.path.join("configs", "example", "classification_meta.yaml")

    environment.init("test_example_classification_config_yaml")
    saved_config, saved_meta = save_yaml(environment.EXPERIMENT_DIR, config)
    print(saved_meta)

    # The exported config yaml should match the committed example yaml.
    with open(config_yaml) as f:
        expected = f.read()
    with open(saved_config) as f:
        data = f.read()
    assert expected == data

    # The exported meta yaml should match the committed example meta yaml.
    with open(config_meta) as f:
        expected = f.read()
    with open(saved_meta) as f:
        data = f.read()
    assert expected == data
def run(network, dataset, config_file, experiment_id, recreate):
    environment.init(experiment_id)

    config = config_util.load(config_file)

    if network:
        network_class = module_loader.load_network_class(network)
        config.NETWORK_CLASS = network_class
    if dataset:
        dataset_class = module_loader.load_dataset_class(dataset)
        config.DATASET_CLASS = dataset_class

    config_util.display(config)
    executor.init_logging(config)

    executor.prepare_dirs(recreate)

    config_util.copy_to_experiment_dir(config_file)
    config_util.save_yaml(environment.EXPERIMENT_DIR, config)

    start_training(config)
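# Illustrative usage sketch (not part of the original source): how run() might be
# invoked directly for the example classification config, with a placeholder
# experiment id and no network/dataset overrides. Wrapped in a helper so nothing
# executes on import.
def _example_run_usage():
    run(
        network=None,
        dataset=None,
        config_file=os.path.join("configs", "example", "classification.py"),
        experiment_id="example_classification",
        recreate=False,
    )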
def _export(config, restore_path, image_path):
    # Fall back to the newest checkpoint in the experiment when no restore path is given.
    if restore_path is None:
        restore_file = executor.search_restore_filename(environment.CHECKPOINTS_DIR)
        restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

    print("Restore from {}".format(restore_path))

    if not os.path.exists("{}.index".format(restore_path)):
        raise Exception("restore file {} does not exist.".format(restore_path))

    output_root_dir = os.path.join(environment.EXPERIMENT_DIR, "export")
    output_root_dir = os.path.join(output_root_dir, os.path.basename(restore_path))

    if not os.path.exists(output_root_dir):
        os.makedirs(output_root_dir)

    graph = tf.Graph()
    ModelClass = config.NETWORK_CLASS
    network_kwargs = dict((key.lower(), val) for key, val in config.NETWORK.items())

    # Build the inference graph with is_training fixed to False.
    with graph.as_default():
        model = ModelClass(
            classes=config.CLASSES,
            is_debug=config.IS_DEBUG,
            **network_kwargs,
        )

        is_training = tf.constant(False, name="is_training")

        images_placeholder, _ = model.placeholders()
        model.inference(images_placeholder, is_training)
        init_op = tf.global_variables_initializer()

        saver = tf.train.Saver(max_to_keep=50)

    session_config = tf.ConfigProto()
    sess = tf.Session(graph=graph, config=session_config)
    sess.run(init_op)
    saver.restore(sess, restore_path)

    main_output_dir = os.path.join(
        output_root_dir,
        "{}x{}".format(config.IMAGE_SIZE[0], config.IMAGE_SIZE[1]))
    if not os.path.exists(main_output_dir):
        os.makedirs(main_output_dir)

    # npy files for DLK debug.
    if image_path:
        all_ops = _minimal_operations(sess)

        npy_output_dir = os.path.join(main_output_dir, "inference_test_data")
        if not os.path.exists(npy_output_dir):
            os.makedirs(npy_output_dir)

        raw_image = load_image(*config.IMAGE_SIZE)
        image = _pre_process(raw_image, config.PRE_PROCESSOR, config.DATA_FORMAT)
        images = np.expand_dims(image, axis=0)
        feed_dict = {
            images_placeholder: images,
        }

        # Run every operation and dump its outputs so they can be diffed later.
        all_outputs = []
        index = 0
        for op in all_ops:
            for op_output in op.outputs:
                val = sess.run(op_output.name, feed_dict=feed_dict)
                name = '%03d' % index + '_' + op_output.name.replace('/', '_')
                all_outputs.append({'val': val, 'name': name})
                index += 1

        _save_npy(image_path, npy_output_dir, image, raw_image, all_outputs,
                  config.IMAGE_SIZE)

    yaml_names = config_util.save_yaml(main_output_dir, config)
    pb_name = executor.save_pb_file(sess, main_output_dir)

    message = """
Create pb and yaml files in: {}
pb: {}
yaml: {}, {}
""".format(main_output_dir, pb_name, *yaml_names)

    if image_path:
        message += "Create npy files under the `inference_test_data` folder \n"
        message += "npy: {}".format([d["name"] for d in all_outputs] + [
            "raw_image",
            "preprocessed_image",
        ])

    print(message)
    print("finish")

    return main_output_dir
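# Illustrative usage sketch (not part of the original source): exporting the latest
# checkpoint of an experiment to a pb/yaml bundle. The experiment id is a
# placeholder, and the config is loaded here from the example python config purely
# for illustration; restore_path=None makes _export() pick the newest checkpoint
# under CHECKPOINTS_DIR, and image_path=None skips the npy debug dump.
def _example_export_usage():
    environment.init("example_classification")
    config = config_util.load(os.path.join("configs", "example", "classification.py"))
    return _export(config, restore_path=None, image_path=None)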