Example #1
def run(config_file, experiment_id, recreate):
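    """Initialize the experiment, load the config, set up Horovod if enabled,
    prepare directories and logging on rank 0, and start training."""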
    environment.init(experiment_id)
    config = config_util.load(config_file)

    if horovod_util.is_enabled():
        horovod_util.setup()

    if horovod_util.is_rank0():
        config_util.display(config)
        executor.init_logging(config)

        executor.prepare_dirs(recreate)
        config_util.copy_to_experiment_dir(config_file)
        config_util.save_yaml(environment.EXPERIMENT_DIR, config)

    start_training(config)
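
In the Blueoil repository this run() entry point is wired to a command-line
interface. Here is a minimal sketch of such a wrapper, assuming a click-based
CLI; the flag names are illustrative assumptions, not Blueoil's actual options:

import click

@click.command()
@click.option("-c", "--config_file", required=True, help="Path to a config file.")
@click.option("-i", "--experiment_id", required=True, help="Name for the experiment directory.")
@click.option("--recreate", is_flag=True, default=False, help="Recreate the experiment directory if it exists.")
def main(config_file, experiment_id, recreate):
    """Thin CLI wrapper that forwards its options to run() above."""
    run(config_file, experiment_id, recreate)

if __name__ == "__main__":
    main()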
Example #2
def test_example_object_detection_config_yaml():
    """Test that export config and meta yaml from example object_detection config python."""

    config_file = os.path.join("..", "blueoil", "configs", "example",
                               "object_detection.py")

    config = _load_py(config_file)

    config_yaml = os.path.join("..", "blueoil", "configs", "example",
                               "object_detection.yaml")

    config_meta = os.path.join("..", "blueoil", "configs", "example",
                               "object_detection_meta.yaml")

    environment.init("test_example_object_detection_config_yaml")
    saved_config, saved_meta = save_yaml(environment.EXPERIMENT_DIR, config)

    with open(config_yaml) as f:
        expected = f.read()
    with open(saved_config) as f:
        data = f.read()
        assert expected == data

    with open(config_meta) as f:
        expected = f.read()
    with open(saved_meta) as f:
        data = f.read()
        assert expected == data
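
The test compares the files written by save_yaml() against checked-in expected
files, so save_yaml() must return the paths of both the config and meta YAML it
writes. A minimal sketch of such a helper, assuming the config is a plain dict
of YAML-serializable values; the file names and the meta key selection are
assumptions, not Blueoil's actual implementation:

import os
import yaml

def save_yaml_sketch(output_dir, config):
    """Illustrative only: write a full config and a reduced meta view as YAML."""
    os.makedirs(output_dir, exist_ok=True)

    config_path = os.path.join(output_dir, "config.yaml")
    meta_path = os.path.join(output_dir, "meta.yaml")

    with open(config_path, "w") as f:
        yaml.dump(config, f, default_flow_style=False)

    # The meta file keeps only keys a runtime would need, e.g. for decoding outputs.
    meta = {key: config[key] for key in ("CLASSES", "IMAGE_SIZE") if key in config}
    with open(meta_path, "w") as f:
        yaml.dump(meta, f, default_flow_style=False)

    return config_path, meta_path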
Example #3
def run(network, dataset, config_file, experiment_id, recreate):
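    """Start training, optionally overriding the config's network and dataset classes by name."""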
    environment.init(experiment_id)
    config = config_util.load(config_file)

    if network:
        network_class = module_loader.load_network_class(network)
        config.NETWORK_CLASS = network_class
    if dataset:
        dataset_class = module_loader.load_dataset_class(dataset)
        config.DATASET_CLASS = dataset_class

    if horovod_util.is_enabled():
        horovod_util.setup()

    if horovod_util.is_rank0():
        config_util.display(config)
        executor.init_logging(config)

        executor.prepare_dirs(recreate)
        config_util.copy_to_experiment_dir(config_file)
        config_util.save_yaml(environment.EXPERIMENT_DIR, config)

    start_training(config)
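
This variant lets the command line override the network and dataset classes
declared in the config. module_loader presumably resolves a name to a class via
a dynamic import; a minimal sketch of that pattern, assuming a dotted
"module.ClassName" path (Blueoil's actual loader may search its own package
layout instead):

import importlib

def load_class_sketch(dotted_path):
    """Illustrative only: resolve 'package.module.ClassName' to the class object."""
    module_name, class_name = dotted_path.rsplit(".", 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# Hypothetical usage; the path below is an example, not a real Blueoil module.
# network_class = load_class_sketch("mypackage.networks.MyDetector")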
Example #4
def _export(config, restore_path, image_path):
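    """Restore a checkpoint into a fresh inference graph, optionally dump every
    operation's output for one image as npy test data, and save the config yaml
    and pb files under the experiment's export directory."""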
    if restore_path is None:
        restore_file = executor.search_restore_filename(
            environment.CHECKPOINTS_DIR)
        restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

    print("Restore from {}".format(restore_path))

    if not os.path.exists("{}.index".format(restore_path)):
        raise Exception("restore file {} does not exist.".format(restore_path))

    output_root_dir = os.path.join(environment.EXPERIMENT_DIR, "export")
    output_root_dir = os.path.join(output_root_dir,
                                   os.path.basename(restore_path))

    if not os.path.exists(output_root_dir):
        os.makedirs(output_root_dir)

    graph = tf.Graph()
    ModelClass = config.NETWORK_CLASS
    network_kwargs = dict(
        (key.lower(), val) for key, val in config.NETWORK.items())

    with graph.as_default():

        model = ModelClass(
            classes=config.CLASSES,
            is_debug=config.IS_DEBUG,
            **network_kwargs,
        )

        is_training = tf.constant(False, name="is_training")

        images_placeholder, _ = model.placeholders()
        model.inference(images_placeholder, is_training)
        init_op = tf.compat.v1.global_variables_initializer()

        saver = tf.compat.v1.train.Saver(max_to_keep=50)

    session_config = tf.compat.v1.ConfigProto()
    sess = tf.compat.v1.Session(graph=graph, config=session_config)
    sess.run(init_op)

    saver.restore(sess, restore_path)

    main_output_dir = os.path.join(
        output_root_dir, "{}x{}".format(config.IMAGE_SIZE[0],
                                        config.IMAGE_SIZE[1]))
    if not os.path.exists(main_output_dir):
        os.makedirs(main_output_dir)

    # save inference values as npy files for runtime inference test and debug.
    if image_path:
        all_ops = _minimal_operations(sess)
        inference_values_output_dir = os.path.join(main_output_dir,
                                                   "inference_test_data")

        if not os.path.exists(inference_values_output_dir):
            os.makedirs(inference_values_output_dir)

        raw_image = load_image(image_path)
        image = _pre_process(raw_image, config.PRE_PROCESSOR,
                             config.DATA_FORMAT)
        images = np.expand_dims(image, axis=0)
        feed_dict = {
            images_placeholder: images,
        }

        all_outputs = []
        index = 0
        for op in all_ops:
            for op_output in op.outputs:
                # HACK: workaround for a TensorFlow bug.
                # We can remove the following check once the bug is resolved in TensorFlow.
                # Issue link: https://github.com/tensorflow/tensorflow/issues/36456
                if (not tf.config.experimental.list_physical_devices('GPU')
                        and "FusedBatchNormV3" in op_output.name and int(
                            op_output.name.split(":")[1]) in set(range(1, 6))):
                    continue
                val = sess.run(op_output.name, feed_dict=feed_dict)
                name = '%03d' % index + '_' + op_output.name.replace('/', '_')
                all_outputs.append({'val': val, 'name': name})
                index += 1

        _save_all_operation_outputs(image_path, inference_values_output_dir,
                                    image, raw_image, all_outputs,
                                    config.IMAGE_SIZE)

    yaml_names = config_util.save_yaml(main_output_dir, config)
    pb_name = executor.save_pb_file(sess, main_output_dir)

    message = """
Created pb and yaml files in: {}
pb: {}
yaml: {}, {}
""".format(main_output_dir, pb_name, *yaml_names)

    if image_path:
        message += "Create npy files in under `inference_test_data` folder \n"
        message += "npy: {}".format([d["name"] for d in all_outputs] + [
            "raw_image",
            "preprocessed_image",
        ])

    print(message)
    print("finish")

    return main_output_dir
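
The export above ends by calling executor.save_pb_file(), which serializes the
restored graph for deployment. One common way to do this with TF1-style APIs is
to freeze the variables into constants; below is a minimal sketch under that
assumption (the function name, the output node argument, and the default file
name are illustrative, not necessarily how Blueoil implements it):

import tensorflow as tf

def save_pb_sketch(sess, output_dir, output_node_names, pb_name="minimal_graph.pb"):
    """Illustrative only: freeze variables to constants and write a binary GraphDef."""
    frozen_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), output_node_names)
    tf.io.write_graph(frozen_graph_def, output_dir, pb_name, as_text=False)
    return pb_name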