def _prepare_signature(layers: dict, model_keys):
    """Build a key -> TensorInfo mapping for a TF Serving SignatureDef."""
    if tf_version.split(".")[0] == "2":
        # Placeholders require graph mode, so disable eager execution on TF 2.x.
        disable_eager_execution()
    signature = {}
    for key, value in model_keys.items():
        if value in layers:
            # Create a placeholder matching the layer's dtype and shape, then
            # wrap it in a TensorInfo proto for use in the signature.
            x = array_ops.placeholder(
                dtype=type_mapping[layers[value].precision],
                shape=layers[value].shape,
                name=value)
            signature[key] = build_tensor_info(x)
    return signature
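A minimal usage sketch; LayerInfo and the type_mapping dict are hypothetical stand-ins for the serving project's own definitions (type_mapping must be visible to the function, e.g. defined in the same module):

from collections import namedtuple
from tensorflow.python.framework import dtypes

LayerInfo = namedtuple("LayerInfo", ["precision", "shape"])
type_mapping = {"FP32": dtypes.float32}

layers = {"data": LayerInfo("FP32", [1, 3, 224, 224])}
signature = _prepare_signature(layers, model_keys={"inputs": "data"})
# -> {"inputs": <TensorInfo wrapping the placeholder named "data">}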
Example #2
def save_h5_pb(path_pb: str, path_json='', path_h5='', model=None, **kwargs):
    '''
    Restores the model architecture from a .json file,
    builds a Keras model in .h5 format, and
    converts the Keras model from .h5 to .pb (for TF Serving).
    :param model: object. tf.keras model
    :param path_json: string
    :param path_h5: string
    :param path_pb: string
    :param kwargs: dict. Passed on when the tf.keras model uses custom_objects
    '''
    if int(__version__.split('.')[0]) == 2:
        if path_json:
            print('load structure.json')
            model = load_structure(path_json, **kwargs)
        if path_h5:
            print('load weights.h5')
            filename = os.path.join(path_h5, 'weights.h5')
            model.load_weights(filename)
        model.save(path_pb)
    else:
        if path_json:
            print('load structure.json')
            model = load_structure(path_json, **kwargs)
        if path_h5:
            print('load weights.h5')
            filename = os.path.join(path_h5, 'weights.h5')
            model.load_weights(filename)
            filename = os.path.join(path_h5, 'model.h5')
            model.save(filename)
        assert len(
            os.listdir(path_pb)) == 0, (f'Directory {path_pb} is not empty')
        with get_session() as sess:
            simple_save(sess,
                        path_pb,
                        inputs={'input_image': model.input},
                        outputs={t.name: t
                                 for t in model.outputs})
    clear_session()
    print(f'\nSuccessfully saved data to {os.path.dirname(path_pb)}')
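A hypothetical call (paths are invented; load_structure and the weight files must exist as described in the docstring):

save_h5_pb(path_pb='export/model/1',
           path_json='artifacts',  # directory containing structure.json
           path_h5='artifacts')    # directory containing weights.h5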
Example #3
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""TensorFlow specific machine learning functions."""
import os as _os
from tensorflow import __version__ as _tf_version
from open3d import _build_config

if not _build_config["Tensorflow_VERSION"]:
    raise Exception('Open3D was not built with TensorFlow support!')

_o3d_tf_version = _build_config["Tensorflow_VERSION"].split('.')
if _tf_version.split('.')[:2] != _o3d_tf_version[:2]:
    _o3d_tf_version[2] = '*'  # Any patch level is OK
    match_tf_ver = '.'.join(_o3d_tf_version)
    raise Exception('Version mismatch: Open3D needs TensorFlow version {}, but'
                    ' version {} is installed!'.format(match_tf_ver,
                                                       _tf_version))
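The rule above, distilled into a standalone check (version strings here are invented): major and minor must match, while any patch level passes.

def _versions_compatible(built, installed):
    # Compare only the first two dotted fields: "2.4.1" matches "2.4.3",
    # but not "2.5.0".
    return built.split('.')[:2] == installed.split('.')[:2]

assert _versions_compatible('2.4.1', '2.4.3')
assert not _versions_compatible('2.4.1', '2.5.0')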

from . import layers
from . import ops

# put framework independent modules here for convenience
from . import configs
from . import datasets
from . import vis

# framework specific modules from open3d-ml
Example #4
def _create_request_dict(
    job_id,
    region,
    image_uri,
    chief_config,
    worker_count,
    worker_config,
    entry_point_args,
    job_labels,
):
    """Creates request dictionary for the CAIP training service."""
    training_input = {}
    training_input["region"] = region
    training_input["scaleTier"] = "custom"
    training_input["masterType"] = gcp.get_machine_type(
        chief_config.cpu_cores, chief_config.memory,
        chief_config.accelerator_type)

    # Set master config
    chief_machine_config = {}
    chief_machine_config["imageUri"] = image_uri
    chief_machine_config["acceleratorConfig"] = {}
    chief_machine_config["acceleratorConfig"]["count"] = str(
        chief_config.accelerator_count)
    chief_machine_config["acceleratorConfig"][
        "type"] = gcp.get_accelerator_type(chief_config.accelerator_type.value)

    training_input["masterConfig"] = chief_machine_config
    training_input["workerCount"] = str(worker_count)

    if worker_count > 0:
        training_input["workerType"] = gcp.get_machine_type(
            worker_config.cpu_cores,
            worker_config.memory,
            worker_config.accelerator_type,
        )

        worker_machine_config = {}
        worker_machine_config["imageUri"] = image_uri
        worker_machine_config["acceleratorConfig"] = {}
        worker_machine_config["acceleratorConfig"]["count"] = str(
            worker_config.accelerator_count)
        worker_machine_config["acceleratorConfig"][
            "type"] = gcp.get_accelerator_type(
                worker_config.accelerator_type.value)

        if machine_config.is_tpu_config(worker_config):
            # AI Platform runtime version spec is required for training
            # on cloud TPUs.
            v = VERSION.split(".")
            worker_machine_config["tpuTfVersion"] = v[0] + "." + v[1]
        training_input["workerConfig"] = worker_machine_config

    if entry_point_args is not None:
        training_input["args"] = entry_point_args

    # This is temporarily required so that the `TF_CONFIG` generated by
    # CAIP uses the keyword 'chief' instead of 'master'.
    training_input["use_chief_in_tf_config"] = True
    request_dict = {}
    request_dict["jobId"] = job_id
    request_dict["trainingInput"] = training_input
    if job_labels:
        request_dict["labels"] = job_labels
    return request_dict
Example #5
import falcon
from grpc import StatusCode
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow_serving.apis import predict_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from ie_serving.config import GLOBAL_CONFIG
from ie_serving.models.shape_management.utils import BatchingMode, ShapeMode
from ie_serving.server.constants import \
    INVALID_INPUT_KEY, INVALID_SHAPE, INVALID_BATCHSIZE, GRPC, REST
from ie_serving.logger import get_logger
from tensorflow import __version__ as tf_version
if tf_version.split(".")[0] == "2":
    from tensorflow import make_ndarray, make_tensor_proto
else:  # TF version 1.x
    from tensorflow.contrib.util import make_ndarray, make_tensor_proto
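A quick round trip through whichever pair was imported above (the array values are arbitrary):

_demo = np.arange(6, dtype=np.float32).reshape(2, 3)
_proto = make_tensor_proto(_demo, shape=_demo.shape)  # ndarray -> TensorProto
assert np.array_equal(make_ndarray(_proto), _demo)    # TensorProto -> ndarray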

logger = get_logger(__name__)

statusCodes = {
    'invalid_arg': {
        GRPC: StatusCode.INVALID_ARGUMENT,
        REST: falcon.HTTP_BAD_REQUEST
    },
}


def prepare_input_data(target_engine, data, service_type):
Example #6
# Determine number of threads
from os import environ
from json import load as loadf
import numpy as np
from multiprocessing import cpu_count
# For creating references to weights
from hashlib import sha256
from jsonpickle import encode as dencode
from os import mkdir
from os.path import isdir
# Autogeneration of training sets
from sklearn.model_selection import train_test_split
# Determine TensorFlow version for compatibility
from tensorflow import __version__ as tfversion

tfversion = [int(x) for x in tfversion.split(".")]
twoOh = [2, 0, 0]
if tfversion >= twoOh:
    from tensorflow.keras import backend as K
    from tensorflow.keras.optimizers import Adam
    from tensorflow import distribute as D
    import tensorflow as tf
    from tensorflow.keras.callbacks import CSVLogger
    from tensorflow.keras.callbacks import Callback as KCallback
    from tensorflow.keras.callbacks import LearningRateScheduler
else:  # Assuming 1.XX
    import keras.backend as K
    from keras.utils.multi_gpu_utils import multi_gpu_model
    # from keras.optimizers import Adam
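The int() parse above assumes plain release strings such as "2.3.1"; a hedged variant (the helper name is made up) that also tolerates suffixes like "2.5.0-rc1":

import re

def _version_fields(v):
    # Keep the leading integer of each dotted field and stop at the first
    # field without a numeric prefix: "2.5.0-rc1" -> [2, 5, 0].
    fields = []
    for part in v.split("."):
        m = re.match(r"\d+", part)
        if not m:
            break
        fields.append(int(m.group()))
    return fields

assert _version_fields("2.5.0-rc1") >= [2, 0, 0]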
Example #7
    def _create_docker_file(self):
        """Creates a Dockerfile."""
        if self.docker_base_image is None:
            # Updating the name for RC's to match with the TF generated RC docker image names.
            tf_version = VERSION.replace("-rc", "rc")
            # Get the TF docker base image to use based on the current
            # TF version.
            self.docker_base_image = "tensorflow/tensorflow:{}".format(
                tf_version)
            if (self.chief_config.accelerator_type !=
                    machine_config.AcceleratorType.NO_ACCELERATOR):
                self.docker_base_image += "-gpu"

            # Add python 3 tag for TF version <= 2.1.0
            # https://hub.docker.com/r/tensorflow/tensorflow
            if VERSION != "latest":
                v = VERSION.split(".")
                # Note: comparing as a float is only safe for single-digit
                # minor versions (a hypothetical "2.10" would parse as 2.1).
                if float(v[0] + "." + v[1]) <= 2.1:
                    self.docker_base_image += "-py3"

        if not self._base_image_exist():
            warnings.warn("Docker base image {} does not exist.".format(
                self.docker_base_image))
            if "dev" in self.docker_base_image:
                # Except for the latest TF nightly, other nightlies
                # do not have corresponding docker images.
                newtag = "nightly"
                if self.docker_base_image.endswith("-gpu"):
                    newtag += "-gpu"
                self.docker_base_image = (
                    self.docker_base_image.split(":")[0] + ":" + newtag)
                warnings.warn("Using the latest TF nightly build.")
            else:
                warnings.warn(
                    "Using the latest stable TF docker image available: "
                    "`tensorflow/tensorflow:latest`. "
                    "Please see https://hub.docker.com/r/tensorflow/tensorflow/ "
                    "for details on available docker images.")
                newtag = "tensorflow/tensorflow:latest"
                if self.docker_base_image.endswith("-gpu"):
                    newtag += "-gpu"
                self.docker_base_image = newtag

        lines = [
            "FROM {}".format(self.docker_base_image),
            "WORKDIR {}".format(self.destination_dir),
        ]

        if self.requirements_txt is not None:
            _, requirements_txt_name = os.path.split(self.requirements_txt)
            dst_requirements_txt = os.path.join(requirements_txt_name)
            requirements_txt_path = os.path.join(self.destination_dir,
                                                 requirements_txt_name)
            lines.append("COPY {} {}".format(requirements_txt_path,
                                             requirements_txt_path))
            # install pip requirements from requirements_txt if it exists.
            lines.append("RUN if [ -e {} ]; "
                         "then pip install --no-cache -r {}; "
                         "fi".format(dst_requirements_txt,
                                     dst_requirements_txt))
        if self.entry_point is None:
            lines.append("RUN pip install tensorflow-cloud")

        if self.worker_config is not None and machine_config.is_tpu_config(
                self.worker_config):
            lines.append("RUN pip install cloud-tpu-client")

        # Copies the files from the `destination_dir` in docker daemon location
        # to the `destination_dir` in docker container filesystem.
        lines.append("COPY {} {}".format(self.destination_dir,
                                         self.destination_dir))

        docker_entry_point = self.preprocessed_entry_point or self.entry_point
        _, docker_entry_point_file_name = os.path.split(docker_entry_point)

        # Using `ENTRYPOINT` here instead of `CMD` specifically because
        # we want to support passing user code flags.
        lines.extend([
            'ENTRYPOINT ["python", "{}"]'.format(docker_entry_point_file_name)
        ])

        content = "\n".join(lines)
        _, self.docker_file_path = tempfile.mkstemp()
        with open(self.docker_file_path, "w") as f:
            f.write(content)
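For illustration only, with invented values (base image tensorflow/tensorflow:2.1.0-gpu, destination_dir /app, entry point train.py, no requirements.txt), the Dockerfile this method writes would read roughly:

# FROM tensorflow/tensorflow:2.1.0-gpu
# WORKDIR /app
# COPY /app /app
# ENTRYPOINT ["python", "train.py"]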
Example #8
def _create_request_dict(
    job_id,
    region,
    image_uri,
    chief_config,
    worker_count,
    worker_config,
    entry_point_args,
    job_labels=None,
):
    """Creates request dictionary for the CAIP training service.

    Args:
        job_id: String, unique job id.
        region: GCP region name.
        image_uri: The docker image uri.
        chief_config: `MachineConfig` that represents the configuration for
            the chief worker in a distribution cluster.
        worker_count: Integer that represents the number of general workers
            in a distribution cluster. This count does not include the
            chief worker.
        worker_config: `MachineConfig` that represents the configuration for
            the general workers in a distribution cluster.
        entry_point_args: Command line arguments to pass to the
            `entry_point` program.
        job_labels: Optional dict of str: str. Labels to organize jobs. See
            https://cloud.google.com/ai-platform/training/docs/resource-labels.

    Returns:
        The job request dictionary.
    """
    training_input = {}
    training_input["region"] = region
    training_input["scaleTier"] = "custom"
    training_input["masterType"] = gcp.get_machine_type(
        chief_config.cpu_cores, chief_config.memory,
        chief_config.accelerator_type)

    # Set master config
    chief_machine_config = {}
    chief_machine_config["imageUri"] = image_uri
    chief_machine_config["acceleratorConfig"] = {}
    chief_machine_config["acceleratorConfig"]["count"] = str(
        chief_config.accelerator_count)
    chief_machine_config["acceleratorConfig"][
        "type"] = gcp.get_accelerator_type(chief_config.accelerator_type.value)

    training_input["masterConfig"] = chief_machine_config
    training_input["workerCount"] = str(worker_count)

    if worker_count > 0:
        training_input["workerType"] = gcp.get_machine_type(
            worker_config.cpu_cores,
            worker_config.memory,
            worker_config.accelerator_type,
        )

        worker_machine_config = {}
        worker_machine_config["imageUri"] = image_uri
        worker_machine_config["acceleratorConfig"] = {}
        worker_machine_config["acceleratorConfig"]["count"] = str(
            worker_config.accelerator_count)
        worker_machine_config["acceleratorConfig"][
            "type"] = gcp.get_accelerator_type(
                worker_config.accelerator_type.value)

        if machine_config.is_tpu_config(worker_config):
            # AI Platform runtime version spec is required for training
            # on cloud TPUs.
            v = VERSION.split(".")
            worker_machine_config["tpuTfVersion"] = v[0] + "." + v[1]
        training_input["workerConfig"] = worker_machine_config

    if entry_point_args is not None:
        training_input["args"] = entry_point_args

    # This is temporarily required so that the `TF_CONFIG` generated by
    # CAIP uses the keyword 'chief' instead of 'master'.
    training_input["use_chief_in_tf_config"] = True
    request_dict = {}
    request_dict["jobId"] = job_id
    request_dict["trainingInput"] = training_input
    if job_labels:
        request_dict["labels"] = job_labels
    return request_dict
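The returned request has the following shape (all field values here are hypothetical):

# {
#     "jobId": "my_job_001",
#     "trainingInput": {
#         "region": "us-central1",
#         "scaleTier": "custom",
#         "masterType": "n1-standard-8",
#         "masterConfig": {
#             "imageUri": "gcr.io/my-project/my-image:latest",
#             "acceleratorConfig": {"count": "1", "type": "NVIDIA_TESLA_K80"},
#         },
#         "workerCount": "0",
#         "use_chief_in_tf_config": True,
#     },
#     "labels": {"team": "research"},
# }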