Example #1
def get_tf_model(model_or_spec, settings):
    """Convenience function for getting a tf.keras model from a wrapper or
    JSON file.

    The input to this function can be one of the model wrapper classes
    (e.g., ObjectDetector), a dict used to instantiate a wrapper, or
    the path to a file containing JSON for such a dict.

    """
    if isinstance(model_or_spec, tf.keras.Model):
        return model_or_spec
    elif isinstance(model_or_spec, (Deepnet, ObjectDetector)):
        return model_or_spec._model
    else:
        if isinstance(model_or_spec, dict):
            model_dict = model_or_spec
        else:
            with open(model_or_spec, 'r') as fin:
                model_dict = json.load(fin)

        model_settings = ensure_settings(settings)
        # This is the only valid input format for tflite; it doesn't
        # know how to read files (as of TF 2.3)
        model_settings.input_image_format = 'pixel_values'

        return create_model(model_dict, settings=model_settings)._model
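
A quick usage sketch for the dispatch above. The file name "model.json" and the empty settings dict are illustrative assumptions, and it is assumed that ensure_settings accepts an empty dict, as create_model's settings=None default suggests.

import tensorflow as tf

# Hypothetical spec file containing the JSON dict for a wrapper; the
# function reads it, forces the 'pixel_values' input format, and
# returns the underlying tf.keras.Model.
keras_model = get_tf_model("model.json", {})
assert isinstance(keras_model, tf.keras.Model)

# Passing an already-built tf.keras.Model (or a Deepnet/ObjectDetector
# wrapper) simply unwraps it; the settings argument is then ignored.
same_model = get_tf_model(keras_model, {})
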
Example #2
def box_detector(model, input_settings):
    settings = ensure_settings(input_settings)
    settings.rescale_type = PAD

    network = model['image_network']
    reader = BoundingBoxImageReader(network, settings)
    nclasses = number_of_classes(model)
    loader = ImageLoader(network)

    yolo_trunk = YoloTrunk(network, nclasses)
    yolo_branches = YoloBranches(network, nclasses)
    locator = BoxLocator(network, nclasses, settings)

    if settings.input_image_format == 'pixel_values':
        image_shape = get_image_tensor_shape(settings)
        image_input = kl.Input(image_shape, dtype=tf.float32, name='image')
    else:
        image_input = kl.Input((1, ), dtype=tf.string, name='image')

    raw_image, original_shape = reader(image_input)

    image = loader(raw_image)
    layer_outputs = yolo_trunk(image)
    predictions = yolo_branches(layer_outputs)
    all_outputs = locator(predictions, tf.cast(original_shape, tf.float32))

    return tf.keras.Model(inputs=image_input, outputs=all_outputs)
Example #3
def image_model(network, input_settings):
    settings = ensure_settings(input_settings)

    if is_yolo_model(network):
        model = box_detector(network, settings)
    else:
        model = deepnet_model(network, settings)

    if settings.load_pretrained_weights:
        load_pretrained_weights(model, network['image_network'])

    return model
Example #4
def create_model(model, settings=None):
    settings_object = ensure_settings(settings)

    if bigml_resource(model):
        return create_model(bigml_resource(model), settings=settings)
    elif is_deepnet(model):
        if is_yolo_model(model):
            return ObjectDetector(model, settings_object)
        else:
            return Deepnet(model, settings_object)
    elif isinstance(model, dict):
        raise ValueError('Model format not recognized: %s' % str(model.keys()))
    else:
        raise TypeError('`model` argument cannot be a %s' % str(type(model)))
Example #5
def model_from_dictionary(model_dict, settings):
    settings_object = ensure_settings(settings)

    if bigml_resource(model_dict):
        model = bigml_resource(model_dict)
    else:
        model = model_dict

    if is_deepnet(model):
        if is_yolo_model(model):
            return ObjectDetector(model, settings_object)
        else:
            return Deepnet(model, settings_object)
    elif isinstance(model, dict):
        raise ValueError("Model format not recognized: %s" % str(model.keys()))
Example #6
def deepnet_model(model, input_settings):
    settings = ensure_settings(input_settings)

    preprocessor = Preprocessor(model, settings)
    trees = tree_preprocessor(model)

    raw_inputs = instantiate_inputs(model, settings)
    inputs = preprocessor(raw_inputs)

    if trees:
        treeed_inputs = trees(inputs)
    else:
        treeed_inputs = None

    predictions = apply_layers(model, settings, inputs, treeed_inputs)
    return tf.keras.Model(inputs=raw_inputs, outputs=predictions)
Example #7
def box_detector(model, input_settings):
    settings = ensure_settings(input_settings)
    settings.rescale_type = PAD

    network = model["image_network"]
    reader = BoundingBoxImageReader(network, settings)
    nclasses = number_of_classes(model)
    loader = ImageLoader(network)

    yolo = Yolo(network, nclasses)
    locator = BoxLocator(network, nclasses, settings)

    assert len(model["preprocess"]) == 1
    image_input = instantiate_inputs(model, settings)
    raw_image, original_shape = reader(image_input)

    image = loader(raw_image)
    predictions = yolo(image)
    all_outputs = locator(predictions, tf.cast(original_shape, tf.float32))

    return tf.keras.Model(inputs=image_input, outputs=all_outputs)
Example #8
def make_image_reader(input_format, target_shape, file_prefix, read_settings):
    settings = ensure_settings(read_settings)

    n_chan = target_shape[-1]
    prefix = path_prefix(file_prefix)

    def read_image(path_or_bytes):
        if input_format == "pixel_values":
            raw = path_or_bytes
        else:
            if input_format == "image_bytes":
                img_bytes = path_or_bytes
            else:
                path = tf.strings.join([prefix, path_or_bytes])
                img_bytes = tf.io.read_file(path)

            raw = tf.io.decode_jpeg(img_bytes, dct_method=DCT, channels=n_chan)

        return rescale(settings, target_shape, raw)

    return read_image
Example #9
def convert(model, settings, output_path, to_format):
    """Convert some structure describing a wrapped model to a given output
    format.

    The first input to this function can be one of the model wrapper
    classes (e.g., ObjectDetector), a dict used to instantiate a
    wrapper, the path to a file containing JSON for such a dict, or
    the path to a bundled saved model.

    The second is a dict of settings for the model, which can contain
    things like the IOU threshold for non-max suppression (see
    sensenet.model.settings).  Note that if the first argument is an
    already-instantiated model, this argument is ignored.

    The third is the path to which to output the converted model.

    The fourth is the format to which to convert the model, any of
    `smbundle`, `tflite`, `tfjs`, or `h5`; the last of these saves
    only the weights of the model in Keras h5 format, without saving
    the layer configs.

    On completion, the requested file is written to the provided path.

    """
    if isinstance(model, SaveableModel):
        model_object = model
    else:
        model_settings = ensure_settings(settings)
        model_object = create_model(model, settings=model_settings)

    if to_format == "tflite":
        model_object.save_tflite(output_path)
    elif to_format == "tfjs":
        model_object.save_tfjs(output_path)
    elif to_format == "smbundle":
        model_object.save_bundle(output_path)
    elif to_format == "h5":
        model_object.save_weights(output_path)
    else:
        raise ValueError('Format "%s" unknown' % str(to_format))
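
A minimal call sketch for the conversion entry point. The spec file name, the settings key, and the output paths are assumptions made for illustration; the exact setting names are defined in sensenet.model.settings.

# Assumed spec file holding the JSON dict for an ObjectDetector wrapper.
spec_path = "object_detector.json"

# Assumed settings dict; the docstring mentions an IOU threshold for
# non-max suppression, but the exact key name may differ.
detector_settings = {"iou_threshold": 0.5}

# Writes a TensorFlow Lite flatbuffer to detector.tflite.
convert(spec_path, detector_settings, "detector.tflite", "tflite")

# Or save only the model weights in Keras h5 format:
convert(spec_path, detector_settings, "detector_weights.h5", "h5")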