def onnx_convert_with_savedmodel(keras_model_file, output_file, op_set,
                                 inputs_as_nchw):
    # only available for TF 2.x
    if not tf.__version__.startswith('2'):
        raise ValueError('savedmodel convert is only supported in a TF 2.x environment')

    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # assume only 1 input tensor for image
    assert len(model.inputs) == 1, 'model should have only 1 input tensor.'

    # export to saved model
    model.save('tmp_savedmodel', save_format='tf')

    # use tf2onnx to convert to onnx model
    if inputs_as_nchw:
        cmd = 'python -m tf2onnx.convert --saved-model tmp_savedmodel --inputs-as-nchw {} --output {} --opset {}'.format(
            model.inputs[0].name, output_file, op_set)
    else:
        cmd = 'python -m tf2onnx.convert --saved-model tmp_savedmodel --output {} --opset {}'.format(
            output_file, op_set)

    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()

    # clean saved model
    shutil.rmtree('tmp_savedmodel')
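# A minimal usage sketch (the file names and opset below are placeholder
# assumptions, not values from the original tool):
def example_onnx_convert_with_savedmodel():
    onnx_convert_with_savedmodel(keras_model_file='yolo3.h5',
                                 output_file='yolo3.onnx',
                                 op_set=13,
                                 inputs_as_nchw=True)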
def load_eval_model(model_path):
    # support of tflite model
    if model_path.endswith('.tflite'):
        from tensorflow.lite.python import interpreter as interpreter_wrapper
        model = interpreter_wrapper.Interpreter(model_path=model_path)
        model.allocate_tensors()
        model_format = 'TFLITE'

    # support of MNN model
    elif model_path.endswith('.mnn'):
        model = MNN.Interpreter(model_path)
        model_format = 'MNN'

    # support of TF 1.x frozen pb model
    elif model_path.endswith('.pb'):
        model = load_graph(model_path)
        model_format = 'PB'

    # support of ONNX model
    elif model_path.endswith('.onnx'):
        model = onnxruntime.InferenceSession(model_path)
        model_format = 'ONNX'

    # normal keras h5 model
    elif model_path.endswith('.h5'):
        custom_object_dict = get_custom_objects()

        model = load_model(model_path, compile=False, custom_objects=custom_object_dict)
        model_format = 'H5'
        K.set_learning_phase(0)
    else:
        raise ValueError('invalid model file')

    return model, model_format
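# A minimal usage sketch (hypothetical model path): load_eval_model lets
# evaluation code dispatch on the file extension with a single call.
def example_load_eval_model():
    model, model_format = load_eval_model('weights/model.tflite')
    print('loaded a {} model'.format(model_format))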
def main():
    parser = argparse.ArgumentParser(description='tf.keras model FLOPs & PARAMs checking tool')
    parser.add_argument('--model_path', type=str, required=True, help='model file to evaluate')
    parser.add_argument('--model_input_shape', type=str, required=False, default=None, help='model image input shape as <height>x<width>, optional')
    args = parser.parse_args()

    custom_object_dict = get_custom_objects()
    model = load_model(args.model_path, compile=False, custom_objects=custom_object_dict)

    batch, height, width, channel = model.input.shape.as_list()

    if args.model_input_shape:
        height, width = args.model_input_shape.split('x')
        height, width = int(height), int(width)

    # to calculate FLOPs we need to use fixed input shape & batch size
    assert height and width and channel, 'input shape should be specified'

    if not batch:
        # if dynamic batch, rebuild model with batch_size=1
        input_tensor = Input(shape=(height, width, channel), batch_size=1)
        output_tensor = model(input_tensor)
        model = Model(input_tensor, output_tensor)

    K.set_learning_phase(0)
    get_flops(model)
def onnx_convert(keras_model_file, output_file, op_set, inputs_as_nchw):
    import tf2onnx
    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # assume only 1 input tensor for image
    assert len(model.inputs) == 1, 'model should have only 1 input tensor.'

    spec = (tf.TensorSpec(model.inputs[0].shape,
                          tf.float32,
                          name="image_input"), )

    if inputs_as_nchw:
        nchw_inputs_list = [model.inputs[0].name]
    else:
        nchw_inputs_list = None

    # Reference:
    # https://github.com/onnx/tensorflow-onnx#python-api-reference
    model_proto, _ = tf2onnx.convert.from_keras(
        model,
        input_signature=spec,
        custom_ops=None,
        opset=op_set,
        inputs_as_nchw=nchw_inputs_list,
        output_path=output_file)
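# A minimal usage sketch of the tf2onnx Python-API path (placeholder file
# names and opset; NCHW transpose enabled as an example):
def example_onnx_convert():
    onnx_convert(keras_model_file='model.h5',
                 output_file='model.onnx',
                 op_set=13,
                 inputs_as_nchw=True)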
def post_train_quant_convert(keras_model_file, annotation_file, sample_num,
                             model_input_shape, output_file):
    # NHWC input shape, kept for reference; TFLiteConverter.from_keras_model
    # infers input shapes from the model itself
    input_shapes = list((1, ) + model_input_shape + (3, ))

    with open(annotation_file) as f:
        annotation_lines = f.readlines()

    custom_object_dict = get_custom_objects()

    model = load_model(keras_model_file, custom_objects=custom_object_dict)
    converter = tf.lite.TFLiteConverter.from_keras_model(model)

    def data_generator():
        n = len(annotation_lines)
        i = 0
        for _ in range(sample_num):
            image, _ = get_ground_truth_data(annotation_lines[i],
                                             model_input_shape,
                                             augment=True)
            i = (i + 1) % n
            image = np.array([image], dtype=np.float32)
            yield [image]

    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    #converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    #converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
    converter.representative_dataset = tf.lite.RepresentativeDataset(
        data_generator)

    #converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]

    tflite_model = converter.convert()
    with open(output_file, "wb") as f:
        f.write(tflite_model)
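# A minimal usage sketch (placeholder paths and sample count;
# model_input_shape is (height, width), matching the NHWC shape built above):
def example_post_train_quant_convert():
    post_train_quant_convert(keras_model_file='model.h5',
                             annotation_file='train_annotations.txt',
                             sample_num=100,
                             model_input_shape=(416, 416),
                             output_file='model_quant.tflite')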
def validate_yolo_model(model_path, image_file, anchors, class_names,
                        model_image_size, loop_count):

    custom_object_dict = get_custom_objects()
    model = load_model(model_path,
                       compile=False,
                       custom_objects=custom_object_dict)

    img = Image.open(image_file)
    image = np.array(img, dtype='uint8')
    image_data = preprocess_image(img, model_image_size)
    # origin image shape from PIL, in (width, height) format
    image_shape = img.size

    # predict once first to bypass the model building time
    model.predict([image_data])

    start = time.time()
    for i in range(loop_count):
        prediction = model.predict([image_data])
    end = time.time()
    print("Average Inference time: {:.8f}ms".format(
        (end - start) * 1000 / loop_count))

    handle_prediction(prediction, image_file, image, image_shape, anchors,
                      class_names, model_image_size)
    return
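# A minimal usage sketch (placeholder paths, anchors and classes; anchors
# follow the usual (N, 2) width/height layout of YOLO-style models):
def example_validate_yolo_model():
    anchors = np.array([[10, 13], [16, 30], [33, 23]])
    class_names = ['person', 'car']
    validate_yolo_model('model.h5', 'test.jpg', anchors, class_names,
                        model_image_size=(416, 416), loop_count=10)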
def validate_yolo_model(model_path, image_file, anchors, class_names, model_image_size, loop_count):

    custom_object_dict = get_custom_objects()
    model = load_model(model_path, compile=False, custom_objects=custom_object_dict)

    img = Image.open(image_file)
    image = np.array(img, dtype='uint8')
    image_data = preprocess_image(img, model_image_size)
    #origin image shape, in (height, width) format
    image_shape = tuple(reversed(img.size))

    # predict once first to bypass the model building time
    model.predict([image_data])

    start = time.time()
    for i in range(loop_count):
        prediction = model.predict([image_data])
    end = time.time()
    print("Average Inference time: {:.8f}ms".format((end - start) * 1000 /loop_count))
    if not isinstance(prediction, list):
        prediction = [prediction]

    prediction.sort(key=lambda x: len(x[0]))
    handle_prediction(prediction, image_file, image, image_shape, anchors, class_names, model_image_size)
    return
def _convert_tf2_model(flags):
    """Calls function to convert the TensorFlow 2.0 model into a TFLite model.

  Args:
    flags: argparse.Namespace object.

  Raises:
    ValueError: Unsupported file format.
  """
    # Load the model.
    if flags.saved_model_dir:
        converter = lite.TFLiteConverterV2.from_saved_model(
            flags.saved_model_dir)
    elif flags.keras_model_file:
        custom_object_dict = get_custom_objects()
        model = keras.models.load_model(flags.keras_model_file,
                                        custom_objects=custom_object_dict)
        # set batch dimension to 1 for a fully-defined input shape
        # (needed for models with upsampling; 20210517)
        model.input.set_shape(1 + model.input.shape[1:])
        converter = lite.TFLiteConverterV2.from_keras_model(model)
    else:
        raise ValueError('--saved_model_dir or --keras_model_file must be '
                         'specified.')

    # Convert the model.
    tflite_model = converter.convert()
    with open(flags.output_file, "wb") as f:
        f.write(tflite_model)
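# A minimal usage sketch (hypothetical flags; exactly one of saved_model_dir
# / keras_model_file should be set):
def example_convert_tf2_model():
    flags = argparse.Namespace(saved_model_dir='saved_model/',
                               keras_model_file=None,
                               output_file='model.tflite')
    _convert_tf2_model(flags)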
def onnx_convert(keras_model_file, output_file, op_set):
    import keras2onnx
    import onnx
    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(model,
                                          model.name,
                                          custom_op_conversions=custom_object_dict,
                                          target_opset=op_set)

    # save converted onnx model
    onnx.save_model(onnx_model, output_file)
def main():
    parser = argparse.ArgumentParser(
        description='tf.keras model FLOPs & PARAMs checking tool')
    parser.add_argument('--model_path',
                        help='model file to evaluate',
                        type=str,
                        required=True)
    args = parser.parse_args()

    custom_object_dict = get_custom_objects()
    model = load_model(args.model_path,
                       compile=False,
                       custom_objects=custom_object_dict)

    get_flops(model)
def _get_toco_converter(flags):
  """Makes a TFLiteConverter object based on the flags provided.

  Args:
    flags: argparse.Namespace object containing TFLite flags.

  Returns:
    TFLiteConverter object.

  Raises:
    ValueError: Invalid flags.
  """
  # Parse input and output arrays.
  input_arrays = _parse_array(flags.input_arrays)
  input_shapes = None
  if flags.input_shapes:
    input_shapes_list = [
        _parse_array(shape, type_fn=int)
        for shape in flags.input_shapes.split(":")
    ]
    input_shapes = dict(zip(input_arrays, input_shapes_list))
  output_arrays = _parse_array(flags.output_arrays)

  converter_kwargs = {
      "input_arrays": input_arrays,
      "input_shapes": input_shapes,
      "output_arrays": output_arrays
  }

  # Create TFLiteConverter.
  if flags.graph_def_file:
    converter_fn = lite.TFLiteConverter.from_frozen_graph
    converter_kwargs["graph_def_file"] = flags.graph_def_file
  elif flags.saved_model_dir:
    converter_fn = lite.TFLiteConverter.from_saved_model
    converter_kwargs["saved_model_dir"] = flags.saved_model_dir
    converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set)
    converter_kwargs["signature_key"] = flags.saved_model_signature_key
  elif flags.keras_model_file:
    converter_fn = lite.TFLiteConverter.from_keras_model_file
    converter_kwargs["model_file"] = flags.keras_model_file
    custom_object_dict = get_custom_objects()
    converter_kwargs["custom_objects"] = custom_object_dict
  else:
    raise ValueError("--graph_def_file, --saved_model_dir, or "
                     "--keras_model_file must be specified.")

  return converter_fn(**converter_kwargs)
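# A minimal usage sketch (hypothetical flags mirroring the TFLite CLI; array
# names, shapes and paths below are placeholders):
def example_get_toco_converter():
  flags = argparse.Namespace(graph_def_file='model.pb',
                             saved_model_dir=None,
                             keras_model_file=None,
                             input_arrays='image_input',
                             input_shapes='1,416,416,3',
                             output_arrays='predict_conv/BiasAdd')
  converter = _get_toco_converter(flags)
  return converter.convert()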
def coreml_convert(input_model_file, output_file, model_image_size):
    if input_model_file.endswith('.h5'):
        if not tf.__version__.startswith('2'):
            raise ValueError(
                'tf.keras model convert is only supported in a TF 2.x environment')
        # tf.keras h5 model
        custom_object_dict = get_custom_objects()
        keras_model = load_model(input_model_file,
                                 custom_objects=custom_object_dict)

        # get input, output node names for the TF graph from tf.keras model
        # assume only 1 input
        input_name = keras_model.inputs[0].name.split(':')[0]
        output_names = [
            output.name.split(':')[0].split('/')[-1]
            for output in keras_model.outputs
        ]

        assert len(output_names) == 1, \
            'h5 model convert only supports YOLOv2 family with 1 prediction output.'

    elif input_model_file.endswith('.pb'):
        # NOTE: a TF 1.x frozen pb graph needs explicit input/output tensor names,
        # so we hardcode them here to get the tensors from the model
        input_name = 'image_input'

        # YOLOv2 model with 1 prediction output
        #output_names = ['predict_conv/BiasAdd']

        # Tiny YOLOv3 model with 2 prediction outputs
        output_names = ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd']

        # YOLOv3 model with 3 prediction outputs
        #output_names = ['predict_conv_1/BiasAdd', 'predict_conv_2/BiasAdd', 'predict_conv_3/BiasAdd']
    else:
        raise ValueError('unsupported model type')

    input_name_shape_dict = {input_name: (1, ) + model_image_size + (3, )}
    # convert to CoreML model file
    model = tfcoreml.convert(tf_model_path=input_model_file,
                             mlmodel_path=output_file,
                             input_name_shape_dict=input_name_shape_dict,
                             output_feature_names=output_names,
                             minimum_ios_deployment_target='13')
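# A minimal usage sketch (placeholder paths; model_image_size is
# (height, width) to match the NHWC shape dict built above):
def example_coreml_convert():
    coreml_convert('model.h5', 'model.mlmodel', model_image_size=(416, 416))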
def onnx_convert_with_savedmodel(keras_model_file, output_file, op_set):
    # only available for TF 2.x
    if not tf.__version__.startswith('2'):
        raise ValueError('savedmodel convert is only supported in a TF 2.x environment')

    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # export to saved model
    model.save('tmp_savedmodel', save_format='tf')

    # use tf2onnx to convert to onnx model
    cmd = 'python -m tf2onnx.convert --saved-model tmp_savedmodel --output {} --opset {}'.format(output_file, op_set)
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()

    # clean saved model
    shutil.rmtree('tmp_savedmodel')
def onnx_convert_old(keras_model_file, output_file, op_set):
    """
    old implementation to convert keras model to onnx,
    using deprecated keras2onnx package
    """
    import keras2onnx
    import onnx
    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(
        model,
        model.name,
        custom_op_conversions=custom_object_dict,
        target_opset=op_set)

    # save converted onnx model
    onnx.save_model(onnx_model, output_file)
def post_train_quant_convert(keras_model_file, dataset_path, dataset,
                             sample_num, model_input_shape, output_file):
    # NHWC input shape, kept for reference; TFLiteConverter.from_keras_model
    # infers input shapes from the model itself
    input_shapes = list((1, ) + model_input_shape + (3, ))

    #prepare quant data generator
    data_gen = SegmentationGenerator(
        dataset_path,
        dataset,
        1,  #batch_size
        1,  #num_classes, here we don't really use it
        target_size=model_input_shape[::-1],
        weighted_type=None,
        is_eval=False,
        augment=True)

    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)
    converter = tf.lite.TFLiteConverter.from_keras_model(model)

    def data_generator():
        for i, (image_data, y_true) in enumerate(data_gen):
            if i >= sample_num:
                break

            yield [image_data]

    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    #converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    #converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
    converter.representative_dataset = tf.lite.RepresentativeDataset(
        data_generator)

    #converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]

    tflite_model = converter.convert()
    with open(output_file, "wb") as f:
        f.write(tflite_model)
def validate_deeplab_model(model_path, image_file, class_names,
                           model_image_size, do_crf, label_file, loop_count):
    # load model
    custom_object_dict = get_custom_objects()
    model = load_model(model_path,
                       compile=False,
                       custom_objects=custom_object_dict)
    K.set_learning_phase(0)

    num_classes = model.output.shape.as_list()[-1]
    if class_names:
        # check that the class number matches the model prediction
        assert num_classes == len(class_names), 'class number mismatch with model.'

    # prepare input image
    img = Image.open(image_file)
    image_data = preprocess_image(img, model_image_size)
    image = image_data[0].astype('uint8')
    #origin image shape, in (width, height) format
    origin_image_size = img.size

    # predict once first to bypass the model building time
    model.predict([image_data])

    # get predict output
    start = time.time()
    for i in range(loop_count):
        prediction = model.predict([image_data])
    end = time.time()
    print("Average Inference time: {:.8f}ms".format(
        (end - start) * 1000 / loop_count))

    handle_prediction(prediction, image, np.array(img), num_classes,
                      class_names, model_image_size, origin_image_size, do_crf,
                      label_file)
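# A minimal usage sketch (placeholder paths; class_names=None skips the class
# count check, CRF post-processing disabled):
def example_validate_deeplab_model():
    validate_deeplab_model('deeplab.h5', 'street.jpg', class_names=None,
                           model_image_size=(512, 512), do_crf=False,
                           label_file=None, loop_count=5)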
def generate_heatmap(image_path, model_path, heatmap_path, class_names=None):
    # load model
    custom_object_dict = get_custom_objects()
    model = load_model(model_path, custom_objects=custom_object_dict)
    K.set_learning_phase(0)
    model.summary()

    # get image file list or single image
    if os.path.isdir(image_path):
        jpeg_files = glob.glob(os.path.join(image_path, '*.jpeg'))
        jpg_files = glob.glob(os.path.join(image_path, '*.jpg'))
        image_list = jpeg_files + jpg_files

        #assert os.path.isdir(heatmap_path), 'need to provide a path for output heatmap'
        os.makedirs(heatmap_path, exist_ok=True)
        heatmap_list = [
            os.path.join(
                heatmap_path,
                os.path.splitext(os.path.basename(image_name))[0] + '.jpg')
            for image_name in image_list
        ]
    else:
        image_list = [image_path]
        heatmap_list = [heatmap_path]

    # loop the sample list to generate all heatmaps
    for i, (image_file,
            heatmap_file) in enumerate(zip(image_list, heatmap_list)):
        # process input
        target_size = get_target_size(model)
        img = load_and_crop_img(image_file,
                                target_size=target_size,
                                interpolation='nearest:random')
        img = np.array(img)
        x = normalize_image(img)
        x = np.expand_dims(x, axis=0)

        # predict and get output
        preds = model.predict(x)
        index = np.argmax(preds[0])
        score = preds[0][index]
        max_output = model.output[:, index]

        # detect last conv layer
        last_conv_index = detect_last_conv(model)
        last_conv_layer = model.layers[last_conv_index]
        # get gradient of the last conv layer to the predicted class
        grads = K.gradients(max_output, last_conv_layer.output)[0]
        # pooling to get the feature gradient
        pooled_grads = K.mean(grads, axis=(0, 1, 2))
        # run the predict to get value
        iterate = K.function([model.input],
                             [pooled_grads, last_conv_layer.output[0]])
        pooled_grads_value, conv_layer_output_value = iterate([x])

        # weight each channel of the conv feature map by its pooled gradient
        for j in range(pooled_grads_value.shape[0]):
            conv_layer_output_value[:, :, j] *= pooled_grads_value[j]

        # get mean of each channel, which is the heatmap
        heatmap = np.mean(conv_layer_output_value, axis=-1)
        # normalize heatmap to 0~1
        heatmap = np.maximum(heatmap, 0)
        heatmap /= np.max(heatmap)
        #plt.matshow(heatmap)
        #plt.show()

        # overlap heatmap to frame image
        #img = cv2.imread(image_file)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img = cv2.resize(img, (224, 224))
        heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
        heatmap = denormalize_image(heatmap)
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        superimposed_img = heatmap * 0.4 + img

        # show predict class index or name on image
        cv2.putText(superimposed_img,
                    '{name}:{conf:.3f}'.format(
                        name=class_names[index] if class_names else index,
                        conf=float(score)), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1,
                    color=(0, 0, 255),
                    thickness=1,
                    lineType=cv2.LINE_AA)

        # save overlaped image
        cv2.imwrite(heatmap_file, superimposed_img)
        print("generate heatmap file {} ({}/{})".format(
            heatmap_file, i + 1, len(image_list)))
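# A minimal usage sketch (placeholder paths; class_names is optional and the
# label falls back to the predicted class index):
def example_generate_heatmap():
    generate_heatmap('samples/', 'model.h5', 'heatmaps/',
                     class_names=['cat', 'dog'])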
def keras_to_tensorflow(args):
    # If output_model path is relative and in cwd, make it absolute from root
    output_model = args.output_model
    if str(Path(output_model).parent) == '.':
        output_model = str((Path.cwd() / output_model))

    output_fld = Path(output_model).parent
    output_model_name = Path(output_model).name
    output_model_stem = Path(output_model).stem
    output_model_pbtxt_name = output_model_stem + '.pbtxt'

    # Create output directory if it does not exist
    Path(output_model).parent.mkdir(parents=True, exist_ok=True)

    if args.channels_first:
        K.set_image_data_format('channels_first')
    else:
        K.set_image_data_format('channels_last')

    custom_object_dict = get_custom_objects()

    model = load_input_model(args.input_model,
                             args.input_model_json,
                             args.input_model_yaml,
                             custom_objects=custom_object_dict)

    # TODO(amirabdi): Support networks with multiple inputs
    orig_output_node_names = [
        node.name.split(':')[0] for node in model.outputs
    ]
    #orig_output_node_names = [node.op.name for node in model.outputs]
    if args.output_nodes_prefix:
        num_output = len(orig_output_node_names)
        pred = [None] * num_output
        converted_output_node_names = [None] * num_output

        # Create dummy tf nodes to rename output
        for i in range(num_output):
            converted_output_node_names[i] = '{}{}'.format(
                args.output_nodes_prefix, i)
            pred[i] = tf.identity(model.outputs[i],
                                  name=converted_output_node_names[i])
    else:
        converted_output_node_names = orig_output_node_names
    logging.info('Converted output node names are: %s',
                 str(converted_output_node_names))

    sess = K.get_session()
    if args.output_meta_ckpt:
        saver = tf.train.Saver()
        saver.save(sess, str(output_fld / output_model_stem))

    if args.save_graph_def:
        tf.train.write_graph(sess.graph.as_graph_def(),
                             str(output_fld),
                             output_model_pbtxt_name,
                             as_text=True)
        logging.info('Saved the graph definition in ascii format at %s',
                     str(Path(output_fld) / output_model_pbtxt_name))

    if args.quantize:
        from tensorflow.tools.graph_transforms import TransformGraph
        transforms = ["quantize_weights", "quantize_nodes"]
        transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [],
                                               converted_output_node_names,
                                               transforms)
        constant_graph = graph_util.convert_variables_to_constants(
            sess, transformed_graph_def, converted_output_node_names)
    else:
        constant_graph = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), converted_output_node_names)

    graph_io.write_graph(constant_graph,
                         str(output_fld),
                         output_model_name,
                         as_text=False)
    logging.info('Saved the frozen graph at %s',
                 str(Path(output_fld) / output_model_name))
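# A minimal usage sketch (hypothetical argparse.Namespace whose fields follow
# the attributes keras_to_tensorflow reads above; paths are placeholders):
def example_keras_to_tensorflow():
    args = argparse.Namespace(input_model='model.h5',
                              input_model_json=None,
                              input_model_yaml=None,
                              output_model='model.pb',
                              channels_first=False,
                              output_nodes_prefix=None,
                              output_meta_ckpt=False,
                              save_graph_def=False,
                              quantize=False)
    keras_to_tensorflow(args)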