Example #1
    def test_convert_hub_module(self):
        self._create_hub_module()

        tf_saved_model_conversion_v2.convert_tf_hub_module(
            os.path.join(self._tmp_dir, HUB_MODULE_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR), 'default')

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'shape': [2],
                'name': 'module/Variable',
                'dtype': 'float32'
            }]
        }]
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])

        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, weights)

        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #2
    def test_convert_hub_module_v2(self):
        self._create_saved_model()
        module_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)

        tf_saved_model_conversion_v2.convert_tf_hub_module(
            module_path, tfjs_path, "serving_default", "serve")

        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        self.assertIsNot(model_json['modelTopology']['versions'], None)
        signature = model_json['userDefinedMetadata']['signature']
        self.assertIsNot(signature, None)
        self.assertIsNot(signature['inputs'], None)
        self.assertIsNot(signature['outputs'], None)

        weights_manifest = model_json['weightsManifest']
        self.assertCountEqual(weights_manifest[0]['paths'],
                              ['group1-shard1of1.bin'])
        self.assertIn('weights', weights_manifest[0])

        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #3
    def test_convert_hub_module_v1_sharded(self):
        self._create_hub_module()
        module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)

        # Do initial conversion without sharding.
        tf_saved_model_conversion_v2.convert_tf_hub_module(
            module_path, tfjs_path)
        weight_files = glob.glob(os.path.join(tfjs_path, 'group*.bin'))

        # Get size of weights in bytes after graph optimizations.
        optimized_total_weight = sum(
            [os.path.getsize(f) for f in weight_files])

        # Set the shard size to 40% of the total weight size so that the
        # sharded conversion below ought to produce 3 shards.
        weight_shard_size_bytes = int(optimized_total_weight * 0.4)

        tfjs_path = os.path.join(self._tmp_dir, 'sharded_model')
        # Convert Hub model again with shard argument set.
        tf_saved_model_conversion_v2.convert_tf_hub_module(
            module_path,
            tfjs_path,
            weight_shard_size_bytes=weight_shard_size_bytes)

        weight_files = sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin')))
        self.assertEqual(len(weight_files), 3)
        weight_file_sizes = [os.path.getsize(f) for f in weight_files]

        self.assertEqual(sum(weight_file_sizes), optimized_total_weight)
        self.assertEqual(weight_file_sizes[0], weight_file_sizes[1])
        self.assertLess(weight_file_sizes[2], weight_file_sizes[0])
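
A note on the sharding behaviour exercised above: passing weight_shard_size_bytes to convert_tf_hub_module caps each group*.bin file at that size, so the shard count follows from the total optimized weight size. A minimal standalone sketch, assuming the converter module is importable as tensorflowjs.converters.tf_saved_model_conversion_v2 (as in these tests) and using hypothetical input/output paths:

import glob
import os

from tensorflowjs.converters import tf_saved_model_conversion_v2

# Hypothetical paths; point these at a real TF Hub module and an output dir.
module_path = '/tmp/my_hub_module'
tfjs_path = '/tmp/tfjs_sharded_model'

# Cap each weight shard at 4 MB (the converter's usual default shard size).
tf_saved_model_conversion_v2.convert_tf_hub_module(
    module_path, tfjs_path,
    weight_shard_size_bytes=4 * 1024 * 1024)

# Every shard except possibly the last should be at most 4 MB.
for shard in sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin'))):
    print(shard, os.path.getsize(shard))
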
Example #4
    def test_convert_hub_module_v2(self):
        self._create_saved_model()
        module_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)

        tf_saved_model_conversion_v2.convert_tf_hub_module(
            module_path, tfjs_path, "serving_default", "serve")

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'shape': [],
                'name': 'StatefulPartitionedCall/mul',
                'dtype': 'float32'
            }]
        }]

        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])

        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, weights)

        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #5
  def test_convert_hub_module_v1_with_metadata(self):
    self._create_hub_module()
    module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)

    metadata_json = {'a': 1}
    tf_saved_model_conversion_v2.convert_tf_hub_module(
        module_path, tfjs_path, metadata={'key': metadata_json})

    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key'])
Example #6
  def test_convert_hub_module_v1(self):
    self._create_hub_module()
    module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)

    tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path)

    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])

    weights_manifest = model_json['weightsManifest']
    self.assertCountEqual(weights_manifest[0]['paths'],
                          ['group1-shard1of1.bin'])
    self.assertIn('weights', weights_manifest[0])

    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
Example #7
File: converter.py  Project: xueyan1/tfjs
def convert(arguments):
    args = get_arg_parser().parse_args(arguments)
    if args.show_version:
        print('\ntensorflowjs %s\n' % version.version)
        print('Dependency versions:')
        print('  keras %s' % keras.__version__)
        print('  tensorflow %s' % tf.__version__)
        return

    if not args.input_path:
        raise ValueError(
            'Missing input_path argument. For usage, use the --help flag.')
    if not args.output_path:
        raise ValueError(
            'Missing output_path argument. For usage, use the --help flag.')

    weight_shard_size_bytes = 1024 * 1024 * 4
    if args.weight_shard_size_bytes:
        if args.output_format != common.TFJS_LAYERS_MODEL:
            raise ValueError(
                'The --weight_shard_size_bytes flag is only supported under '
                'output_format=tfjs_layers_model.')
        weight_shard_size_bytes = args.weight_shard_size_bytes

    if args.input_path is None:
        raise ValueError('Error: The input_path argument must be set. '
                         'Run with --help flag for usage information.')

    input_format, output_format = _standardize_input_output_formats(
        args.input_format, args.output_format)

    quantization_dtype = (
        quantization.QUANTIZATION_BYTES_TO_DTYPES[args.quantization_bytes]
        if args.quantization_bytes else None)

    if (args.signature_name and input_format
            not in (common.TF_SAVED_MODEL, common.TF_HUB_MODEL)):
        raise ValueError(
            'The --signature_name flag is applicable only to "tf_saved_model" and '
            '"tf_hub" input format, but the current input format is '
            '"%s".' % input_format)

    # TODO(cais, piyu): More conversion logic can be added as additional
    #   branches below.
    if (input_format == common.KERAS_MODEL
            and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_h5_to_tfjs_layers_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=args.split_weights_by_layer)
    elif (input_format == common.KERAS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_keras_h5_to_tfjs_graph_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype=quantization_dtype,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    elif (input_format == common.KERAS_SAVED_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_saved_model_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=args.split_weights_by_layer)
    elif (input_format == common.TF_SAVED_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            args.input_path,
            args.output_path,
            signature_def=args.signature_name,
            saved_model_tags=args.saved_model_tags,
            quantization_dtype=quantization_dtype,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    elif (input_format == common.TF_HUB_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_hub_module(
            args.input_path,
            args.output_path,
            args.signature_name,
            args.saved_model_tags,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_MODEL):
        dispatch_tensorflowjs_to_keras_h5_conversion(args.input_path,
                                                     args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_SAVED_MODEL):
        dispatch_tensorflowjs_to_keras_saved_model_conversion(
            args.input_path, args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_tensorflowjs_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype=_parse_quantization_bytes(
                args.quantization_bytes),
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_tfjs_layers_model_to_tfjs_graph_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype=_parse_quantization_bytes(
                args.quantization_bytes),
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    else:
        raise ValueError(
            'Unsupported input_format - output_format pair: %s - %s' %
            (input_format, output_format))
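
Since convert(arguments) simply feeds the argument list to get_arg_parser().parse_args, it can be driven programmatically with CLI-style strings. A minimal sketch of the tf_hub -> tfjs_graph_model branch; the flag spellings and the positional input/output paths are assumptions inferred from the args.* attributes above, and the Hub handle and output directory are placeholders:

# Hypothetical invocation; flag names mirror the args attributes used above.
convert([
    '--input_format=tf_hub',
    '--output_format=tfjs_graph_model',
    '--signature_name=default',
    'https://tfhub.dev/<publisher>/<model>/<version>',  # placeholder handle
    '/tmp/tfjs_graph_model',                            # output directory
])
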
Example #8
File: converter.py  Project: Kuloud/just
def convert(arguments):
    args = get_arg_parser().parse_args(arguments)
    if args.show_version:
        print('\ntensorflowjs %s\n' % version.version)
        print('Dependency versions:')
        print('  keras %s' % tf.keras.__version__)
        print('  tensorflow %s' % tf.__version__)
        return

    if not args.input_path:
        raise ValueError(
            'Missing input_path argument. For usage, use the --help flag.')
    if not args.output_path:
        raise ValueError(
            'Missing output_path argument. For usage, use the --help flag.')

    if args.input_path is None:
        raise ValueError('Error: The input_path argument must be set. '
                         'Run with --help flag for usage information.')

    input_format, output_format = _standardize_input_output_formats(
        args.input_format, args.output_format)

    weight_shard_size_bytes = 1024 * 1024 * 4
    if args.weight_shard_size_bytes is not None:
        if (output_format
                not in (common.TFJS_LAYERS_MODEL, common.TFJS_GRAPH_MODEL)):
            raise ValueError(
                'The --weight_shard_size_bytes flag is only supported when '
                'output_format is tfjs_layers_model or tfjs_graph_model.')

        if not (isinstance(args.weight_shard_size_bytes, int)
                and args.weight_shard_size_bytes > 0):
            raise ValueError(
                'Expected weight_shard_size_bytes to be a positive integer, '
                'but got %s' % args.weight_shard_size_bytes)
        weight_shard_size_bytes = args.weight_shard_size_bytes

    quantization_dtype_map = _parse_quantization_dtype_map(
        args.quantize_float16, args.quantize_uint8, args.quantize_uint16,
        args.quantization_bytes)

    if (not args.output_node_names and input_format == common.TF_FROZEN_MODEL):
        raise ValueError(
            'The --output_node_names flag is required for "tf_frozen_model"')

    if (args.signature_name and input_format
            not in (common.TF_SAVED_MODEL, common.TF_HUB_MODEL)):
        raise ValueError(
            'The --signature_name flag is applicable only to "tf_saved_model" and '
            '"tf_hub" input format, but the current input format is '
            '"%s".' % input_format)

    if (args.control_flow_v2 and output_format != common.TFJS_GRAPH_MODEL):
        raise ValueError(
            'The --control_flow_v2 flag is applicable only to "tfjs_graph_model" '
            'as output format, but the current output format '
            'is "%s".' % output_format)

    # TODO(cais, piyu): More conversion logic can be added as additional
    #   branches below.
    if (input_format == common.KERAS_MODEL
            and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_h5_to_tfjs_layers_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            split_weights_by_layer=args.split_weights_by_layer,
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == common.KERAS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_keras_h5_to_tfjs_graph_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            control_flow_v2=args.control_flow_v2)
    elif (input_format == common.KERAS_SAVED_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_saved_model_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            split_weights_by_layer=args.split_weights_by_layer,
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == common.TF_SAVED_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            args.input_path,
            args.output_path,
            signature_def=args.signature_name,
            saved_model_tags=args.saved_model_tags,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            control_flow_v2=args.control_flow_v2)
    elif (input_format == common.TF_HUB_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_hub_module(
            args.input_path,
            args.output_path,
            signature=args.signature_name,
            saved_model_tags=args.saved_model_tags,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            control_flow_v2=args.control_flow_v2)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_MODEL):
        dispatch_tensorflowjs_to_keras_h5_conversion(args.input_path,
                                                     args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_SAVED_MODEL):
        dispatch_tensorflowjs_to_keras_saved_model_conversion(
            args.input_path, args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_tensorflowjs_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_tfjs_layers_model_to_tfjs_graph_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == common.TF_FROZEN_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_frozen_model(
            args.input_path,
            args.output_node_names,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes)
    else:
        raise ValueError(
            'Unsupported input_format - output_format pair: %s - %s' %
            (input_format, output_format))
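
The tf_saved_model branch above can also be invoked directly, bypassing argument parsing, by calling convert_tf_saved_model with the same keyword arguments. A minimal sketch with hypothetical paths, assuming the module is importable as tensorflowjs.converters.tf_saved_model_conversion_v2:

from tensorflowjs.converters import tf_saved_model_conversion_v2

# Mirrors the TF_SAVED_MODEL -> TFJS_GRAPH_MODEL dispatch above.
tf_saved_model_conversion_v2.convert_tf_saved_model(
    '/tmp/my_saved_model',        # hypothetical SavedModel directory
    '/tmp/tfjs_graph_model',      # hypothetical output directory
    signature_def='serving_default',
    saved_model_tags='serve',
    weight_shard_size_bytes=8 * 1024 * 1024,
    control_flow_v2=True)
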
Example #9
File: converter.py  Project: caisq/tfjs-1
def _dispatch_converter(input_format, output_format, args,
                        quantization_dtype_map, weight_shard_size_bytes,
                        metadata_map):
    # TODO(cais, piyu): More conversion logic can be added as additional
    #   branches below.
    if (input_format == common.KERAS_MODEL
            and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_h5_to_tfjs_layers_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            split_weights_by_layer=args.split_weights_by_layer,
            weight_shard_size_bytes=weight_shard_size_bytes,
            metadata=metadata_map)
    elif (input_format == common.KERAS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_keras_h5_to_tfjs_graph_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            control_flow_v2=args.control_flow_v2,
            experiments=args.experiments,
            metadata=metadata_map)
    elif (input_format == common.KERAS_SAVED_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_saved_model_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            split_weights_by_layer=args.split_weights_by_layer,
            weight_shard_size_bytes=weight_shard_size_bytes,
            metadata=metadata_map)
    elif (input_format == common.TF_SAVED_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            args.input_path,
            args.output_path,
            signature_def=args.signature_name,
            saved_model_tags=args.saved_model_tags,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            control_flow_v2=args.control_flow_v2,
            experiments=args.experiments,
            metadata=metadata_map)
    elif (input_format == common.TF_HUB_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_hub_module(
            args.input_path,
            args.output_path,
            signature=args.signature_name,
            saved_model_tags=args.saved_model_tags,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            control_flow_v2=args.control_flow_v2,
            experiments=args.experiments,
            metadata=metadata_map)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_MODEL):
        dispatch_tensorflowjs_to_keras_h5_conversion(args.input_path,
                                                     args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_SAVED_MODEL):
        dispatch_tensorflowjs_to_keras_saved_model_conversion(
            args.input_path, args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_tensorflowjs_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_tfjs_layers_model_to_tfjs_graph_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            control_flow_v2=args.control_flow_v2,
            experiments=args.experiments,
            metadata=metadata_map)
    elif (input_format == common.TF_FROZEN_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_frozen_model(
            args.input_path,
            args.output_node_names,
            args.output_path,
            quantization_dtype_map=quantization_dtype_map,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops,
            weight_shard_size_bytes=weight_shard_size_bytes,
            experiments=args.experiments,
            metadata=metadata_map)
    else:
        raise ValueError(
            'Unsupported input_format - output_format pair: %s - %s' %
            (input_format, output_format))
Example #10
def main():
    FLAGS = setup_arguments()
    if FLAGS.show_version:
        print('\ntensorflowjs %s\n' % version.version)
        print('Dependency versions:')
        print('  keras %s' % keras.__version__)
        print('  tensorflow %s' % tf.__version__)
        return

    weight_shard_size_bytes = 1024 * 1024 * 4
    if FLAGS.weight_shard_size_bytes:
        if FLAGS.output_format != 'tfjs_layers_model':
            raise ValueError(
                'The --weight_shard_size_bytes flag is only supported under '
                'output_format=tfjs_layers_model.')
        weight_shard_size_bytes = FLAGS.weight_shard_size_bytes

    if FLAGS.input_path is None:
        raise ValueError('Error: The input_path argument must be set. '
                         'Run with --help flag for usage information.')

    input_format, output_format = _standardize_input_output_formats(
        FLAGS.input_format, FLAGS.output_format)

    quantization_dtype = (
        quantization.QUANTIZATION_BYTES_TO_DTYPES[FLAGS.quantization_bytes]
        if FLAGS.quantization_bytes else None)

    if (FLAGS.signature_name
            and input_format not in ('tf_saved_model', 'tf_hub')):
        raise ValueError(
            'The --signature_name flag is applicable only to "tf_saved_model" and '
            '"tf_hub" input format, but the current input format is '
            '"%s".' % input_format)

    # TODO(cais, piyu): More conversion logic can be added as additional
    #   branches below.
    if input_format == 'keras' and output_format == 'tfjs_layers_model':
        dispatch_keras_h5_to_tfjs_layers_model_conversion(
            FLAGS.input_path,
            output_dir=FLAGS.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=FLAGS.split_weights_by_layer)
    elif input_format == 'keras' and output_format == 'tfjs_graph_model':
        dispatch_keras_h5_to_tfjs_graph_model_conversion(
            FLAGS.input_path,
            output_dir=FLAGS.output_path,
            quantization_dtype=quantization_dtype,
            skip_op_check=FLAGS.skip_op_check,
            strip_debug_ops=FLAGS.strip_debug_ops)
    elif (input_format == 'keras_saved_model'
          and output_format == 'tfjs_layers_model'):
        dispatch_keras_saved_model_to_tensorflowjs_conversion(
            FLAGS.input_path,
            FLAGS.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=FLAGS.split_weights_by_layer)
    elif (input_format == 'tf_saved_model'
          and output_format == 'tfjs_graph_model'):
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            FLAGS.input_path,
            FLAGS.output_path,
            signature_def=FLAGS.signature_name,
            saved_model_tags=FLAGS.saved_model_tags,
            quantization_dtype=quantization_dtype,
            skip_op_check=FLAGS.skip_op_check,
            strip_debug_ops=FLAGS.strip_debug_ops)
    elif (input_format == 'tf_hub' and output_format == 'tfjs_graph_model'):
        tf_saved_model_conversion_v2.convert_tf_hub_module(
            FLAGS.input_path,
            FLAGS.output_path,
            FLAGS.signature_name,
            FLAGS.saved_model_tags,
            skip_op_check=FLAGS.skip_op_check,
            strip_debug_ops=FLAGS.strip_debug_ops)
    elif (input_format == 'tfjs_layers_model' and output_format == 'keras'):
        dispatch_tensorflowjs_to_keras_h5_conversion(FLAGS.input_path,
                                                     FLAGS.output_path)
    elif (input_format == 'tfjs_layers_model'
          and output_format == 'keras_saved_model'):
        dispatch_tensorflowjs_to_keras_saved_model_conversion(
            FLAGS.input_path, FLAGS.output_path)
    elif (input_format == 'tfjs_layers_model'
          and output_format == 'tfjs_layers_model'):
        dispatch_tensorflowjs_to_tensorflowjs_conversion(
            FLAGS.input_path,
            FLAGS.output_path,
            quantization_dtype=_parse_quantization_bytes(
                FLAGS.quantization_bytes),
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == 'tfjs_layers_model'
          and output_format == 'tfjs_graph_model'):
        dispatch_tfjs_layers_model_to_tfjs_graph_conversion(
            FLAGS.input_path,
            FLAGS.output_path,
            quantization_dtype=_parse_quantization_bytes(
                FLAGS.quantization_bytes),
            skip_op_check=FLAGS.skip_op_check,
            strip_debug_ops=FLAGS.strip_debug_ops)
    else:
        raise ValueError(
            'Unsupported input_format - output_format pair: %s - %s' %
            (input_format, output_format))