Example #1
def jax2tf_to_tfjs(test_case: ModelTestCase):
    """Converts the given `module` using the TFjs converter."""
    # The model must be converted with `with_gradient=True` so that the
    # resulting SavedModel can be converted to TF.js, because the
    # "PreventGradient" op is not supported.
    tf_fn = jax2tf.convert(test_case.apply,
                           with_gradient=True,
                           enable_xla=False)

    # Create tf.Variables for the parameters. If you want more useful variable
    # names, you can use `tree.map_structure_with_path` from the `dm-tree`
    # package.
    param_vars = tf.nest.map_structure(
        lambda param: tf.Variable(param, trainable=True), test_case.variables)

    # This is the function that will be stored in the SavedModel. Note this only
    # supports a single argument, but we'd like to be able to pass more
    # arguments to a Module's __call__ function, so we pass them as a list that
    # we expand when passing it to `tf_fn`.
    @tf.function(autograph=False, jit_compile=False)
    def tf_graph(inputs):
        return tf_fn(param_vars, *inputs)

    s_fn = tf_graph.get_concrete_function(
        _get_signatures(test_case.input_specs))
    signatures = {tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: s_fn}
    wrapper = _ReusableSavedModelWrapper(tf_graph, param_vars)
    saved_model_options = tf.saved_model.SaveOptions(
        experimental_custom_gradients=True)

    with TempDir() as saved_model_path, TempDir() as tfjs_model_path:
        tf.saved_model.save(wrapper,
                            saved_model_path,
                            signatures=signatures,
                            options=saved_model_options)
        tfjs_converter.convert([saved_model_path, tfjs_model_path])
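The `_ReusableSavedModelWrapper` and `_get_signatures` helpers are referenced but not shown above. A minimal sketch of such a wrapper, modeled on the jax2tf SavedModel example code, is given below; the attribute names are assumptions rather than the exact helper used here, and `_get_signatures` presumably just builds a list of `tf.TensorSpec`s from `test_case.input_specs`.

# Sketch of a reusable SavedModel wrapper (assumption, based on the jax2tf
# example utilities, not necessarily the exact class used above).
class _ReusableSavedModelWrapper(tf.train.Checkpoint):
    """Tracks the converted tf.function and its parameter variables."""

    def __init__(self, tf_graph, param_vars):
        super().__init__()
        # Expose the variables so tf.saved_model.save stores them.
        self.variables = tf.nest.flatten(param_vars)
        self.trainable_variables = [v for v in self.variables if v.trainable]
        # Keep the traced function as an attribute so it is tracked in the
        # SavedModel alongside the variables.
        self.__call__ = tf_graph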
Example #2
def saved_model_to_tfjs(input_dir, save_dir):
    """Convert SavedModel to TFJS model."""
    print(f'\nConverting to TFJS:\nInput:{input_dir}\nOutput:{save_dir}\n')
    converter.convert([
        '--input_format=tf_saved_model', '--signature_name=serving_default',
        '--control_flow_v2=True', '--skip_op_check', '--quantize_float16=True',
        '--experiments=True', input_dir, save_dir
    ])
    print('TFJS Conversion Success!')
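`converter.convert` accepts the same argparse-style flags as the `tensorflowjs_converter` CLI, followed by the positional input and output paths. A hypothetical call with placeholder paths:

# Hypothetical usage; both paths are placeholders.
saved_model_to_tfjs('/tmp/exported_saved_model', '/tmp/tfjs_model')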
Example #3
    def convert_to_tfjs(self, graph_def_path, output_names):
        try:
            from tensorflowjs.converters import converter
        except ImportError:
            return None
        tfjs_path = os.path.join(self.test_data_directory, self._testMethodName + "_tfjs")
        try:
            converter.convert([graph_def_path, tfjs_path, '--input_format', 'tf_frozen_model',
                               '--output_node_names', ','.join(output_names)])
        except ValueError:
            return None
        model_path = os.path.join(tfjs_path, 'model.json')
        if not os.path.exists(model_path):
            return None
        return model_path
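The caller has to supply `output_names` for the frozen graph. One way to derive candidates from an already-parsed `tf.compat.v1.GraphDef` is to keep the nodes that no other node consumes; a hedged sketch, assuming `graph_def` was loaded from `graph_def_path`:

# Hedged sketch: collect candidate output node names from a frozen GraphDef.
# Assumption: `graph_def` has already been parsed from graph_def_path.
consumed = set()
for node in graph_def.node:
    for inp in node.input:
        # Strip control-dependency markers ('^name') and output indices (':0').
        consumed.add(inp.lstrip('^').split(':')[0])
output_names = [node.name for node in graph_def.node if node.name not in consumed]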
Example #4
    def convert_to_tflite(self, graph_def, feed_dict, outputs):
        if not feed_dict:
            return None  # Can't make TFlite model with no inputs
        tf_reset_default_graph()
        with tf_session() as sess:
            tf.import_graph_def(graph_def, name='')
            sess_inputs = [
                sess.graph.get_tensor_by_name(k) for k in feed_dict.keys()
            ]
            sess_outputs = [sess.graph.get_tensor_by_name(n) for n in outputs]
            converter = tf_lite.TFLiteConverter.from_session(
                sess, sess_inputs, sess_outputs)
            #converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
                tf.lite.OpsSet.SELECT_TF_OPS,  # enable TensorFlow flex ops.
            ]

            from tensorflow.lite.python.convert import ConverterError
            try:
                tflite_model = converter.convert()
                tflite_path = os.path.join(self.test_data_directory,
                                           self._testMethodName + ".tflite")
                dir_name = os.path.dirname(tflite_path)
                if dir_name:
                    os.makedirs(dir_name, exist_ok=True)
                with open(tflite_path, 'wb') as f:
                    f.write(tflite_model)
                return tflite_path
            except ConverterError:
                return None
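If the conversion succeeds, the returned `.tflite` path can be loaded back with `tf.lite.Interpreter` for a quick smoke test. A minimal sketch, feeding all-zero placeholder inputs; since the model enables SELECT_TF_OPS, it assumes the interpreter bundled with the full TensorFlow package:

import numpy as np
import tensorflow as tf

def smoke_test_tflite(tflite_path):
    # Load the converted model and run it once on all-zero inputs.
    interpreter = tf.lite.Interpreter(model_path=tflite_path)
    interpreter.allocate_tensors()
    for detail in interpreter.get_input_details():
        interpreter.set_tensor(detail['index'],
                               np.zeros(detail['shape'], dtype=detail['dtype']))
    interpreter.invoke()
    return [interpreter.get_tensor(d['index'])
            for d in interpreter.get_output_details()]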
Example #5
def jax2tf_to_tfjs(module: lib.ModuleToConvert):
    """Converts the given `module` using the TFjs converter."""
    with TempDir() as saved_model_path, TempDir() as converted_model_path:
        # The model must be converted with `with_gradient=True` so that the
        # resulting SavedModel can be converted to TF.js, because the
        # "PreventGradient" op is not supported.
        saved_model_lib.convert_and_save_model(
            module.apply,
            module.variables,
            saved_model_path,
            input_signatures=[
                tf.TensorSpec(shape=module.input_shape,
                              dtype=module.dtype,
                              name='input')
            ],
            with_gradient=True,
            compile_model=False,
            enable_xla=False)
        tfjs_converter.convert([saved_model_path, converted_model_path])
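`TempDir` is used in several of these snippets but never defined; it presumably behaves like `tempfile.TemporaryDirectory`, i.e. a context manager that yields a fresh directory path and removes it on exit. A minimal stand-in under that assumption:

import tempfile

# Assumption: TempDir is equivalent to tempfile.TemporaryDirectory.
TempDir = tempfile.TemporaryDirectory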
Example #6
    def convert_to_tfjs(self, graph_def_path, output_names):
        try:
            from tensorflowjs.converters import converter
        except ImportError:
            self.logger.warning(
                "tensorflowjs.converters package import failed.")
            return None
        tfjs_path = os.path.join(self.test_data_directory,
                                 self._testMethodName + "_tfjs")
        try:
            converter.convert([
                graph_def_path, tfjs_path, '--input_format', 'tf_frozen_model',
                '--output_node_names', ','.join(output_names)
            ])
        except ValueError:
            self.logger.warning("TensorFlow.js graph conversion failed.")
            return None
        model_path = os.path.join(tfjs_path, 'model.json')
        if not os.path.exists(model_path):
            self.logger.warning("TensorFlow.js model path %s does not exist.",
                                model_path)
            return None
        return model_path
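A successful conversion leaves a `model.json` (graph topology plus a weights manifest) and one or more binary weight shards in `tfjs_path`. A small hedged helper for inspecting the returned path; the key names follow the TF.js graph-model format and should be treated as assumptions:

import json

def inspect_tfjs_model(model_path):
    # Hedged sketch: list the top-level keys of model.json and the weight
    # shard filenames referenced by its weights manifest.
    with open(model_path) as f:
        manifest = json.load(f)
    shard_files = [p for entry in manifest.get('weightsManifest', [])
                   for p in entry.get('paths', [])]
    return sorted(manifest.keys()), shard_files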
Example #7
def run(dryrun):
    print('Welcome to TensorFlow.js Converter.')
    input_path = [{
        'type':
        'input',
        'name':
        common.INPUT_PATH,
        'message':
        'Please provide the path of model file or '
        'the directory that contains model files. \n'
        'If you are converting TFHub module please provide the URL.',
        'filter':
        os.path.expanduser,
        'validate':
        lambda path: 'Please enter a valid path' if not path else True
    }]

    input_params = PyInquirer.prompt(input_path, style=prompt_style)
    detected_input_format, normalized_path = detect_input_format(
        input_params[common.INPUT_PATH])
    input_params[common.INPUT_PATH] = normalized_path

    formats = [{
        'type': 'list',
        'name': common.INPUT_FORMAT,
        'message': input_format_message(detected_input_format),
        'choices': input_formats(detected_input_format)
    }, {
        'type':
        'list',
        'name':
        common.OUTPUT_FORMAT,
        'message':
        'What is your output format?',
        'choices':
        available_output_formats,
        'when':
        lambda answers: value_in_list(answers, common.INPUT_FORMAT, (
            common.KERAS_MODEL, common.TFJS_LAYERS_MODEL))
    }]
    format_params = PyInquirer.prompt(formats,
                                      input_params,
                                      style=prompt_style)
    message = input_path_message(format_params)

    questions = [{
        'type':
        'input',
        'name':
        common.INPUT_PATH,
        'message':
        message,
        'filter':
        expand_input_path,
        'validate':
        lambda value: validate_input_path(
            value, format_params[common.INPUT_FORMAT]),
        'when':
        lambda answers: (not detected_input_format)
    }, {
        'type':
        'list',
        'name':
        common.SAVED_MODEL_TAGS,
        'choices':
        available_tags,
        'message':
        'What are the tags for the saved model?',
        'when':
        lambda answers:
        (is_saved_model(answers[common.INPUT_FORMAT]) and
         (common.OUTPUT_FORMAT not in format_params or format_params[
             common.OUTPUT_FORMAT] == common.TFJS_GRAPH_MODEL))
    }, {
        'type':
        'list',
        'name':
        common.SIGNATURE_NAME,
        'message':
        'What is the signature name of the model?',
        'choices':
        available_signature_names,
        'when':
        lambda answers:
        (is_saved_model(answers[common.INPUT_FORMAT]) and
         (common.OUTPUT_FORMAT not in format_params or format_params[
             common.OUTPUT_FORMAT] == common.TFJS_GRAPH_MODEL))
    }, {
        'type':
        'list',
        'name':
        common.QUANTIZATION_BYTES,
        'message':
        'Do you want to compress the model? '
        '(this will decrease the model precision.)',
        'choices': [{
            'name': 'No compression (Higher accuracy)',
            'value': None
        }, {
            'name': '2x compression (Accuracy/size trade-off)',
            'value': 2
        }, {
            'name': '4x compression (Smaller size)',
            'value': 1
        }]
    }, {
        'type':
        'input',
        'name':
        common.WEIGHT_SHARD_SIZE_BYTES,
        'message':
        'Please enter the shard size (in bytes) of the weight files.',
        'default':
        str(4 * 1024 * 1024),
        'when':
        lambda answers: value_in_list(answers, common.OUTPUT_FORMAT,
                                      (common.TFJS_LAYERS_MODEL,))
    }, {
        'type':
        'confirm',
        'name':
        common.SPLIT_WEIGHTS_BY_LAYER,
        'message':
        'Do you want to split weights by layers?',
        'default':
        False,
        'when':
        lambda answers: value_in_list(answers, common.INPUT_FORMAT,
                                      (common.TFJS_LAYERS_MODEL,))
    }, {
        'type':
        'confirm',
        'name':
        common.SKIP_OP_CHECK,
        'message':
        'Do you want to skip op validation? \n'
        'This will allow conversion of unsupported ops; \n'
        'you can implement them as custom ops in tfjs-converter.',
        'default':
        False,
        'when':
        lambda answers: value_in_list(answers, common.INPUT_FORMAT, (
            common.TF_SAVED_MODEL, common.TF_HUB_MODEL))
    }, {
        'type':
        'confirm',
        'name':
        common.STRIP_DEBUG_OPS,
        'message':
        'Do you want to strip debug ops? \n'
        'This will improve model execution performance.',
        'default':
        True,
        'when':
        lambda answers: value_in_list(answers, common.INPUT_FORMAT, (
            common.TF_SAVED_MODEL, common.TF_HUB_MODEL))
    }]
    params = PyInquirer.prompt(questions, format_params, style=prompt_style)

    output_options = [{
        'type': 'input',
        'name': common.OUTPUT_PATH,
        'message': 'Which directory do you want to save '
        'the converted model in?',
        'filter': lambda path: update_output_path(path, params),
        'validate': lambda path: len(path) > 0
    }, {
        'type':
        'confirm',
        'message':
        'The output directory already exists, '
        'do you want to overwrite it?',
        'name':
        'overwrite_output_path',
        'default':
        False,
        'when':
        lambda ans: output_path_exists(ans[common.OUTPUT_PATH])
    }]

    while (common.OUTPUT_PATH not in params
           or output_path_exists(params[common.OUTPUT_PATH])
           and not params['overwrite_output_path']):
        params = PyInquirer.prompt(output_options, params, style=prompt_style)

    arguments = generate_arguments(params)
    print('converter command generated:')
    print('tensorflowjs_converter %s' % ' '.join(arguments))
    print('\n\n')

    log_file = os.path.join(tempfile.gettempdir(), 'converter_error.log')
    if not dryrun:
        try:
            converter.convert(arguments)
            print('\n\nFile(s) generated by conversion:')

            print("Filename {0:25} Size(bytes)".format(''))
            total_size = 0
            output_path = params[common.OUTPUT_PATH]
            if os.path.isfile(output_path):
                output_path = os.path.dirname(output_path)
            for basename in sorted(os.listdir(output_path)):
                filename = os.path.join(output_path, basename)
                size = os.path.getsize(filename)
                print("{0:35} {1}".format(basename, size))
                total_size += size
            print("Total size:{0:24} {1}".format('', total_size))
        except BaseException:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value,
                                               exc_traceback)
            with open(log_file, 'a') as writer:
                writer.write(''.join(lines))
            print('Conversion failed, please check error log file %s.' %
                  log_file)
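The wizard only assembles a `tensorflowjs_converter` flag list from the interactive answers and hands it to `converter.convert`; with `dryrun=True` it stops after printing the generated command. A hypothetical invocation:

# Hypothetical invocation: answer the prompts interactively and only print
# the generated tensorflowjs_converter command line, without converting.
run(dryrun=True)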
Example #8
def _convert_tfjs_model(saved_model_path: str, destination_path: str):
    converter.convert([
        CONVERTER_SAVED_MODEL_INPUT_FLAG, CONVERTER_SERVING_TAG_FLAG,
        CONVERTER_DEFAULT_SIGNATURE_FLAG, saved_model_path, destination_path
    ])
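The three flag constants are not defined in this snippet. Judging by the flags passed to `converter.convert` in the other examples on this page, plausible values would be the following; treat them as assumptions rather than the module's actual definitions.

# Assumed values, modeled on the flags used elsewhere on this page; the
# real constants may differ.
CONVERTER_SAVED_MODEL_INPUT_FLAG = '--input_format=tf_saved_model'
CONVERTER_SERVING_TAG_FLAG = '--saved_model_tags=serve'
CONVERTER_DEFAULT_SIGNATURE_FLAG = '--signature_name=serving_default'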
Example #9
    def testTFJSPredictExtractorWithKerasModel(self, multi_model,
                                               multi_output):
        if not _TFJS_IMPORTED:
            self.skipTest('This test requires TensorFlow JS.')

        input1 = tf.keras.layers.Input(shape=(1, ), name='input1')
        input2 = tf.keras.layers.Input(shape=(1, ), name='input2')
        inputs = [input1, input2]
        input_layer = tf.keras.layers.concatenate(inputs)
        output_layers = {}
        output_layers['output1'] = (tf.keras.layers.Dense(
            1, activation=tf.nn.sigmoid, name='output1')(input_layer))
        if multi_output:
            output_layers['output2'] = (tf.keras.layers.Dense(
                1, activation=tf.nn.sigmoid, name='output2')(input_layer))

        model = tf.keras.models.Model(inputs, output_layers)
        model.compile(optimizer=tf.keras.optimizers.Adam(lr=.001),
                      loss=tf.keras.losses.binary_crossentropy,
                      metrics=['accuracy'])

        train_features = {'input1': [[0.0], [1.0]], 'input2': [[1.0], [0.0]]}
        labels = {'output1': [[1], [0]]}
        if multi_output:
            labels['output2'] = [[1], [0]]

        example_weights = {'output1': [1.0, 0.5]}
        if multi_output:
            example_weights['output2'] = [1.0, 0.5]
        dataset = tf.data.Dataset.from_tensor_slices(
            (train_features, labels, example_weights))
        dataset = dataset.shuffle(buffer_size=1).repeat().batch(2)
        model.fit(dataset, steps_per_epoch=1)

        src_model_path = tempfile.mkdtemp()
        model.save(src_model_path)

        dst_model_path = tempfile.mkdtemp()
        converter.convert([
            '--input_format=tf_saved_model',
            '--saved_model_tags=serve',
            '--signature_name=serving_default',
            src_model_path,
            dst_model_path,
        ])

        model_specs = [config_pb2.ModelSpec(name='model1', model_type='tf_js')]
        if multi_model:
            model_specs.append(
                config_pb2.ModelSpec(name='model2', model_type='tf_js'))

        eval_config = config_pb2.EvalConfig(model_specs=model_specs)
        eval_shared_models = [
            self.createTestEvalSharedModel(
                model_name='model1',
                eval_saved_model_path=dst_model_path,
                model_type='tf_js')
        ]
        if multi_model:
            eval_shared_models.append(
                self.createTestEvalSharedModel(
                    model_name='model2',
                    eval_saved_model_path=dst_model_path,
                    model_type='tf_js'))

        schema = text_format.Parse(
            """
        feature {
          name: "input1"
          type: FLOAT
        }
        feature {
          name: "input2"
          type: FLOAT
        }
        feature {
          name: "non_model_feature"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        predictor = tfjs_predict_extractor.TFJSPredictExtractor(
            eval_config=eval_config, eval_shared_model=eval_shared_models)

        examples = [
            self._makeExample(input1=0.0, input2=1.0, non_model_feature=0),
            self._makeExample(input1=1.0, input2=0.0, non_model_feature=1),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | predictor.stage_name >> predictor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    got = got[0]
                    self.assertIn(constants.PREDICTIONS_KEY, got)
                    self.assertLen(got[constants.PREDICTIONS_KEY], 2)

                    for item in got[constants.PREDICTIONS_KEY]:
                        if multi_model:
                            self.assertIn('model1', item)
                            self.assertIn('model2', item)
                            if multi_output:
                                self.assertIn('Identity', item['model1'])
                                self.assertIn('Identity_1', item['model1'])

                        elif multi_output:
                            self.assertIn('Identity', item)
                            self.assertIn('Identity_1', item)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
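The test relies on a `_makeExample` helper that is not shown; it presumably packs the keyword arguments into a `tf.train.Example`. A minimal stand-in under that assumption:

import tensorflow as tf

def make_example(**kwargs):
    # Hypothetical replacement for the test's _makeExample helper: floats
    # become float_list features, everything else an int64_list feature.
    feature = {}
    for name, value in kwargs.items():
        if isinstance(value, float):
            feature[name] = tf.train.Feature(
                float_list=tf.train.FloatList(value=[value]))
        else:
            feature[name] = tf.train.Feature(
                int64_list=tf.train.Int64List(value=[int(value)]))
    return tf.train.Example(features=tf.train.Features(feature=feature))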
Example #10
  def testTFJSPredictExtractorWithSingleOutputModel(self, multi_model,
                                                    multi_output,
                                                    batch_examples,
                                                    batch_inputs):
    input1 = tf.keras.layers.Input(shape=(1,), name='input1')
    input2 = tf.keras.layers.Input(shape=(1,), name='input2')
    inputs = [input1, input2]
    input_layer = tf.keras.layers.concatenate(inputs)
    output_layers = {}
    output_layers['output1'] = (
        tf.keras.layers.Dense(1, activation=tf.nn.sigmoid,
                              name='output1')(input_layer))
    if multi_output:
      output_layers['output2'] = (
          tf.keras.layers.Dense(1, activation=tf.nn.sigmoid,
                                name='output2')(input_layer))

    model = tf.keras.models.Model(inputs, output_layers)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(lr=.001),
        loss=tf.keras.losses.binary_crossentropy,
        metrics=['accuracy'])

    train_features = {'input1': [[0.0], [1.0]], 'input2': [[1.0], [0.0]]}
    labels = {'output1': [[1], [0]]}
    if multi_output:
      labels['output2'] = [[1], [0]]

    example_weights = {'output1': [1.0, 0.5]}
    if multi_output:
      example_weights['output2'] = [1.0, 0.5]
    dataset = tf.data.Dataset.from_tensor_slices(
        (train_features, labels, example_weights))
    dataset = dataset.shuffle(buffer_size=1).repeat().batch(2)
    model.fit(dataset, steps_per_epoch=1)

    src_model_path = tempfile.mkdtemp()
    model.save(src_model_path)

    dst_model_path = tempfile.mkdtemp()
    converter.convert([
        '--input_format=tf_saved_model',
        '--saved_model_tags=serve',
        '--signature_name=serving_default',
        src_model_path,
        dst_model_path,
    ])

    model_specs = [config.ModelSpec(name='model1', model_type='tf_js')]
    if multi_model:
      model_specs.append(config.ModelSpec(name='model2', model_type='tf_js'))

    eval_config = config.EvalConfig(model_specs=model_specs)
    eval_shared_models = [
        self.createTestEvalSharedModel(
            model_name='model1',
            eval_saved_model_path=dst_model_path,
            model_type='tf_js')
    ]
    if multi_model:
      eval_shared_models.append(
          self.createTestEvalSharedModel(
              model_name='model2',
              eval_saved_model_path=dst_model_path,
              model_type='tf_js'))

    desired_batch_size = 2 if batch_examples else None
    predictor = tfjs_predict_extractor.TFJSPredictExtractor(
        eval_config=eval_config,
        eval_shared_model=eval_shared_models,
        desired_batch_size=desired_batch_size)

    predict_features = [
        {
            'input1': np.array([0.0], dtype=np.float32),
            'input2': np.array([1.0], dtype=np.float32),
            'non_model_feature': np.array([0]),  # should be ignored by model
        },
        {
            'input1': np.array([1.0], dtype=np.float32),
            'input2': np.array([0.0], dtype=np.float32),
            'non_model_feature': np.array([1]),  # should be ignored by model
        }
    ]

    if batch_inputs:
      predict_features = [{k: np.expand_dims(v, 0)
                           for k, v in p.items()}
                          for p in predict_features]

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create(predict_features)
          | 'FeaturesToExtracts' >>
          beam.Map(lambda x: {constants.FEATURES_KEY: x})
          | predictor.stage_name >> predictor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 2)
          # We can't verify the actual predictions, but we can verify the keys.
          for item in got:
            self.assertIn(constants.PREDICTIONS_KEY, item)

            if multi_model:
              self.assertIn('model1', item[constants.PREDICTIONS_KEY])
              self.assertIn('model2', item[constants.PREDICTIONS_KEY])
              if multi_output:
                self.assertIn('Identity',
                              item[constants.PREDICTIONS_KEY]['model1'])
                self.assertIn('Identity_1',
                              item[constants.PREDICTIONS_KEY]['model1'])

            elif multi_output:
              self.assertIn('Identity', item[constants.PREDICTIONS_KEY])
              self.assertIn('Identity_1', item[constants.PREDICTIONS_KEY])

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result, label='result')