Example #1
  def testTrain(self, split_data):
    input_flags = self._GetDefaultFlags(split_data)
    input_flags = model_flags.update_flags(input_flags)
    train.train(input_flags)
    self.assertTrue(
        tf.io.gfile.exists(os.path.join(input_flags.train_dir, 'graph.pbtxt')))
    self.assertTrue(
        tf.io.gfile.exists(os.path.join(input_flags.train_dir, 'labels.txt')))
    self.assertTrue(
        tf.io.gfile.exists(
            os.path.join(input_flags.train_dir, 'accuracy_last.txt')))
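These snippets omit their surrounding imports. Assuming they come from Google's kws_streaming keyword-spotting project, the enclosing files would import roughly the following; the module paths are assumptions based on that project's layout and should be verified against the repository.

# Rough sketch of the imports these examples rely on; the kws_streaming
# module paths are assumptions, not verified here.
import json
import os

from absl import logging
import tensorflow as tf

from kws_streaming.layers import modes
from kws_streaming.layers.modes import Modes
from kws_streaming.models import model_flags
from kws_streaming.models import model_utils
from kws_streaming.train import test
from kws_streaming.train import train

# FLAGS is defined elsewhere, e.g. by the project's argument parser.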
Example #2
def main(_):
    # Update flags
    flags = model_flags.update_flags(FLAGS)

    if flags.train:
        # Create model folders where logs and model will be stored
        os.makedirs(flags.train_dir)
        os.mkdir(flags.summaries_dir)

        # Model training
        train.train(flags)
    elif not os.path.isdir(flags.train_dir):
        raise ValueError(
            'model is not trained, set "--train 1" and retrain it')

    # write all flags settings into json
    with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:
        json.dump(flags.__dict__, f)

    # convert to SavedModel
    test.convert_model_saved(flags, 'non_stream',
                             modes.Modes.NON_STREAM_INFERENCE)
    try:
        test.convert_model_saved(flags, 'stream_state_internal',
                                 modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
    except (ValueError, IndexError) as e:
        logging.info('FAILED to run TF streaming: %s', e)

    logging.info('run TF non streaming model accuracy evaluation')
    # with TF
    folder_name = 'tf'
    test.tf_non_stream_model_accuracy(flags, folder_name)

    # with TF.
    # We can apply a non streaming model to streaming data by running inference
    # every 200ms (for example), so that the total latency is similar to a
    # streaming model executed every 20ms.
    # To measure the impact of sampling on model accuracy,
    # we introduce time_shift_ms during accuracy evaluation.
    # Convert milliseconds to samples:
    time_shift_samples = int(
        (flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)
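    # For example (assumed values, not taken from the flags above):
    # time_shift_ms=100 at sample_rate=16000 with MS_PER_SECOND=1000 gives
    # int(100 * 16000 / 1000) = 1600 samples of shift.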
    test.tf_non_stream_model_accuracy(
        flags,
        folder_name,
        time_shift_samples,
        accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')

    name2opt = {
        '': None,
        'quantize_opt_for_size_': [tf.lite.Optimize.DEFAULT],
    }
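    # The empty key keeps a plain float TFLite conversion; the prefixed key
    # passes tf.lite.Optimize.DEFAULT, so the converter applies its default
    # post-training optimizations (typically dynamic-range weight quantization)
    # and writes results into folders carrying the 'quantize_opt_for_size_'
    # prefix.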

    for opt_name, optimizations in name2opt.items():

        if (opt_name and flags.feature_type == 'mfcc_tf'
                and flags.preprocess == 'raw'):
            logging.info(
                'feature type mfcc_tf needs quantization aware training '
                'for quantization - it is not implemented')
            continue

        folder_name = opt_name + 'tflite_non_stream'
        file_name = 'non_stream.tflite'
        mode = modes.Modes.NON_STREAM_INFERENCE
        test.convert_model_tflite(flags,
                                  folder_name,
                                  mode,
                                  file_name,
                                  optimizations=optimizations)
        test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)

        # these models use a bidirectional RNN (bi-rnn), so they are not
        # streamable by default; models using striding or pooling are also
        # not supported for streaming yet
        non_streamable_models = {'att_mh_rnn', 'att_rnn', 'tc_resnet'}

        model_is_streamable = True
        if flags.model_name in non_streamable_models:
            model_is_streamable = False
        # below models can use striding in time dimension,
        # but this is currently unsupported
        elif flags.model_name == 'cnn':
            for strides in model_utils.parse(flags.cnn_strides):
                if strides[0] > 1:
                    model_is_streamable = False
                    break
        elif flags.model_name == 'ds_cnn':
            if model_utils.parse(flags.cnn1_strides)[0] > 1:
                model_is_streamable = False
            for strides in model_utils.parse(flags.dw2_strides):
                if strides[0] > 1:
                    model_is_streamable = False
                    break

        # set input data shape for testing inference in streaming mode
        flags.data_shape = modes.get_input_data_shape(
            flags, modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE)

        # if model can be streamed, then run conversion/evaluation in streaming mode
        if model_is_streamable:
            # ---------------- TF streaming model accuracy evaluation ----------------
            # Streaming model with external state evaluation using TF with state reset
            if not opt_name:
                logging.info(
                    'run TF evaluation only without optimization/quantization')
                try:
                    folder_name = 'tf'
                    test.tf_stream_state_external_model_accuracy(
                        flags,
                        folder_name,
                        accuracy_name=
                        'stream_state_external_model_accuracy_sub_set_reset1.txt',
                        reset_state=True
                    )  # with state reset between test sequences

                    # Streaming (with external state) evaluation using TF no state reset
                    test.tf_stream_state_external_model_accuracy(
                        flags,
                        folder_name,
                        accuracy_name=
                        'stream_state_external_model_accuracy_sub_set_reset0.txt',
                        reset_state=False)  # without state reset

                    # Streaming (with internal state) evaluation using TF no state reset
                    test.tf_stream_state_internal_model_accuracy(
                        flags, folder_name)
                except (ValueError, IndexError) as e:
                    logging.info('FAILED to run TF streaming: %s', e)

            logging.info('run TFlite streaming model accuracy evaluation')
            try:
                # convert model to TFlite
                folder_name = opt_name + 'tflite_stream_state_external'
                file_name = 'stream_state_external.tflite'
                mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
                test.convert_model_tflite(flags,
                                          folder_name,
                                          mode,
                                          file_name,
                                          optimizations=optimizations)

                # Streaming model accuracy evaluation with TFLite with state reset
                test.tflite_stream_state_external_model_accuracy(
                    flags,
                    folder_name,
                    file_name,
                    accuracy_name=
                    'tflite_stream_state_external_model_accuracy_reset1.txt',
                    reset_state=True)

                # Streaming model accuracy evaluation with TFLite without state reset
                test.tflite_stream_state_external_model_accuracy(
                    flags,
                    folder_name,
                    file_name,
                    accuracy_name=
                    'tflite_stream_state_external_model_accuracy_reset0.txt',
                    reset_state=False)
            except (ValueError, IndexError) as e:
                logging.info('FAILED to run TFLite streaming: %s', e)
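For reference, a main(_) like the one above is usually launched through an absl-style entry point. The sketch below assumes that setup (FLAGS being defined elsewhere via absl or a project-specific parser); it is an illustration, not the project's actual launcher.

# Hypothetical entry point for a main(_) like the ones in these examples.
from absl import app

if __name__ == '__main__':
    app.run(main)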
Example #3

def main(_):
    # Update flags
    flags = model_flags.update_flags(FLAGS)

    if flags.train:
        # Create model folders where logs and model will be stored
        os.makedirs(flags.train_dir)
        os.mkdir(flags.summaries_dir)

        # Model training
        train.train(flags)

    # write all flags settings into json
    with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:
        json.dump(flags.__dict__, f)

    # convert to SavedModel
    test.convert_model_saved(flags, 'non_stream', Modes.NON_STREAM_INFERENCE)
    test.convert_model_saved(flags, 'stream_state_internal',
                             Modes.STREAM_INTERNAL_STATE_INFERENCE)

    # ---------------- non streaming model accuracy evaluation ----------------
    # with TF
    folder = 'tf'
    test.tf_non_stream_model_accuracy(flags, folder)

    # with TF.
    # We can apply a non streaming model to streaming data by running inference
    # every 200ms (for example), so that the total latency is similar to a
    # streaming model executed every 20ms.
    # To measure the impact of sampling on model accuracy,
    # we introduce time_shift_ms during accuracy evaluation.
    # Convert milliseconds to samples:
    time_shift_samples = int(
        (flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)
    test.tf_non_stream_model_accuracy(
        flags,
        folder,
        time_shift_samples,
        accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')

    # with TFLite
    folder = 'tflite_non_stream'
    fname = 'non_stream.tflite'
    mode = Modes.NON_STREAM_INFERENCE
    test.convert_model_tflite(flags, folder, mode, fname)
    test.tflite_non_stream_model_accuracy(flags, folder, fname)

    # ---------------- TF streaming model accuracy evaluation ----------------
    # Streaming model (with external state) evaluation using TF (with state reset)
    folder = 'tf'
    test.tf_stream_state_external_model_accuracy(
        flags,
        folder,
        accuracy_name='stream_state_external_model_accuracy_sub_set_reset1.txt',
        reset_state=True)  # with state reset between test sequences

    # Streaming model (with external state) evaluation using TF (no state reset)
    test.tf_stream_state_external_model_accuracy(
        flags,
        folder,
        accuracy_name='stream_state_external_model_accuracy_sub_set_reset0.txt',
        reset_state=False)  # without state reset

    # Streaming model (with internal state) evaluation using TF (no state reset)
    test.tf_stream_state_internal_model_accuracy(flags, folder)

    # --------------- TFlite streaming model accuracy evaluation ---------------
    # convert model to TFlite
    folder = 'tflite_stream_state_external'
    fname = 'stream_state_external.tflite'
    mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
    test.convert_model_tflite(flags, folder, mode, fname)

    # Streaming model accuracy evaluation with TFLite with state reset
    test.tflite_stream_state_external_model_accuracy(
        flags,
        folder,
        fname,
        accuracy_name='tflite_stream_state_external_model_accuracy_reset1.txt',
        reset_state=True)

    # Streaming model accuracy evaluation with TFLite without state reset
    test.tflite_stream_state_external_model_accuracy(
        flags,
        folder,
        fname,
        accuracy_name='tflite_stream_state_external_model_accuracy_reset0.txt',
        reset_state=False)
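Each example serializes the resolved flags to flags.json so a run can be inspected or reproduced later. Below is a small illustrative sketch showing how that file could be read back into an attribute-style object; load_flags and the use of argparse.Namespace are assumptions for illustration, not part of the project.

import argparse
import json
import os

def load_flags(train_dir):
    # Reload the flags.json written by main() into an attribute-style
    # namespace, so the same settings can be passed to evaluation helpers.
    with open(os.path.join(train_dir, 'flags.json'), 'rt') as f:
        return argparse.Namespace(**json.load(f))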
Example #4
def main(_):
    # Update flags
    flags = model_flags.update_flags(FLAGS)

    if flags.train:
        # Create model folders where logs and model will be stored
        os.makedirs(flags.train_dir)
        os.mkdir(flags.summaries_dir)

        # Model training
        train.train(flags)

    # write all flags settings into json
    with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:
        json.dump(flags.__dict__, f)

    # convert to SavedModel
    test.convert_model_saved(flags, 'non_stream', Modes.NON_STREAM_INFERENCE)
    test.convert_model_saved(flags, 'stream_state_internal',
                             Modes.STREAM_INTERNAL_STATE_INFERENCE)

    logging.info('run TF non streaming model accuracy evaluation')
    # with TF
    folder_name = 'tf'
    test.tf_non_stream_model_accuracy(flags, folder_name)

    # with TF.
    # We can apply a non streaming model to streaming data by running inference
    # every 200ms (for example), so that the total latency is similar to a
    # streaming model executed every 20ms.
    # To measure the impact of sampling on model accuracy,
    # we introduce time_shift_ms during accuracy evaluation.
    # Convert milliseconds to samples:
    time_shift_samples = int(
        (flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)
    test.tf_non_stream_model_accuracy(
        flags,
        folder_name,
        time_shift_samples,
        accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')

    name2opt = {
        '': None,
        'quantize_opt_for_size_': [tf.lite.Optimize.OPTIMIZE_FOR_SIZE],
    }
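    # Note: in current TF releases tf.lite.Optimize.OPTIMIZE_FOR_SIZE is a
    # deprecated alias that behaves the same as tf.lite.Optimize.DEFAULT.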

    for opt_name, optimizations in name2opt.items():

        if opt_name and flags.feature_type == 'mfcc_tf':
            logging.info(
                'feature type mfcc_tf needs quantization aware training '
                'for quantization - it is not implemented')
            continue

        folder_name = opt_name + 'tflite_non_stream'
        file_name = 'non_stream.tflite'
        mode = Modes.NON_STREAM_INFERENCE
        test.convert_model_tflite(flags,
                                  folder_name,
                                  mode,
                                  file_name,
                                  optimizations=optimizations)
        test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)

        # ---------------- TF streaming model accuracy evaluation ----------------
        # Streaming model (with external state) evaluation using TF with state reset
        if not opt_name:
            logging.info(
                'run TF evaluation only without optimization/quantization')
            try:
                folder_name = 'tf'
                test.tf_stream_state_external_model_accuracy(
                    flags,
                    folder_name,
                    accuracy_name=
                    'stream_state_external_model_accuracy_sub_set_reset1.txt',
                    reset_state=True
                )  # with state reset between test sequences

                # Streaming (with external state) evaluation using TF no state reset
                test.tf_stream_state_external_model_accuracy(
                    flags,
                    folder_name,
                    accuracy_name=
                    'stream_state_external_model_accuracy_sub_set_reset0.txt',
                    reset_state=False)  # without state reset

                # Streaming (with internal state) evaluation using TF no state reset
                test.tf_stream_state_internal_model_accuracy(
                    flags, folder_name)
            except ValueError as e:
                logging.error('FAILED to run TF streaming: %s', e)

        logging.info('run TFlite streaming model accuracy evaluation')
        try:
            # convert model to TFlite
            folder_name = opt_name + 'tflite_stream_state_external'
            file_name = 'stream_state_external.tflite'
            mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE
            test.convert_model_tflite(flags,
                                      folder_name,
                                      mode,
                                      file_name,
                                      optimizations=optimizations)

            # Streaming model accuracy evaluation with TFLite with state reset
            test.tflite_stream_state_external_model_accuracy(
                flags,
                folder_name,
                file_name,
                accuracy_name=
                'tflite_stream_state_external_model_accuracy_reset1.txt',
                reset_state=True)

            # Streaming model accuracy evaluation with TFLite without state reset
            test.tflite_stream_state_external_model_accuracy(
                flags,
                folder_name,
                file_name,
                accuracy_name=
                'tflite_stream_state_external_model_accuracy_reset0.txt',
                reset_state=False)
        except ValueError as e:
            logging.error('FAILED to run TFLite streaming: %s', e)
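After conversion, the generated non_stream.tflite can be sanity-checked directly with the TFLite interpreter. The sketch below assumes the converted file ends up under flags.train_dir using the folder and file names from the examples above; the exact output location is an assumption, so adjust the path if the project lays out its outputs differently.

# Sketch: run one dummy inference on the converted non-streaming TFLite model.
import os

import numpy as np
import tensorflow as tf

def smoke_test_tflite(train_dir):
    # Assumed output layout: <train_dir>/tflite_non_stream/non_stream.tflite
    model_path = os.path.join(train_dir, 'tflite_non_stream',
                              'non_stream.tflite')
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    # Feed an all-zeros input with the model's expected shape and dtype.
    dummy = np.zeros(input_details['shape'], dtype=input_details['dtype'])
    interpreter.set_tensor(input_details['index'], dummy)
    interpreter.invoke()
    return interpreter.get_tensor(output_details['index'])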