Example #1
    def __init__(self, config: dict):
        """
        running_config:
            batch_size: 8
            num_epochs: 20
            outdir: ...
            log_interval_steps: 200
            eval_interval_steps: 200
            save_interval_steps: 200
        """
        self.config = config
        self.config["outdir"] = preprocess_paths(self.config["outdir"])
        # TensorBoard summary writers, one each for train and eval.
        self.train_writer = tf.summary.create_file_writer(
            os.path.join(config["outdir"], "tensorboard", "train"))
        self.eval_writer = tf.summary.create_file_writer(
            os.path.join(config["outdir"], "tensorboard", "eval"))
Example #2
import yaml

def load_yaml(path):
    # preprocess_paths comes from the surrounding project (path normalization).
    with open(preprocess_paths(path), "r", encoding="utf-8") as file:
        return yaml.load(file, Loader=yaml.FullLoader)
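Every example here relies on preprocess_paths, which is defined elsewhere in the project. A plausible minimal stand-in (an assumption about its behavior, not the actual implementation) would expand users and environment variables:

import os

def preprocess_paths(path):
    # Assumed behavior: expand "~" and $VARS, then normalize to an absolute path.
    # The real helper may also handle lists of paths or create directories.
    if path is None:
        return None
    return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))

print(preprocess_paths("~/configs/model.yml"))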
Example #3
    def __init__(self, config: dict, show=False):
        """
        config = {
            "vocabulary": str,
            "spker": str,
            "maplist": str,
            "blank_at_zero": bool,
            "beam_width": int,
            "lm_config": {
                ...
            }
        }
        """
        self.config = config
        self.normalizer = NSWNormalizer
        self.config["vocabulary"] = preprocess_paths(self.config["vocabulary"])
        self.config["spker"] = preprocess_paths(self.config["spker"])
        self.config["maplist"] = preprocess_paths(self.config["maplist"])
        # Build the speaker-name -> index map (one speaker per line).
        with open(self.config["spker"], encoding="utf-8") as f:
            spks = f.readlines()
        self.spker_map = {}
        for idx, spk in enumerate(spks):
            self.spker_map[spk.strip()] = idx
        with open(self.config["maplist"], encoding='utf-8') as f:
            data = f.readlines()
        # Build the pronunciation map: tab-separated "word<TAB>tokens" lines
        # are preferred; space-separated "word tok1 tok2 ..." is the fallback.
        self.map_dict = {}
        for line in data:
            try:
                a, b = line.strip().split('\t')
            except ValueError:
                content = line.split(' ')
                a = content[0]
                b = ' '.join(content[1:])
            a = a.replace('[', '').replace(']', '')
            b = b.split(' ')
            self.map_dict[a] = b

        # Read the vocabulary file, one token per line.
        self.num_classes = 0
        lines = []
        with codecs.open(self.config["vocabulary"], "r", "utf-8") as fin:
            lines.extend(fin.readlines())
        if show:
            logging.info('loaded vocabulary from {}'.format(
                self.config['vocabulary']))
        self.token_to_index = {}
        self.index_to_token = {}
        self.vocab_array = []
        self.tf_vocab_array = tf.constant([], dtype=tf.string)
        self.index_to_unicode_points = tf.constant([], dtype=tf.int32)
        index = 0
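        # When blank_at_zero is set, index 0 is reserved for the blank token;
        # otherwise the blank is appended after the vocabulary entries below.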
        if self.config["blank_at_zero"]:
            self.blank = 0
            index = 1
            self.tf_vocab_array = tf.concat([self.tf_vocab_array, [""]],
                                            axis=0)
            self.index_to_unicode_points = tf.concat(
                [self.index_to_unicode_points, [0]], axis=0)
        for line in lines:
            line = line.strip()  # Strip the '\n' char
            if line.startswith("#") or not line or line == "\n":
                continue
            self.token_to_index[line] = index
            self.index_to_token[index] = line
            self.vocab_array.append(line)
            self.tf_vocab_array = tf.concat([self.tf_vocab_array, [line]],
                                            axis=0)
            upoint = tf.strings.unicode_decode(line, "UTF-8")
            self.index_to_unicode_points = tf.concat(
                [self.index_to_unicode_points, upoint], axis=0)
            index += 1
        self.num_classes = index
        if not self.config["blank_at_zero"]:
            self.blank = index
            self.num_classes += 1
            self.tf_vocab_array = tf.concat([self.tf_vocab_array, [""]],
                                            axis=0)
            self.index_to_unicode_points = tf.concat(
                [self.index_to_unicode_points, [0]], axis=0)
        self.pad = self.blank
        self.stop = -1  # sentinel stop id
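The maplist parsing above accepts two line formats. A standalone demo of that rule, with made-up lines (an illustration, not the project's code):

# Tab-separated "word<TAB>tokens" is tried first; space-separated is the fallback.
lines = ["[ni3]\tn i3", "hao3 h ao3"]
map_dict = {}
for line in lines:
    try:
        a, b = line.strip().split('\t')
    except ValueError:
        head, *rest = line.split(' ')
        a, b = head, ' '.join(rest)
    map_dict[a.replace('[', '').replace(']', '')] = b.split(' ')
print(map_dict)  # {'ni3': ['n', 'i3'], 'hao3': ['h', 'ao3']}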
Example #4
    def __init__(self, decoder_config: dict, show=False):
        """
        decoder_config = {
            "vocabulary": str,
            "blank_at_zero": bool,
            "beam_width": int,
            "model_type": str,
            "lm_config": {
                ...
            }
        }
        """
        self.decoder_config = decoder_config

        self.decoder_config["vocabulary"] = preprocess_paths(
            self.decoder_config["vocabulary"])

        self.scorer = None

        self.num_classes = 0
        lines = []
        with codecs.open(self.decoder_config["vocabulary"], "r",
                         "utf-8") as fin:
            lines.extend(fin.readlines())
        if show:
            print('loaded vocabulary from {}'.format(
                self.decoder_config['vocabulary']))
        self.token_to_index = {}
        self.index_to_token = {}
        self.vocab_array = []
        self.tf_vocab_array = tf.constant([], dtype=tf.string)
        self.index_to_unicode_points = tf.constant([], dtype=tf.int32)
        index = 0
        if self.decoder_config["blank_at_zero"]:
            self.blank = 0
            index = 1
            self.tf_vocab_array = tf.concat([self.tf_vocab_array, [""]],
                                            axis=0)
            self.index_to_unicode_points = tf.concat(
                [self.index_to_unicode_points, [0]], axis=0)
        for line in lines:
            line = line.strip()  # Strip the '\n' char
            # Skip comment lines and empty lines
            if line.startswith("#") or not line or line == "\n":
                continue
            self.token_to_index[line] = index
            self.index_to_token[index] = line
            self.vocab_array.append(line)
            self.tf_vocab_array = tf.concat([self.tf_vocab_array, [line]],
                                            axis=0)
            upoint = tf.strings.unicode_decode(line, "UTF-8")
            self.index_to_unicode_points = tf.concat(
                [self.index_to_unicode_points, upoint], axis=0)
            index += 1
        self.num_classes = index
        if not self.decoder_config["blank_at_zero"]:
            self.blank = index
            self.num_classes += 1
            self.tf_vocab_array = tf.concat([self.tf_vocab_array, [""]],
                                            axis=0)
            self.index_to_unicode_points = tf.concat(
                [self.index_to_unicode_points, [0]], axis=0)
        # Start/stop/pad ids depend on the model family.
        if self.decoder_config['model_type'] == 'Transducer':
            self.stop = self.endid()
            self.pad = self.blank
            self.start = self.startid()
        elif self.decoder_config['model_type'] in ('LAS', 'LM'):
            self.stop = self.endid()
            self.pad = 0
            self.start = self.startid()
        else:
            self.pad = 0
            self.stop = -1
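A hypothetical way to drive this constructor; "TextFeaturizer" stands in for whichever class owns this __init__, and the vocabulary path is a placeholder:

decoder_config = {
    "vocabulary": "vocab.txt",   # one token per line, "#"-prefixed lines skipped
    "blank_at_zero": True,       # reserve index 0 for the blank token
    "beam_width": 10,
    "model_type": "LAS",         # selects the start/stop/pad ids above
    "lm_config": {},
}
featurizer = TextFeaturizer(decoder_config, show=True)
ids = [featurizer.token_to_index[t] for t in ("h", "i")]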