import os
import pickle
import re
import time

import utils      # project-local helpers (order_dictionary, reverse_dictionary, ...)
import utils_nlp  # project-local NLP helpers (load_pretrained_token_embeddings, ...)
# The NER class instantiated below is assumed to be defined elsewhere in this project.


def restore_model_trained(parameter_pathfile='', model_pathfile='',
                          dataset_pathfile='', embedding_filepath='',
                          model_folder=''):
    # When a model folder is given, derive the individual file paths from it;
    # otherwise use the explicitly supplied paths.
    if model_folder != '':
        parameter_pathfile = os.path.join(model_folder, 'parameters.pickle')
        dataset_pathfile = os.path.join(model_folder, 'dataset.pickle')
        model_pathfile = os.path.join(model_folder, 'model.ckpt')
    parameters = pickle.load(open(parameter_pathfile, 'rb'))
    token_to_vector = utils_nlp.load_pretrained_token_embeddings(embedding_filepath)
    dataset = pickle.load(open(dataset_pathfile, 'rb'))
    name_entity = NER(parameters, dataset, token_to_vector)
    name_entity.restore_model_trained(
        model_pathfile, dataset_pathfile, embedding_filepath,
        character_dimension=parameters['character_embedding_dimension'],
        token_dimension=parameters['token_embedding_dimension'])
    return name_entity
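# A minimal usage sketch (an addition, not from the original source): restore a
# trained model from a folder containing parameters.pickle, dataset.pickle, and
# model.ckpt, as restore_model_trained() expects. Both paths below are
# hypothetical placeholders.
def _example_restore_from_folder():
    return restore_model_trained(
        model_folder='output/my_trained_model',       # hypothetical path
        embedding_filepath='data/glove.6B.100d.txt')  # hypothetical path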
def load_pretrained_token_embeddings(self, sess, dataset, parameters):
    if parameters['token_pretrained_embedding_filepath'] == '':
        return
    # Load embeddings
    start_time = time.time()
    print('Load token embeddings... ', end='', flush=True)
    token_to_vector = utils_nlp.load_pretrained_token_embeddings(parameters)
    initial_weights = sess.run(self.token_embedding_weights.read_value())
    number_of_loaded_word_vectors = 0
    number_of_token_original_case_found = 0
    number_of_token_lowercase_found = 0
    number_of_token_digits_replaced_with_zeros_found = 0
    number_of_token_lowercase_and_digits_replaced_with_zeros_found = 0
    # Look up each vocabulary token in the pretrained vectors, falling back to its
    # lowercased and/or digit-normalized (every digit replaced by '0') variants.
    for token in dataset.token_to_index.keys():
        if token in token_to_vector.keys():
            initial_weights[dataset.token_to_index[token]] = token_to_vector[token]
            number_of_token_original_case_found += 1
        elif parameters['check_for_lowercase'] and \
                token.lower() in token_to_vector.keys():
            initial_weights[dataset.token_to_index[token]] = \
                token_to_vector[token.lower()]
            number_of_token_lowercase_found += 1
        elif parameters['check_for_digits_replaced_with_zeros'] and \
                re.sub(r'\d', '0', token) in token_to_vector.keys():
            initial_weights[dataset.token_to_index[token]] = \
                token_to_vector[re.sub(r'\d', '0', token)]
            number_of_token_digits_replaced_with_zeros_found += 1
        elif parameters['check_for_lowercase'] and \
                parameters['check_for_digits_replaced_with_zeros'] and \
                re.sub(r'\d', '0', token.lower()) in token_to_vector.keys():
            initial_weights[dataset.token_to_index[token]] = \
                token_to_vector[re.sub(r'\d', '0', token.lower())]
            number_of_token_lowercase_and_digits_replaced_with_zeros_found += 1
        else:
            continue
        number_of_loaded_word_vectors += 1
    elapsed_time = time.time() - start_time
    print('done ({0:.2f} seconds)'.format(elapsed_time))
    print("number_of_token_original_case_found: {0}".format(
        number_of_token_original_case_found))
    print("number_of_token_lowercase_found: {0}".format(
        number_of_token_lowercase_found))
    print("number_of_token_digits_replaced_with_zeros_found: {0}".format(
        number_of_token_digits_replaced_with_zeros_found))
    print("number_of_token_lowercase_and_digits_replaced_with_zeros_found: {0}".format(
        number_of_token_lowercase_and_digits_replaced_with_zeros_found))
    print('number_of_loaded_word_vectors: {0}'.format(number_of_loaded_word_vectors))
    print("dataset.vocabulary_size: {0}".format(dataset.vocabulary_size))
    # Write the (partially) pretrained weights back into the embedding variable.
    sess.run(self.token_embedding_weights.assign(initial_weights))
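# Hedged sketch (an assumption, not the project's actual utils_nlp implementation)
# of the token_to_vector mapping consumed above: a dict from token string to its
# embedding vector, e.g. parsed from a GloVe-style text file with one
# "token v1 v2 ... vN" entry per line.
def _example_load_embeddings(filepath):
    import numpy as np  # assumed available; the embedding weights above are numpy arrays
    token_to_vector = {}
    with open(filepath, encoding='utf-8') as f:
        for line in f:
            fields = line.rstrip().split(' ')
            if len(fields) < 2:
                continue
            token_to_vector[fields[0]] = np.asarray(fields[1:], dtype=np.float32)
    return token_to_vector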
def load_dataset(self, dataset_filepaths, parameters, token_to_vector=None):
    '''
    dataset_filepaths : dictionary with keys 'train', 'valid', 'test', 'deploy'.
    Loads the word vectors from the prepared embedding file unless
    token_to_vector is passed in.
    '''
    start_time = time.time()
    print('Load dataset... ', end='', flush=True)
    if parameters['token_pretrained_embedding_filepath'] != '':
        if token_to_vector is None:
            token_to_vector = utils_nlp.load_pretrained_token_embeddings(parameters)
    else:
        token_to_vector = {}
    if self.verbose:
        print("len(token_to_vector): {0}".format(len(token_to_vector)))

    # Load the pretraining dataset to ensure that the label-to-index mapping is
    # compatible with the pretrained model, and that token embeddings learned by
    # the pretrained model are loaded properly.
    all_tokens_in_pretraining_dataset = []
    all_characters_in_pretraining_dataset = []
    if parameters['use_pretrained_model']:
        pretraining_dataset = pickle.load(
            open(os.path.join(parameters['pretrained_model_folder'],
                              'dataset.pickle'), 'rb'))
        # Tokens and characters saved from the previous training run.
        all_tokens_in_pretraining_dataset = pretraining_dataset.index_to_token.values()
        all_characters_in_pretraining_dataset = pretraining_dataset.index_to_character.values()

    remap_to_unk_count_threshold = 1
    self.UNK_TOKEN_INDEX = 0  # index reserved for unknown tokens
    self.PADDING_CHARACTER_INDEX = 0
    self.tokens_mapped_to_unk = []  # tokens that end up mapped to UNK
    self.UNK = 'UNK'
    self.unique_labels = []  # labels that exist in the dataset
    labels = {}           # labels per split: {'train': ..., 'valid': ..., ...}
    tokens = {}           # tokens per split
    label_count = {}      # label counts per split
    token_count = {}      # token counts per split
    character_count = {}  # character counts per split
    for dataset_type in ['train', 'valid', 'test', 'deploy']:
        labels[dataset_type], tokens[dataset_type], token_count[dataset_type], label_count[dataset_type], character_count[dataset_type] \
            = self._parse_dataset(dataset_filepaths.get(dataset_type, None))
        if self.verbose:
            print("dataset_type: {0}".format(dataset_type))
            print("len(token_count[dataset_type]): {0}".format(
                len(token_count[dataset_type])))

    # Aggregate token counts over all splits.
    token_count['all'] = {}
    for token in list(token_count['train'].keys()) + list(token_count['valid'].keys()) + \
            list(token_count['test'].keys()) + list(token_count['deploy'].keys()):
        token_count['all'][token] = token_count['train'][token] \
            + token_count['valid'][token] + token_count['test'][token] \
            + token_count['deploy'][token]

    # Add tokens known from the pretrained embeddings or the pretrained model
    # with a count of -1.
    if parameters['load_all_pretrained_token_embeddings']:
        for token in token_to_vector:
            if token not in token_count['all']:
                token_count['all'][token] = -1
                token_count['train'][token] = -1
    for token in all_tokens_in_pretraining_dataset:
        if token not in token_count['all']:
            token_count['all'][token] = -1
            token_count['train'][token] = -1

    # Aggregate character counts over all splits.
    character_count['all'] = {}
    for character in list(character_count['train'].keys()) + list(character_count['valid'].keys()) + \
            list(character_count['test'].keys()) + list(character_count['deploy'].keys()):
        character_count['all'][character] = character_count['train'][character] \
            + character_count['valid'][character] + character_count['test'][character] \
            + character_count['deploy'][character]
    # Add characters known from the pretrained model with a count of -1.
    for character in all_characters_in_pretraining_dataset:
        if character not in character_count['all']:
            character_count['all'][character] = -1
            character_count['train'][character] = -1

    if self.verbose:
        for dataset_type in dataset_filepaths.keys():
            print("dataset_type: {0}".format(dataset_type))
            print("len(token_count[dataset_type]): {0}".format(
                len(token_count[dataset_type])))

    # Aggregate label counts over all splits.
    label_count['all'] = {}
    for label in list(label_count['train'].keys()) + list(label_count['valid'].keys()) + \
            list(label_count['test'].keys()) + list(label_count['deploy'].keys()):
        label_count['all'][label] = label_count['train'][label] \
            + label_count['valid'][label] + label_count['test'][label] \
            + label_count['deploy'][label]

    # Sort token counts by frequency, highest first (ties broken by token,
    # descending); label counts by label, ascending; character counts by
    # frequency, highest first.
    token_count['all'] = utils.order_dictionary(token_count['all'], 'value_key',
                                                reverse=True)
    label_count['all'] = utils.order_dictionary(label_count['all'], 'key',
                                                reverse=False)
    character_count['all'] = utils.order_dictionary(character_count['all'], 'value',
                                                    reverse=True)
    if self.verbose:
        print('character_count[\'all\']: {0}'.format(character_count['all']))

    token_to_index = {}
    token_to_index[self.UNK] = self.UNK_TOKEN_INDEX
    iteration_number = 0
    number_of_unknown_tokens = 0
    if self.verbose:
        print("parameters['remap_unknown_tokens_to_unk']: {0}".format(
            parameters['remap_unknown_tokens_to_unk']))
        print("len(token_count['train'].keys()): {0}".format(
            len(token_count['train'].keys())))
    for token, count in token_count['all'].items():
        if iteration_number == self.UNK_TOKEN_INDEX:
            iteration_number += 1
        # A token is mapped to UNK if it appears neither in the training set,
        # nor in the pretrained word vectors, nor in the pretraining dataset.
        if parameters['remap_unknown_tokens_to_unk'] == 1 and \
                (token_count['train'][token] == 0 or
                 parameters['load_only_pretrained_token_embeddings']) and \
                not utils_nlp.is_token_in_pretrained_embeddings(token, token_to_vector, parameters) and \
                token not in all_tokens_in_pretraining_dataset:
            if self.verbose:
                print("token: {0}".format(token))
                print("token.lower(): {0}".format(token.lower()))
                print(r"re.sub('\d', '0', token.lower()): {0}".format(
                    re.sub(r'\d', '0', token.lower())))
            token_to_index[token] = self.UNK_TOKEN_INDEX
            number_of_unknown_tokens += 1
            self.tokens_mapped_to_unk.append(token)
        else:
            token_to_index[token] = iteration_number
            iteration_number += 1
    if self.verbose:
        print("number_of_unknown_tokens: {0}".format(number_of_unknown_tokens))

    # Tokens that occur rarely in the training set.
    infrequent_token_indices = []
    for token, count in token_count['train'].items():
        if 0 < count <= remap_to_unk_count_threshold:
            infrequent_token_indices.append(token_to_index[token])
    if self.verbose:
        print("len(token_count['train']): {0}".format(len(token_count['train'])))
        print("len(infrequent_token_indices): {0}".format(
            len(infrequent_token_indices)))

    # Ensure that both B- and I- versions exist for each label:
    # first strip the B-, I-, ... prefixes, ...
    labels_without_bio = set()
    for label in label_count['all'].keys():
        new_label = utils_nlp.remove_bio_from_label_name(label)
        labels_without_bio.add(new_label)
    # ... then combine each entity type with every prefix and add any missing
    # combination to the label counts.
    for label in labels_without_bio:
        if label == 'O':
            continue
        if parameters['tagging_format'] == 'bioes':
            prefixes = ['B-', 'I-', 'E-', 'S-']
        else:
            prefixes = ['B-', 'I-']
        for prefix in prefixes:
            l = prefix + label
            if l not in label_count['all']:
                label_count['all'][l] = 0
    # Re-sort label_count by label, ascending.
    label_count['all'] = utils.order_dictionary(label_count['all'], 'key',
                                                reverse=False)

    if parameters['use_pretrained_model']:
        self.unique_labels = sorted(list(pretraining_dataset.label_to_index.keys()))
        # Make sure labels are compatible with the pretraining dataset.
        for label in label_count['all']:
            if label not in pretraining_dataset.label_to_index:
                raise AssertionError(
                    "The label {0} does not exist in the pretraining dataset. ".format(label) +
                    "Please ensure that only the following labels exist in the dataset: {0}".format(
                        ', '.join(self.unique_labels)))
        label_to_index = pretraining_dataset.label_to_index.copy()
    else:
        label_to_index = {}
        iteration_number = 0
        for label, count in label_count['all'].items():
            label_to_index[label] = iteration_number
            iteration_number += 1
            self.unique_labels.append(label)

    if self.verbose:
        print('self.unique_labels: {0}'.format(self.unique_labels))

    character_to_index = {}
    iteration_number = 0
    for character, count in character_count['all'].items():
        if iteration_number == self.PADDING_CHARACTER_INDEX:
            iteration_number += 1
        character_to_index[character] = iteration_number
        iteration_number += 1

    if self.verbose:
        print('token_count[\'train\'][0:10]: {0}'.format(
            list(token_count['train'].items())[0:10]))
    token_to_index = utils.order_dictionary(token_to_index, 'value', reverse=False)
    if self.verbose:
        print('token_to_index: {0}'.format(token_to_index))
    index_to_token = utils.reverse_dictionary(token_to_index)
    if parameters['remap_unknown_tokens_to_unk'] == 1:
        index_to_token[self.UNK_TOKEN_INDEX] = self.UNK
    if self.verbose:
        print('index_to_token: {0}'.format(index_to_token))
        print('label_count[\'train\']: {0}'.format(label_count['train']))
    label_to_index = utils.order_dictionary(label_to_index, 'value', reverse=False)
    if self.verbose:
        print('label_to_index: {0}'.format(label_to_index))
    index_to_label = utils.reverse_dictionary(label_to_index)
    if self.verbose:
        print('index_to_label: {0}'.format(index_to_label))
    character_to_index = utils.order_dictionary(character_to_index, 'value',
                                                reverse=False)
    index_to_character = utils.reverse_dictionary(character_to_index)
    if self.verbose:
        print('character_to_index: {0}'.format(character_to_index))
        print('index_to_character: {0}'.format(index_to_character))
        print('labels[\'train\'][0:10]: {0}'.format(labels['train'][0:10]))
        print('tokens[\'train\'][0:10]: {0}'.format(tokens['train'][0:10]))
        # Print sequences of length 1 in the train set.
        for token_sequence, label_sequence in zip(tokens['train'], labels['train']):
            if len(label_sequence) == 1 and label_sequence[0] != 'O':
                print("{0}\t{1}".format(token_sequence[0], label_sequence[0]))

    self.token_to_index = token_to_index          # {token: index}; frequent tokens get low indices, UNK gets 0
    self.index_to_token = index_to_token          # inverse of token_to_index
    self.index_to_character = index_to_character  # inverse of character_to_index
    self.character_to_index = character_to_index  # {character: index}, sorted by frequency, high to low
    self.index_to_label = index_to_label          # inverse of label_to_index
    self.label_to_index = label_to_index          # {label: index}, labels sorted ascending
    if self.verbose:
        print("len(self.token_to_index): {0}".format(len(self.token_to_index)))
        print("len(self.index_to_token): {0}".format(len(self.index_to_token)))
    self.tokens = tokens
    self.labels = labels

    token_indices, label_indices, character_indices_padded, character_indices, token_lengths, characters, label_vector_indices = \
        self._convert_to_indices(dataset_filepaths.keys())
    self.token_indices = token_indices
    self.label_indices = label_indices
    self.character_indices_padded = character_indices_padded
    self.character_indices = character_indices
    self.token_lengths = token_lengths
    self.characters = characters
    self.label_vector_indices = label_vector_indices

    self.number_of_classes = max(self.index_to_label.keys()) + 1
    self.vocabulary_size = max(self.index_to_token.keys()) + 1
    self.alphabet_size = max(self.index_to_character.keys()) + 1
    if self.verbose:
        print("self.number_of_classes: {0}".format(self.number_of_classes))
        print("self.alphabet_size: {0}".format(self.alphabet_size))
        print("self.vocabulary_size: {0}".format(self.vocabulary_size))

    # unique_labels_of_interest is used to compute F1-scores.
    self.unique_labels_of_interest = list(self.unique_labels)
    self.unique_labels_of_interest.remove('O')
    self.unique_label_indices_of_interest = []
    for lab in self.unique_labels_of_interest:
        self.unique_label_indices_of_interest.append(label_to_index[lab])
    self.infrequent_token_indices = infrequent_token_indices
    if self.verbose:
        print('self.unique_labels_of_interest: {0}'.format(
            self.unique_labels_of_interest))
        print('self.unique_label_indices_of_interest: {0}'.format(
            self.unique_label_indices_of_interest))

    elapsed_time = time.time() - start_time
    print('done ({0:.2f} seconds)'.format(elapsed_time))
    return token_to_vector