def test_from_corpus(self):
    self.sentences1_file.seek(0)
    vocabulary = Vocabulary.from_corpus([self.sentences1_file])
    self.assertEqual(vocabulary.num_words(), 10 + 3)
    self.assertEqual(vocabulary.num_classes(), 10 + 3)

    self.sentences1_file.seek(0)
    self.sentences2_file.seek(0)
    vocabulary = Vocabulary.from_corpus([self.sentences1_file,
                                         self.sentences2_file],
                                        3)
    self.assertEqual(vocabulary.num_words(), 10 + 3)
    self.assertEqual(vocabulary.num_classes(), 3 + 3)

    sos_id = vocabulary.word_to_id['<s>']
    eos_id = vocabulary.word_to_id['</s>']
    unk_id = vocabulary.word_to_id['<unk>']
    self.assertEqual(sos_id, 10)
    self.assertEqual(eos_id, 11)
    self.assertEqual(unk_id, 12)
    self.assertEqual(vocabulary.word_id_to_class_id[sos_id], 3)
    self.assertEqual(vocabulary.word_id_to_class_id[eos_id], 4)
    self.assertEqual(vocabulary.word_id_to_class_id[unk_id], 5)

    word_ids = set()
    class_ids = set()
    for word in vocabulary.words():
        if not word.startswith('<'):
            word_id = vocabulary.word_to_id[word]
            word_ids.add(word_id)
            class_ids.add(vocabulary.word_id_to_class_id[word_id])
    self.assertEqual(word_ids, set(range(10)))
    self.assertEqual(class_ids, set(range(3)))
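The ID layout asserted above (normal corpus words get the lowest IDs, then <s>, </s>, and <unk> follow; each special token also gets its own class after the normal classes) can be exercised outside the test harness. A minimal usage sketch, assuming from_corpus accepts any iterable of line-oriented file objects, with io.StringIO standing in for the .txt corpus files and the import path assumed from the package layout:

import io
from theanolm import Vocabulary  # import path assumed

# Hypothetical two-sentence corpus; the tests read sentences.txt instead.
corpus = io.StringIO("yksi kaksi kolme\nkaksi kolme neljä\n")
vocabulary = Vocabulary.from_corpus([corpus], 2)

print(vocabulary.num_words())          # 4 corpus words + 3 special tokens
print(vocabulary.word_to_id['<unk>'])  # special tokens take the highest IDs
print(vocabulary.word_id_to_class_id[
    vocabulary.word_to_id['<s>']])     # each special token is its own class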
def setUp(self):
    script_path = os.path.dirname(os.path.realpath(__file__))
    sentences_path = os.path.join(script_path, 'sentences.txt')
    with open(sentences_path) as sentences_file:
        self.vocabulary = Vocabulary.from_corpus([sentences_file])
        sentences_file.seek(0)
        self.statistics = WordStatistics([sentences_file], self.vocabulary)
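What setUp builds can then be inspected by the individual test methods. A sketch of the kind of check this fixture enables; the unigram_counts and bigram_counts attribute names are assumptions about the WordStatistics interface, not confirmed by the code above:

def test_counts(self):
    # Assumed interface: unigram_counts indexed by word ID,
    # bigram_counts indexed by (left word ID, right word ID).
    sos_id = self.vocabulary.word_to_id['<s>']
    self.assertGreater(self.statistics.unigram_counts[sos_id], 0)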
def main():
    parser = argparse.ArgumentParser(prog='wctool')

    argument_group = parser.add_argument_group("files")
    argument_group.add_argument(
        '--training-set', metavar='FILE', type=TextFileType('r'), nargs='+',
        required=True,
        help='text or .gz files containing training data (one sentence per '
             'line)')
    argument_group.add_argument(
        '--vocabulary', metavar='FILE', type=TextFileType('r'), default=None,
        help='text or .gz file containing a list of words to include in class '
             'forming, and possibly their initial classes')
    argument_group.add_argument(
        '--vocabulary-format', metavar='FORMAT', type=str, default='words',
        help='vocabulary format, one of "words" (one word per line, default), '
             '"classes" (word and class ID per line), "srilm-classes" (class '
             'name, membership probability, and word per line)')
    argument_group.add_argument(
        '--output-file', metavar='FILE', type=TextFileType('w'), default='-',
        help='where to write the word classes (default stdout)')
    argument_group.add_argument(
        '--output-format', metavar='FORMAT', type=str, default='srilm-classes',
        help='format of the output file, one of "classes" (word and class ID '
             'per line), "srilm-classes" (default; class name, membership '
             'probability, and word per line)')
    argument_group.add_argument(
        '--output-frequency', metavar='N', type=int, default=1,
        help='save classes N times per optimization iteration (default 1)')

    argument_group = parser.add_argument_group("optimization")
    argument_group.add_argument(
        '--num-classes', metavar='N', type=int, default=2000,
        help='number of classes to form, if vocabulary is not specified '
             '(default 2000)')
    argument_group.add_argument(
        '--method', metavar='NAME', type=str, default='bigram-theano',
        help='method for creating word classes, one of "bigram-theano", '
             '"bigram-numpy" (default "bigram-theano")')

    argument_group = parser.add_argument_group("logging and debugging")
    argument_group.add_argument(
        '--log-file', metavar='FILE', type=str, default='-',
        help='path where to write log file (default is standard output)')
    argument_group.add_argument(
        '--log-level', metavar='LEVEL', type=str, default='info',
        help='minimum level of events to log, one of "debug", "info", "warn" '
             '(default "info")')
    argument_group.add_argument(
        '--log-interval', metavar='N', type=int, default=1000,
        help='print statistics after every Nth word; quiet if less than one '
             '(default 1000)')

    args = parser.parse_args()

    log_file = args.log_file
    log_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(log_level, int):
        raise ValueError("Invalid logging level requested: " + args.log_level)
    log_format = '%(asctime)s %(funcName)s: %(message)s'
    if args.log_file == '-':
        logging.basicConfig(stream=sys.stdout, format=log_format,
                            level=log_level)
    else:
        logging.basicConfig(filename=log_file, format=log_format,
                            level=log_level)

    if args.vocabulary is None:
        vocabulary = Vocabulary.from_corpus(args.training_set,
                                            args.num_classes)
        for subset_file in args.training_set:
            subset_file.seek(0)
    else:
        vocabulary = Vocabulary.from_file(args.vocabulary,
                                          args.vocabulary_format)
    print("Number of words in vocabulary:", vocabulary.num_words())
    print("Number of word classes:", vocabulary.num_classes())
    print("Number of normal word classes:", vocabulary.num_normal_classes)

    logging.info("Reading word unigram and bigram statistics.")
    statistics = WordStatistics(args.training_set, vocabulary)

    if args.method == 'bigram-theano':
        optimizer = TheanoBigramOptimizer(statistics, vocabulary)
    elif args.method == 'bigram-numpy':
        optimizer = NumpyBigramOptimizer(statistics, vocabulary)
    else:
        raise ValueError("Invalid method requested: " + args.method)

    iteration = 1
    while True:
        logging.info("Starting iteration %d.", iteration)
        num_words = 0
        num_moves = 0
        for word in vocabulary.words():
            start_time = time()
            num_words += 1
            if optimizer.move_to_best_class(word):
                num_moves += 1
            duration = time() - start_time
            if (args.log_interval >= 1) and \
               (num_words % args.log_interval == 0):
                logging.info(
                    "[%d] (%.1f %%) of iteration %d -- moves = %d, "
                    "cost = %.2f, duration = %.1f ms",
                    num_words,
                    num_words / vocabulary.num_words() * 100,
                    iteration,
                    num_moves,
                    optimizer.log_likelihood(),
                    duration * 1000)
            if is_scheduled(num_words, args.output_frequency,
                            vocabulary.num_words()):
                save(optimizer, args.output_file, args.output_format)
        if num_moves == 0:
            break
        iteration += 1

    logging.info("Optimization finished.")
    save(optimizer, args.output_file, args.output_format)
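is_scheduled and save are helpers defined elsewhere in the module. One plausible implementation of is_scheduled that matches the --output-frequency semantics ("save classes N times per optimization iteration") is sketched below; this is a hypothetical reconstruction, not the module's actual code:

def is_scheduled(num_words, frequency, total_words):
    # Fire `frequency` times per iteration, at evenly spaced word counts.
    if frequency < 1:
        return False
    interval = total_words / frequency
    # True exactly when the word counter crosses a checkpoint boundary.
    return int(num_words / interval) > int((num_words - 1) / interval)

With frequency=1 this triggers only once, when the counter reaches the last word of the iteration, which agrees with the default of saving once per iteration.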
def train(args):
    numpy.random.seed(args.random_seed)

    log_file = args.log_file
    log_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(log_level, int):
        print("Invalid logging level requested:", args.log_level)
        sys.exit(1)
    log_format = "%(asctime)s %(funcName)s: %(message)s"
    if args.log_file == "-":
        logging.basicConfig(stream=sys.stdout, format=log_format,
                            level=log_level)
    else:
        logging.basicConfig(filename=log_file, format=log_format,
                            level=log_level)

    if args.debug:
        theano.config.compute_test_value = "warn"
        print("Enabled computing test values for tensor variables.")
        print("Warning: GpuArray backend will fail random number generation!")
    else:
        theano.config.compute_test_value = "off"
    theano.config.profile = args.profile
    theano.config.profile_memory = args.profile

    with h5py.File(args.model_path, "a", driver="core") as state:
        if state.keys():
            print("Reading vocabulary from existing network state.")
            sys.stdout.flush()
            vocabulary = Vocabulary.from_state(state)
        elif args.vocabulary is None:
            print("Constructing vocabulary from training set.")
            sys.stdout.flush()
            vocabulary = Vocabulary.from_corpus(args.training_set,
                                                args.num_classes)
            for training_file in args.training_set:
                training_file.seek(0)
            vocabulary.get_state(state)
        else:
            print("Reading vocabulary from {}.".format(args.vocabulary))
            sys.stdout.flush()
            with open(args.vocabulary, "rt", encoding="utf-8") as vocab_file:
                vocabulary = Vocabulary.from_file(vocab_file,
                                                  args.vocabulary_format)
            if args.vocabulary_format == "classes":
                print("Computing class membership probabilities from "
                      "unigram word counts.")
                sys.stdout.flush()
                vocabulary.compute_probs(args.training_set)
            vocabulary.get_state(state)
        print("Number of words in vocabulary:", vocabulary.num_words())
        print("Number of word classes:", vocabulary.num_classes())

        if args.num_noise_samples > vocabulary.num_classes():
            print("Number of noise samples ({}) is larger than the number of "
                  "classes. This doesn't make sense and would cause sampling "
                  "to fail.".format(args.num_noise_samples))
            sys.exit(1)

        if args.unk_penalty is None:
            ignore_unk = False
            unk_penalty = None
        elif args.unk_penalty == 0:
            ignore_unk = True
            unk_penalty = None
        else:
            ignore_unk = False
            unk_penalty = args.unk_penalty

        num_training_files = len(args.training_set)
        if len(args.weights) > num_training_files:
            print("You specified more weights than training files.")
            sys.exit(1)
        weights = numpy.ones(num_training_files).astype(theano.config.floatX)
        for index, weight in enumerate(args.weights):
            weights[index] = weight

        training_options = {
            "batch_size": args.batch_size,
            "sequence_length": args.sequence_length,
            "validation_frequency": args.validation_frequency,
            "patience": args.patience,
            "stopping_criterion": args.stopping_criterion,
            "max_epochs": args.max_epochs,
            "min_epochs": args.min_epochs,
            "max_annealing_count": args.max_annealing_count,
        }
        logging.debug("TRAINING OPTIONS")
        for option_name, option_value in training_options.items():
            logging.debug("%s: %s", option_name, str(option_value))

        optimization_options = {
            "method": args.optimization_method,
            "epsilon": args.numerical_stability_term,
            "gradient_decay_rate": args.gradient_decay_rate,
            "sqr_gradient_decay_rate": args.sqr_gradient_decay_rate,
            "learning_rate": args.learning_rate,
            "weights": weights,
            "momentum": args.momentum,
            "max_gradient_norm": args.gradient_normalization,
            "cost_function": args.cost,
            "num_noise_samples": args.num_noise_samples,
            "noise_sharing": args.noise_sharing,
            "ignore_unk": ignore_unk,
            "unk_penalty": unk_penalty,
        }
        logging.debug("OPTIMIZATION OPTIONS")
        for option_name, option_value in optimization_options.items():
            if isinstance(option_value, list):
                value_str = ", ".join(str(x) for x in option_value)
                logging.debug("%s: [%s]", option_name, value_str)
            else:
                logging.debug("%s: %s", option_name, str(option_value))

        if len(args.sampling) > len(args.training_set):
            print("You specified more sampling coefficients than training "
                  "files.")
            sys.exit(1)

        print("Creating trainer.")
        sys.stdout.flush()
        trainer = Trainer(training_options, vocabulary, args.training_set,
                          args.sampling)
        trainer.set_logging(args.log_interval)

        print("Building neural network.")
        sys.stdout.flush()
        if args.architecture == "lstm300" or args.architecture == "lstm1500":
            architecture = Architecture.from_package(args.architecture)
        else:
            with open(args.architecture, "rt", encoding="utf-8") as arch_file:
                architecture = Architecture.from_description(arch_file)
        network = Network(architecture, vocabulary, trainer.class_prior_probs,
                          args.noise_dampening,
                          default_device=args.default_device,
                          profile=args.profile)

        print("Compiling optimization function.")
        sys.stdout.flush()
        optimizer = create_optimizer(optimization_options, network,
                                     device=args.default_device,
                                     profile=args.profile)

        if args.print_graph:
            print("Cost function computation graph:")
            theano.printing.debugprint(optimizer.gradient_update_function)

        trainer.initialize(network, state, optimizer)

        if args.validation_file is not None:
            print("Building text scorer for cross-validation.")
            sys.stdout.flush()
            scorer = TextScorer(network, ignore_unk, unk_penalty, args.profile)
            print("Validation text:", args.validation_file.name)
            validation_mmap = mmap.mmap(args.validation_file.fileno(),
                                        0,
                                        prot=mmap.PROT_READ)
            validation_iter = LinearBatchIterator(validation_mmap,
                                                  vocabulary,
                                                  batch_size=args.batch_size,
                                                  max_sequence_length=None)
            trainer.set_validation(validation_iter, scorer)
        else:
            print("Cross-validation will not be performed.")
            validation_iter = None

        print("Training neural network.")
        sys.stdout.flush()
        trainer.train()

        if "layers" not in state.keys():
            print("The model has not been trained. No cross-validations were "
                  "performed or training did not improve the model.")
        elif validation_iter is not None:
            network.set_state(state)
            perplexity = scorer.compute_perplexity(validation_iter)
            print("Best validation set perplexity:", perplexity)
def train(args):
    numpy.random.seed(args.random_seed)

    log_file = args.log_file
    log_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(log_level, int):
        print("Invalid logging level requested:", args.log_level)
        sys.exit(1)
    log_format = '%(asctime)s %(funcName)s: %(message)s'
    if args.log_file == '-':
        logging.basicConfig(stream=sys.stdout, format=log_format,
                            level=log_level)
    else:
        logging.basicConfig(filename=log_file, format=log_format,
                            level=log_level)

    if args.debug:
        theano.config.compute_test_value = 'warn'
    else:
        theano.config.compute_test_value = 'off'
    theano.config.profile = args.profile
    theano.config.profile_memory = args.profile

    with h5py.File(args.model_path, 'a', driver='core') as state:
        if state.keys():
            print("Reading vocabulary from existing network state.")
            sys.stdout.flush()
            vocabulary = Vocabulary.from_state(state)
        elif args.vocabulary is None:
            print("Constructing vocabulary from training set.")
            sys.stdout.flush()
            vocabulary = Vocabulary.from_corpus(args.training_set,
                                                args.num_classes)
            for training_file in args.training_set:
                training_file.seek(0)
            vocabulary.get_state(state)
        else:
            print("Reading vocabulary from {}.".format(args.vocabulary))
            sys.stdout.flush()
            with open(args.vocabulary, 'rt', encoding='utf-8') as vocab_file:
                vocabulary = Vocabulary.from_file(vocab_file,
                                                  args.vocabulary_format)
            if args.vocabulary_format == 'classes':
                print("Computing class membership probabilities from "
                      "unigram word counts.")
                sys.stdout.flush()
                vocabulary.compute_probs(args.training_set)
            vocabulary.get_state(state)
        print("Number of words in vocabulary:", vocabulary.num_words())
        print("Number of word classes:", vocabulary.num_classes())

        print("Building neural network.")
        sys.stdout.flush()
        if args.architecture == 'lstm300' or args.architecture == 'lstm1500':
            architecture = Architecture.from_package(args.architecture)
        else:
            with open(args.architecture, 'rt', encoding='utf-8') as arch_file:
                architecture = Architecture.from_description(arch_file)
        network = Network(vocabulary, architecture, profile=args.profile)
        sys.stdout.flush()

        if args.unk_penalty is None:
            ignore_unk = False
            unk_penalty = None
        elif args.unk_penalty == 0:
            ignore_unk = True
            unk_penalty = None
        else:
            ignore_unk = False
            unk_penalty = args.unk_penalty

        num_training_files = len(args.training_set)
        if len(args.weights) > num_training_files:
            print("You specified more weights than training files.")
            sys.exit(1)
        weights = numpy.ones(num_training_files).astype(theano.config.floatX)
        for index, weight in enumerate(args.weights):
            weights[index] = weight

        print("Building text scorer.")
        scorer = TextScorer(network, ignore_unk, unk_penalty, args.profile)

        validation_mmap = mmap.mmap(args.validation_file.fileno(),
                                    0,
                                    prot=mmap.PROT_READ)
        validation_iter = LinearBatchIterator(validation_mmap,
                                              vocabulary,
                                              batch_size=args.batch_size,
                                              max_sequence_length=None)

        optimization_options = {
            'method': args.optimization_method,
            'epsilon': args.numerical_stability_term,
            'gradient_decay_rate': args.gradient_decay_rate,
            'sqr_gradient_decay_rate': args.sqr_gradient_decay_rate,
            'learning_rate': args.learning_rate,
            'weights': weights,
            'momentum': args.momentum,
            'max_gradient_norm': args.gradient_normalization,
            'cost_function': args.cost,
            'num_noise_samples': args.num_noise_samples,
            'ignore_unk': ignore_unk,
            'unk_penalty': unk_penalty
        }
        logging.debug("OPTIMIZATION OPTIONS")
        for option_name, option_value in optimization_options.items():
            if isinstance(option_value, list):
                value_str = ', '.join(str(x) for x in option_value)
                logging.debug("%s: [%s]", option_name, value_str)
            else:
                logging.debug("%s: %s", option_name, str(option_value))

        training_options = {
            'strategy': args.training_strategy,
            'batch_size': args.batch_size,
            'sequence_length': args.sequence_length,
            'validation_frequency': args.validation_frequency,
            'patience': args.patience,
            'stopping_criterion': args.stopping_criterion,
            'max_epochs': args.max_epochs,
            'min_epochs': args.min_epochs,
            'max_annealing_count': args.max_annealing_count
        }
        logging.debug("TRAINING OPTIONS")
        for option_name, option_value in training_options.items():
            logging.debug("%s: %s", option_name, str(option_value))

        print("Building neural network trainer.")
        sys.stdout.flush()
        if len(args.sampling) > len(args.training_set):
            print("You specified more sampling coefficients than training "
                  "files.")
            sys.exit(1)
        trainer = create_trainer(
            training_options, optimization_options,
            network, vocabulary, scorer,
            args.training_set, args.sampling, validation_iter,
            state, args.profile)
        trainer.set_logging(args.log_interval)

        print("Training neural network.")
        sys.stdout.flush()
        trainer.train()

        if 'layers' not in state.keys():
            print("The model has not been trained. No cross-validations were "
                  "performed or training did not improve the model.")
        else:
            network.set_state(state)
            perplexity = scorer.compute_perplexity(validation_iter)
            print("Best validation set perplexity:", perplexity)
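The same --unk-penalty convention recurs in each version of train() shown here: None scores <unk> with the model, 0 excludes <unk> tokens from scoring, and any other value substitutes a fixed log-probability. Restated as a standalone helper (hypothetical; the scripts inline this logic):

def unk_handling(unk_penalty_arg):
    if unk_penalty_arg is None:
        return False, None            # score <unk> like any other word
    if unk_penalty_arg == 0:
        return True, None             # ignore <unk> tokens entirely
    return False, unk_penalty_arg     # fixed log-prob for every <unk>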
def train(args):
    numpy.random.seed(args.random_seed)

    log_file = args.log_file
    log_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(log_level, int):
        raise ValueError("Invalid logging level requested: " + args.log_level)
    log_format = '%(asctime)s %(funcName)s: %(message)s'
    if args.log_file == '-':
        logging.basicConfig(stream=sys.stdout, format=log_format,
                            level=log_level)
    else:
        logging.basicConfig(filename=log_file, format=log_format,
                            level=log_level)

    if args.debug:
        theano.config.compute_test_value = 'warn'
    else:
        theano.config.compute_test_value = 'off'
    theano.config.profile = args.profile
    theano.config.profile_memory = args.profile

    with h5py.File(args.model_path, 'a', driver='core') as state:
        if state.keys():
            print("Reading vocabulary from existing network state.")
            sys.stdout.flush()
            vocabulary = Vocabulary.from_state(state)
        elif args.vocabulary is None:
            print("Constructing vocabulary from training set.")
            sys.stdout.flush()
            vocabulary = Vocabulary.from_corpus(args.training_set,
                                                args.num_classes)
            for training_file in args.training_set:
                training_file.seek(0)
            vocabulary.get_state(state)
        else:
            print("Reading vocabulary from {}.".format(args.vocabulary))
            sys.stdout.flush()
            with open(args.vocabulary, 'rt', encoding='utf-8') as vocab_file:
                vocabulary = Vocabulary.from_file(vocab_file,
                                                  args.vocabulary_format)
            if args.vocabulary_format == 'classes':
                print("Computing class membership probabilities from "
                      "unigram word counts.")
                sys.stdout.flush()
                vocabulary.compute_probs(args.training_set)
            vocabulary.get_state(state)
        print("Number of words in vocabulary:", vocabulary.num_words())
        print("Number of word classes:", vocabulary.num_classes())

        print("Building neural network.")
        sys.stdout.flush()
        if args.architecture == 'lstm300' or args.architecture == 'lstm1500':
            architecture = Architecture.from_package(args.architecture)
        else:
            with open(args.architecture, 'rt', encoding='utf-8') as arch_file:
                architecture = Architecture.from_description(arch_file)
        network = Network(vocabulary, architecture, profile=args.profile)
        sys.stdout.flush()

        if args.unk_penalty is None:
            ignore_unk = False
            unk_penalty = None
        elif args.unk_penalty == 0:
            ignore_unk = True
            unk_penalty = None
        else:
            ignore_unk = False
            unk_penalty = args.unk_penalty

        num_training_files = len(args.training_set)
        if len(args.weights) > num_training_files:
            print("You specified more weights than training files.")
            sys.exit(1)
        weights = numpy.ones(num_training_files).astype(theano.config.floatX)
        for index, weight in enumerate(args.weights):
            weights[index] = weight

        print("Building text scorer.")
        scorer = TextScorer(network, ignore_unk, unk_penalty, args.profile)

        validation_mmap = mmap.mmap(args.validation_file.fileno(),
                                    0,
                                    prot=mmap.PROT_READ)
        validation_iter = LinearBatchIterator(validation_mmap, vocabulary,
                                              batch_size=32)

        optimization_options = {
            'method': args.optimization_method,
            'epsilon': args.numerical_stability_term,
            'gradient_decay_rate': args.gradient_decay_rate,
            'sqr_gradient_decay_rate': args.sqr_gradient_decay_rate,
            'learning_rate': args.learning_rate,
            'weights': weights,
            'momentum': args.momentum,
            'max_gradient_norm': args.gradient_normalization,
            'ignore_unk': ignore_unk,
            'unk_penalty': unk_penalty
        }
        logging.debug("OPTIMIZATION OPTIONS")
        for option_name, option_value in optimization_options.items():
            if isinstance(option_value, list):
                value_str = ', '.join(str(x) for x in option_value)
                logging.debug("%s: [%s]", option_name, value_str)
            else:
                logging.debug("%s: %s", option_name, str(option_value))

        training_options = {
            'strategy': args.training_strategy,
            'batch_size': args.batch_size,
            'sequence_length': args.sequence_length,
            'validation_frequency': args.validation_frequency,
            'patience': args.patience,
            'stopping_criterion': args.stopping_criterion,
            'max_epochs': args.max_epochs,
            'min_epochs': args.min_epochs,
            'max_annealing_count': args.max_annealing_count
        }
        logging.debug("TRAINING OPTIONS")
        for option_name, option_value in training_options.items():
            logging.debug("%s: %s", option_name, str(option_value))

        print("Building neural network trainer.")
        sys.stdout.flush()
        if len(args.sampling) > len(args.training_set):
            print("You specified more sampling coefficients than training "
                  "files.")
            sys.exit(1)
        trainer = create_trainer(
            training_options, optimization_options,
            network, vocabulary, scorer,
            args.training_set, args.sampling, validation_iter,
            state, args.profile)
        trainer.set_logging(args.log_interval)

        print("Training neural network.")
        sys.stdout.flush()
        trainer.run()

        if not state.keys():
            print("The model has not been trained.")
        else:
            network.set_state(state)
            perplexity = scorer.compute_perplexity(validation_iter)
            print("Best validation set perplexity:", perplexity)
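Each version of train() memory-maps the validation file read-only instead of loading it, so LinearBatchIterator can sweep over it repeatedly between validations without holding a copy in memory. The standard-library pattern in isolation (mmap.PROT_READ is POSIX-only; on Windows the equivalent is access=mmap.ACCESS_READ; the file name is a placeholder):

import mmap

with open('validation.txt', 'rb') as validation_file:  # placeholder path
    data = mmap.mmap(validation_file.fileno(), 0, prot=mmap.PROT_READ)
    # The mapping behaves like a read-only bytes buffer over the file.
    first_line = data.readline()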