def call_find_template_set(arg_json, arg_nlp, arg_templates):
    """Extract an equation template from every labeled example, deduplicate
    the templates by equality, and write the unique templates plus a
    word-problem-index -> template-index map to ``arg_templates`` as JSON.

    :param arg_json: path/source passed to ``LabeledExample.read``.
    :param arg_nlp: location passed to ``NLP.read`` for each example index.
    :param arg_templates: output path for the JSON template file.
    """
    examples = LabeledExample.read(arg_json)
    # dict.itervalues() was removed in Python 3; this file already uses
    # Python-3-only annotations elsewhere, so use .values().
    indices = [e.index for e in examples.values()]
    natural_language = {i: NLP.read(arg_nlp, i) for i in indices}
    word_problems = [WordProblem(examples[i], natural_language[i])
                     for i in indices]
    templates = [wp.extract_template() for wp in word_problems]

    # Deduplicate by equality with a linear scan: templates may not be
    # hashable, so a set/dict cannot be assumed safe here.
    unique = list()
    wp_template_map = dict()
    for wp in word_problems:
        template = wp.template
        wp_index = wp.labeled_example.index
        found_template = False
        for unique_i, u in enumerate(unique):
            if template == u:
                wp_template_map[wp_index] = unique_i
                found_template = True
                break
        if not found_template:
            unique.append(template)
            wp_template_map[wp_index] = len(unique) - 1

    print('{} total and {} unique templates'.format(len(templates),
                                                    len(unique)))
    with open(arg_templates, 'wt') as f_handle:
        out_json = {'templates': [t.to_json() for t in unique],
                    'wp_template_map': wp_template_map}
        f_handle.write(json.dumps(out_json))
def call_extract_features(arg_json, arg_nlp, arg_templates, arg_parameters):
    """Load word problems and the previously-saved unique templates, build a
    ``FeatureExtractor``, greedily complete one derivation for the first word
    problem, and print its feature vector.

    :param arg_json: path/source passed to ``LabeledExample.read``.
    :param arg_nlp: location passed to ``NLP.read`` for each example index.
    :param arg_templates: JSON file written by ``call_find_template_set``.
    :param arg_parameters: currently unused here (kept for CLI symmetry).
    """
    examples = LabeledExample.read(arg_json)
    # dict.itervalues() was removed in Python 3; use .values().
    indices = [e.index for e in examples.values()]
    natural_language = {i: NLP.read(arg_nlp, i) for i in indices}
    word_problems = [WordProblem(examples[i], natural_language[i])
                     for i in indices]

    with open(arg_templates, 'rt') as f_handle:
        raw = f_handle.read()
    parsed = json.loads(raw)
    unique_templates = [Template.from_json(j) for j in parsed['templates']]

    # TODO(Eric): using only 2 word problems for testing
    unique_templates = unique_templates[:2]
    word_problems = word_problems[:2]

    feature_extractor = FeatureExtractor(unique_templates, word_problems)
    derivations = initialize_partial_derivations_for_all_templates(
        word_problems[0], unique_templates)
    # Greedily take the first available slot filling until complete.
    derivation = derivations[0]
    while not derivation.is_complete():
        derivation = derivation.all_ways_to_fill_next_slot()[0]
    print(feature_extractor.extract(derivation))
    print(derivation)
def call_count_unique(arg_json, arg_unique, arg_nlp):
    """Extract a template for each example index in ``arg_unique``, print how
    many distinct templates there are, then dump all of them as JSON."""
    examples = LabeledExample.read(arg_json)
    templates = [
        WordProblem(examples[example_index],
                    NLP.read(arg_nlp, example_index)).extract_template()
        for example_index in arg_unique
    ]
    print(len(set(templates)))
    print(json.dumps([template.to_json() for template in templates]))
def call_print(arg_json, arg_index, arg_nlp):
    """Load one labeled example, extract its template, and print the word
    problem together with its question and command sentences."""
    examples = LabeledExample.read(arg_json)
    wp = WordProblem(examples[arg_index], NLP.read(arg_nlp, arg_index))
    wp.extract_template()
    print(wp)

    def describe(sentences):
        # Pair each sentence's text with its grammatical object.
        return [(s.as_text(), s.object_of_sentence()) for s in sentences]

    # NOTE(review): .itervalues() is Python-2-only if questions()/commands()
    # return builtin dicts -- verify against the NLP class before porting.
    print('questions: {}'
          .format(describe(wp.nlp.questions().itervalues())))
    print('commands: {}'
          .format(describe(wp.nlp.commands().itervalues())))
def call_fold(arg_testfold, arg_numfolds, arg_foldoutput, arg_json, arg_nlp,
              arg_templates, arg_parameters):
    """Run one fold of cross-validation: train a classifier on every fold
    except ``arg_testfold``, save its parameters, and print test accuracy.

    :param arg_testfold: index of the fold held out for evaluation.
    :param arg_numfolds: total number of folds.
    :param arg_foldoutput: currently unused in this function.
    :param arg_json: path/source passed to ``LabeledExample.read``.
    :param arg_nlp: location passed to ``NLP.read`` for each example index.
    :param arg_templates: JSON file written by ``call_find_template_set``.
    :param arg_parameters: output path for the trained classifier's JSON.
    """
    examples = LabeledExample.read(arg_json)
    # dict.itervalues()/iteritems() were removed in Python 3; use the
    # Python-3 equivalents.
    indices = [e.index for e in examples.values()][:5]  # TODO just 5 for testing
    natural_language = {i: NLP.read(arg_nlp, i) for i in indices}
    word_problems = [WordProblem(examples[i], natural_language[i])
                     for i in indices]

    fold_indices = make_fold_indices(arg_numfolds, len(word_problems))
    test_indices = fold_indices.pop(arg_testfold)
    train_indices = list()
    for per_fold in fold_indices:
        train_indices.extend(per_fold)

    with open(arg_templates, 'rt') as f_handle:
        raw = f_handle.read()
    parsed = json.loads(raw)
    unique_templates = [Template.from_json(j) for j in parsed['templates']]
    # JSON object keys are strings; restore the integer word-problem indices.
    wp_template_map = {int(k): v for k, v in parsed['wp_template_map'].items()}

    train_wps = [word_problems[i] for i in train_indices]
    train_templates_indices = list(
        {wp_template_map[wp.labeled_example.index] for wp in train_wps})
    # Map each training word problem to its template's position within the
    # reduced (training-only) template list.
    remap_templates = {
        wp.labeled_example.index: train_templates_indices.index(
            wp_template_map[wp.labeled_example.index])
        for wp in train_wps}
    train_templates = [unique_templates[i] for i in train_templates_indices]

    feature_extractor = FeatureExtractor(train_templates, train_wps)
    classifier = optimize_parameters(feature_extractor, train_wps,
                                     train_templates, remap_templates)
    with open(arg_parameters, 'wt') as f_handle:
        f_handle.write(json.dumps(classifier.to_json()))

    correct = 0
    for test_i in test_indices:
        test_wp = word_problems[test_i]
        correct += classifier.solve(test_wp)
    print('{} correct out of {}'.format(correct, len(test_indices)))
def print_prediction_from_file(file_name: str, description: str = None) -> None:
    """Print the model's prediction for the named recording.

    :param file_name: file name joined onto ``recording_directory``.
    :param description: passed through to ``print_prediction``.
        NOTE(review): default ``None`` contradicts the ``str`` annotation --
        this should be ``Optional[str]``; confirm and fix with the import.
    """
    # NOTE(review): ``recording_directory`` is a free variable -- presumably a
    # module-level or enclosing-scope Path; verify where it is bound.
    print_prediction(LabeledExample(recording_directory / file_name), description=description)
def predict(sample: LabeledExample) -> str:
    """Return wav2letter's prediction for a single labeled example."""
    spectrogram = sample.z_normalized_transposed_spectrogram()
    return wav2letter.predict_single(spectrogram)
def example(audio_file: Path) -> LabeledExample:
    """Build a ``LabeledExample`` for ``audio_file`` whose label is looked up
    by id, both with tags stripped and in its original tagged form."""

    def cleaned_label(example_id):
        # Named helper (instead of a lambda) that also avoids shadowing the
        # ``id`` builtin.
        return self._remove_tags_to_ignore(labels_with_tags_by_id[example_id])

    return LabeledExample(
        audio_file,
        label_from_id=cleaned_label,
        mel_frequency_count=self.mel_frequency_count,
        original_label_with_tags_from_id=(
            lambda example_id: labels_with_tags_by_id[example_id]))
def record_to_file(self, path: Path) -> LabeledExample:
    """Record from the microphone, write the audio to ``path`` as a wav file
    at this object's sample rate, and return a ``LabeledExample`` wrapping
    the written file for analysis."""
    recorded_audio = self.record()
    librosa.output.write_wav(str(path), recorded_audio, self.sample_rate)
    return LabeledExample(path)