def features(self, parse):
    """Build the feature vector for *parse*.

    Combines the grammar-rule features with the empty-denotation feature.
    NOTE: disabling rule features appears not to change the final result,
    but the train accuracy reported during SGD becomes misleading — worth
    investigating (maybe as an exercise).
    EXERCISE: Experiment with additional features.
    """
    feats = defaultdict(float)
    for source in (rule_features(parse), self.empty_denotation_feature(parse)):
        feats.update(source)
    return feats
def find_best_rules(domain):
    """Print grammar rules ranked by how often they appear in best parses.

    For every training example, parses the input, keeps the parses the
    domain's training metric judges correct, and accumulates the rule
    features of the first (best-ranked) correct parse.  Finally prints
    ``count<TAB>rule`` lines in descending count order.

    Args:
        domain: a project Domain object providing model(), train_examples()
            and training_metric().
    """
    model = domain.model()
    examples = domain.train_examples()
    metric = domain.training_metric()
    rule_counts = defaultdict(int)
    for example in examples:
        parses = model.parse_input(example.input)
        # Keep only parses the training metric judges correct for this example.
        good_parses = [p for p in parses if metric.evaluate(example, [p])]
        if good_parses:
            # Parses are assumed ranked best-first — TODO confirm with
            # model.parse_input's ordering guarantee.
            best_parse = good_parses[0]
            for rule, count in rule_features(best_parse).items():
                rule_counts[rule] += count
    # Sort (count, rule) tuples descending: primary key is count, ties
    # broken by reverse rule order — same ordering as the tuple sort.
    counts = sorted(((count, rule) for rule, count in rule_counts.items()),
                    reverse=True)
    for count, rule in counts:
        print('%d\t%s' % (count, rule))
def features(self, parse):
    """Return rule features augmented with operator-precedence features."""
    combined = rule_features(parse)
    # Fold the precedence features into the rule-feature map in place.
    for name, value in self.operator_precedence_features(parse).items():
        combined[name] = value
    return combined
def features(self, parse):
    """Return the rule features of *parse* unchanged."""
    return rule_features(parse)
def features(self, parse):
    """Feature extraction delegates entirely to rule_features."""
    feats = rule_features(parse)
    return feats