Example #1
def init_modules(train_split=False):
    """Inits the dicts containing functions for generating modules."""
    if filtered_modules:
        return  # already initialized

    all_modules = collections.OrderedDict([])
    if train_split:
        # Split the training regime into thirds of increasing entropy
        # (difficulty): easy, medium, and hard.
        all_modules['train-easy'] = modules.train(_make_entropy_fn(0, 3))
        all_modules['train-medium'] = modules.train(_make_entropy_fn(1, 3))
        all_modules['train-hard'] = modules.train(_make_entropy_fn(2, 3))
    else:
        all_modules['train'] = modules.train(_make_entropy_fn(0, 1))

    all_modules['interpolate'] = modules.test()
    all_modules['extrapolate'] = modules.test_extra()

    # Number of examples to generate per module in each regime; the
    # three-way train split shares per_train_module equally.
    counts['train'] = FLAGS.per_train_module
    counts['train-easy'] = FLAGS.per_train_module // 3
    counts['train-medium'] = FLAGS.per_train_module // 3
    counts['train-hard'] = FLAGS.per_train_module // 3
    counts['interpolate'] = FLAGS.per_test_module
    counts['extrapolate'] = FLAGS.per_test_module

    # Apply module filtering and flatten the nested module dicts.
    for regime_, modules_ in six.iteritems(all_modules):
        filtered_modules[regime_] = _filter_and_flatten(modules_)
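Every snippet here parameterizes difficulty through _make_entropy_fn(level, num_levels). The sketch below mirrors the behavior of that helper as defined in mathematics_dataset/generate.py (the name here is illustrative, not a library export): it returns a function that maps a module's full entropy range onto the slice selected by level/num_levels, so (0, 3) is the easiest third and (0, 1) is the full range.

# Sketch of the behavior the (level, num_levels) arguments rely on;
# the canonical implementation lives in mathematics_dataset/generate.py.
def make_entropy_fn(level, num_levels):
    lower = level / num_levels
    upper = (level + 1) / num_levels
    def modify_entropy(entropy_range):
        low, high = entropy_range
        length = high - low
        return (low + lower * length, low + upper * length)
    return modify_entropy

print(make_entropy_fn(0, 3)((4.0, 10.0)))  # (4.0, 6.0): easiest third
print(make_entropy_fn(0, 1)((4.0, 10.0)))  # (4.0, 10.0): full range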
Example #2
def generate_problems(filter, difficulty, num_examples):
    """Generate question-answer pairs using the DeepMind Mathematics Dataset.

        Keyword arguments:
        filter -- only use modules that contain this keyword
        difficulty -- float between 0.0 and 1.0 corresponding to the entropy used in generating constants\
            for each problem type
        num_examples -- number of problems to generate for each module
        """
    problems = collections.defaultdict(list)
    initial_modules = modules.train(util._make_entropy_fn(difficulty, 1))
    filtered_modules = util._filter_and_flatten(filter, initial_modules)
    for module_name, module in six.iteritems(filtered_modules):
        # ANSI escape codes render the module-name header in bold.
        if FLAGS.print_results:
            print("\033[1m{}\033[0m".format(module_name))
        num_dropped = 0
        for _ in range(num_examples):
            problem, extra_dropped = util.sample_from_module(module)
            num_dropped += extra_dropped
            if FLAGS.print_results:
                print(f"Module name: {module_name}")
            problems[module_name].append(problem)
        if num_dropped > 0 and FLAGS.print_results:
            logging.warning("Dropped %d examples", num_dropped)
    return problems
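A hedged usage sketch of generate_problems; the 'linear_1d' keyword is purely illustrative, and the stored problems are assumed to unpack into (question, answer) pairs as produced by util.sample_from_module.

# Illustrative call; assumes the names generate_problems depends on
# (collections, six, logging, FLAGS, modules, util) are in scope.
problems = generate_problems(filter='linear_1d', difficulty=0.5,
                             num_examples=8)
for module_name, qa_pairs in problems.items():
    question, answer = qa_pairs[0]
    print('%s: %s -> %s' % (module_name, question, answer))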
Example #3
    def __init__(self,
                 categories=("algebra__linear_1d", "probability"),
                 difficulty=0.5,
                 num_iterations=12,
                 batch_size=4):
        # Note: the categories default is a tuple to avoid Python's
        # mutable-default-argument pitfall.
        problems = collections.defaultdict(list)
        initial_modules = modules.train(_make_entropy_fn(difficulty, 1))
        filtered_modules = _filter_and_flatten(categories, initial_modules)
        self.sampled_modules = list(six.iteritems(filtered_modules))
        self.num_iterations = int(num_iterations * batch_size)
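A sketch of constructing such a dataset; the class name MathDataset is hypothetical, since the snippet shows only the __init__ body.

# Hypothetical class name wrapping the __init__ above.
dataset = MathDataset(categories=("algebra__linear_1d",),
                      difficulty=0.3,
                      num_iterations=100,
                      batch_size=8)
print(len(dataset.sampled_modules), "modules after filtering")
print(dataset.num_iterations, "total samples to draw")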
Example #4
    def __getitem__(self, idx):
        # Difficulty ramps linearly from 0 to 1 over the course of training.
        difficulty = self.current_iteration / self.total_iterations
        initial_modules = modules.train(_make_entropy_fn(difficulty, 1))
        filtered_modules = _filter_and_flatten(self.categories,
                                               initial_modules)
        self.sampled_modules = list(six.iteritems(filtered_modules))

        # Pick one module uniformly at random and sample a problem from it.
        _, module = self.sampled_modules[
            np.random.randint(len(self.sampled_modules))]
        problem = sample_from_module(module, show_dropped=False)[0]

        # Convert question and answer to token ids, adding BOS and EOS.
        ques = np_encode_string(str(problem[0]))
        anws = np_encode_string(str(problem[1]))

        self.current_iteration += 1
        return ques, anws
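Because __getitem__ recomputes difficulty as current_iteration / total_iterations, problems get harder as samples are drawn, which makes this a simple curriculum. A hedged sketch of pulling samples, assuming the method belongs to a torch-style Dataset named CurriculumDataset (hypothetical) with the __init__ signature from Example #3:

# Hypothetical wrapper class; np_encode_string is assumed to return
# numpy arrays of token ids, per the snippet's comment.
dataset = CurriculumDataset(categories=("arithmetic__add_or_sub",),
                            num_iterations=10, batch_size=1)
for idx in range(3):
    ques, anws = dataset[idx]  # difficulty rises with each draw
    print(idx, ques.shape, anws.shape)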
Example #5
    def __init__(self,
                 categories,
                 mean_accuracy_by_category,
                 model,
                 difficulty=0.5,
                 num_iterations=12,
                 batch_size=4,
                 starting_eps=0,
                 eps_grad=0):
        self.categories = categories
        self.model = model
        self.total_iterations = int(num_iterations * batch_size)
        self.current_iteration = 0
        self.starting_eps = starting_eps
        self.eps_grad = eps_grad
        assert len(self.categories) == len(mean_accuracy_by_category)

        # The model maps per-category accuracies to a sampling
        # distribution over problem categories.
        self.category_probabilities = self.model.forward(
            mean_accuracy_by_category)
        initial_modules = modules.train(_make_entropy_fn(difficulty, 1))
        filtered_modules = _filter_and_flatten(categories, initial_modules)
        self.sampled_modules = list(six.iteritems(filtered_modules))
        # Exact float equality is fragile; allclose checks that the
        # distribution sums to 1 within tolerance.
        assert torch.allclose(self.category_probabilities.sum(),
                              torch.tensor(1.0))
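Here model.forward is expected to turn the vector of per-category accuracies into a probability distribution over categories, which the final assert enforces. A minimal stand-in consistent with that contract (the inverse-accuracy weighting is purely illustrative):

import torch
import torch.nn as nn

class InverseAccuracySampler(nn.Module):
    # Hypothetical stand-in: weight categories the learner still gets
    # wrong more heavily, then softmax so the result sums to 1.
    def forward(self, mean_accuracy_by_category):
        acc = torch.as_tensor(mean_accuracy_by_category,
                              dtype=torch.float32)
        return torch.softmax(1.0 - acc, dim=0)

probs = InverseAccuracySampler()([0.9, 0.2, 0.5])
print(probs, float(probs.sum()))  # a distribution summing to ~1.0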
Example #6
def _sampling_func_from_module(mname, entropy_args):
    """Looks up the sampling function for a module name such as
    'algebra__linear_1d' in the nested dict returned by modules.train."""
    category, name = mname.split('__', 1)
    return modules.train(
        generate._make_entropy_fn(*entropy_args))[category][name]
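A hedged usage sketch: entropy_args mirrors the (level, num_levels) pairs passed to _make_entropy_fn in the earlier examples, and each call to the returned function builds one fresh problem.

# Assumes mathematics_dataset is importable and that module functions
# return objects with question/answer fields, as sample_from_module
# in Example #2 expects.
sample = _sampling_func_from_module('algebra__linear_1d', (0, 1))
problem = sample()
print(problem.question)
print(problem.answer)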