def full_test(bpmf,
              samples,
              real,
              key_name,
              num_samps=128,
              lookahead_fit='batch',
              lookahead_samps=128,
              pool=None,
              multieval=False,
              init_rmse=None,
              test_on=Ellipsis):
    """Active-learning loop over the unrated entries of ``bpmf``.

    Repeatedly: pick the next (i, j) to query according to the criterion
    named by ``key_name``, reveal ``real[i, j]`` to the model, refit by
    drawing ``num_samps`` fresh samples, and yield progress.

    Yields tuples ``(n_rated, rmse, picked_index, eval_matrix)``; the first
    yield reports the initial RMSE with ``picked_index`` and ``eval_matrix``
    both ``None``.
    """
    cfg = KEYS[key_name]
    pick = getattr(bpmf, cfg.key_fn)
    # Some criteria are maximized, others minimized.
    select = np.argmax if cfg.choose_max else np.argmin
    n_total = real.size

    if init_rmse is None:
        init_rmse = bpmf.bayes_rmse(samples, real, which=test_on)
    yield (len(bpmf.rated), init_rmse, None, None)

    while bpmf.unrated:
        print("{:<40} Picking query point {}...".format(
            cfg.nice_name, len(bpmf.rated) + 1))

        if len(bpmf.unrated) == 1:
            # Only one candidate left; nothing to score.
            vals = None
            i, j = next(iter(bpmf.unrated))
        else:
            candidates = np.array(list(bpmf.unrated)).T
            index_tuple = tuple(candidates)

            extra = {'which': index_tuple}
            if cfg.wants_pool and pool is not None:
                extra['pool'] = pool

            # XXX: should use lookahead_samps in here somewhere?
            scores = pick(samples, *cfg.args, **extra)

            i, j = candidates[:, select(scores)]
            vals = bpmf.matrix_results(scores, index_tuple)

        bpmf.add_rating(i, j, real[i, j])
        print("{:<40} Queried ({}, {}); {}/{} known".format(
            cfg.nice_name, i, j, len(bpmf.rated), n_total))

        # Refit on the enlarged rating set, possibly on a worker process.
        if multieval:
            samples, pred = pool.apply(
                fetch_samples, (bpmf, num_samps), {'fit_first': True})
        else:
            samples, pred = fetch_samples(bpmf, num_samps, fit_first=True)

        err = rmse(pred[test_on], real[test_on])
        print("{:<40} RMSE {}: {:.5}".format(
            cfg.nice_name, len(bpmf.rated), err))
        yield len(bpmf.rated), err, (i, j), vals
def full_test(bpmf, samples, real, key_name,
              num_samps=128, lookahead_fit='batch', lookahead_samps=128,
              pool=None, multieval=False, init_rmse=None, test_on=Ellipsis):
    """Generator driving an active-learning evaluation of ``bpmf``.

    Each iteration picks the best unrated entry under the criterion
    ``key_name``, adds its true value to the model, re-samples, and yields
    ``(num_rated, test_rmse, (i, j), eval_values)``. The initial yield
    carries the starting RMSE with ``(i, j)`` and ``eval_values`` of None.
    """
    criterion = KEYS[key_name]
    evaluate = getattr(bpmf, criterion.key_fn)
    # Whether higher or lower criterion values are better.
    best_of = np.argmax if criterion.choose_max else np.argmin
    total = real.size

    if init_rmse is None:
        init_rmse = bpmf.bayes_rmse(samples, real, which=test_on)
    yield (len(bpmf.rated), init_rmse, None, None)

    while bpmf.unrated:
        print("{:<40} Picking query point {}...".format(
            criterion.nice_name, len(bpmf.rated) + 1))

        if len(bpmf.unrated) == 1:
            # Last remaining candidate: take it directly.
            i, j = next(iter(bpmf.unrated))
            vals = None
        else:
            pairs = np.array(list(bpmf.unrated)).T
            which = tuple(pairs)

            kwargs = {'which': which}
            if criterion.wants_pool and pool is not None:
                kwargs['pool'] = pool

            # XXX: should use lookahead_samps in here somewhere?
            evals = evaluate(samples, *criterion.args, **kwargs)

            best = best_of(evals)
            i, j = pairs[:, best]
            vals = bpmf.matrix_results(evals, which)

        bpmf.add_rating(i, j, real[i, j])
        print("{:<40} Queried ({}, {}); {}/{} known".format(
            criterion.nice_name, i, j, len(bpmf.rated), total))

        # Draw fresh samples from the updated model (optionally on the pool).
        args, kw = (bpmf, num_samps), {'fit_first': True}
        samples, pred = (pool.apply(fetch_samples, args, kw) if multieval
                         else fetch_samples(*args, **kw))

        err = rmse(pred[test_on], real[test_on])
        print("{:<40} RMSE {}: {:.5}".format(
            criterion.nice_name, len(bpmf.rated), err))
        yield len(bpmf.rated), err, (i, j), vals
 def bayes_rmse(self, samples_iter, true_r, which=Ellipsis):
     """Return the RMSE of this model's predictions against ``true_r[which]``."""
     predictions = self.predict(samples_iter, which)
     return rmse(predictions, true_r[which])
 def bayes_rmse(self, samples_iter, true_r, which=Ellipsis):
     """RMSE between predicted and true ratings on the ``which`` subset."""
     predicted = self.predict(samples_iter, which)
     actual = true_r[which]
     return rmse(predicted, actual)