Example #1
from skopt.utils import expected_minimum, expected_minimum_random_sampling


def _evaluate_min_params(result,
                         params='result',
                         n_minimum_search=None,
                         random_state=None):
    """Returns the minimum based on `params`"""
    x_vals = None
    space = result.space
    if isinstance(params, str):
        if params == 'result':
            # Using the best observed result
            x_vals = result.x
        elif params == 'expected_minimum':
            if result.space.is_partly_categorical:
                # Gradient-based search cannot handle categorical dimensions
                raise ValueError('expected_minimum does not support any '
                                 'categorical values')
            # Do a gradient-based minimum search using scipy's own minimizer
            if n_minimum_search:
                # A value for n_minimum_search has been passed
                x_vals, _ = expected_minimum(result,
                                             n_random_starts=n_minimum_search,
                                             random_state=random_state)
            else:  # Use the default of 20 random starting points
                x_vals, _ = expected_minimum(result,
                                             n_random_starts=20,
                                             random_state=random_state)
        elif params == 'expected_minimum_random':
            # Do a minimum search by evaluating the function with
            # n_samples sample values
            if n_minimum_search:
                # A value for n_minimum_search has been passed
                x_vals, _ = expected_minimum_random_sampling(
                    result,
                    n_random_starts=n_minimum_search,
                    random_state=random_state)
            else:
                # Use the default of 10**n_parameters points. Note this
                # becomes very slow for many parameters
                x_vals, _ = expected_minimum_random_sampling(
                    result,
                    n_random_starts=10**len(result.x),
                    random_state=random_state)
        else:
            raise ValueError('Argument `eval_min_params` must be a valid '
                             'string (`result`, `expected_minimum` or '
                             '`expected_minimum_random`)')
    elif isinstance(params, list):
        assert len(params) == len(result.x), 'Argument ' \
            '`eval_min_params` of type list must have same length as ' \
            'number of features'
        # Using defined x_values
        x_vals = params
    else:
        raise ValueError('Argument `eval_min_params` must '
                         'be a string or a list')
    return x_vals
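
A minimal usage sketch for the function above (hypothetical: `result` is assumed to be an OptimizeResult returned by a prior gp_minimize run over a purely numeric space):

# Hypothetical usage: `result` comes from a prior gp_minimize() run.
best_observed = _evaluate_min_params(result, params='result')
model_minimum = _evaluate_min_params(result,
                                     params='expected_minimum',
                                     n_minimum_search=20,
                                     random_state=1)
sampled_minimum = _evaluate_min_params(result,
                                       params='expected_minimum_random',
                                       n_minimum_search=1000,
                                       random_state=1)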
Example #2
from skopt import expected_minimum, gp_minimize
from skopt.benchmarks import bench3


def test_expected_minimum():
    res = gp_minimize(bench3, [(-2.0, 2.0)],
                      x0=[0.],
                      noise=1e-8,
                      n_calls=20,
                      random_state=1)

    x_min, f_min = expected_minimum(res, random_state=1)
    x_min2, f_min2 = expected_minimum(res, random_state=1)

    assert f_min <= res.fun  # true since noise ~= 0.0
    assert x_min == x_min2
    assert f_min == f_min2
Example #4
    def plot_results(self):
        """
        Visualize Hyperparameter Optimization Results
        """
        super(SkoptOptimizer, self).plot_results()

        import skopt.plots
        try:
            skopt.plots.plot_convergence(self.skopt_result)
        except Exception:
            pass

        try:
            skopt.plots.plot_objective(self.skopt_result)
        except Exception:
            pass

        try:
            skopt.plots.plot_evaluations(self.skopt_result)
        except Exception:
            pass

        try:
            from skopt import expected_minimum
            print(expected_minimum(self.skopt_result))
        except Exception:
            pass
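
The except/pass blocks above silence every plotting failure. A sketch of a slightly more informative variant (the `_try_plot` helper is hypothetical, not part of the original class):

import warnings

def _try_plot(plot_fn, result):
    # Hypothetical helper: call one skopt plotting function and emit a
    # warning instead of failing silently when it raises.
    try:
        plot_fn(result)
    except Exception as exc:
        warnings.warn('%s failed: %s' % (plot_fn.__name__, exc))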
Example #5
from skopt import expected_minimum, gp_minimize, plots
from skopt.benchmarks import bench3
from skopt.plots import _evaluate_min_params
from skopt.utils import expected_minimum_random_sampling


def test_evaluate_min_params():
    res = gp_minimize(bench3, [(-2.0, 2.0)],
                      x0=[0.],
                      noise=1e-8,
                      n_calls=8,
                      n_random_starts=3,
                      random_state=1)

    x_min, f_min = expected_minimum(res, random_state=1)
    x_min2, f_min2 = expected_minimum_random_sampling(res,
                                                      n_random_starts=1000,
                                                      random_state=1)
    plots.plot_gaussian_process(res)
    assert _evaluate_min_params(res, params='result') == res.x
    assert _evaluate_min_params(res, params=[1.]) == [1.]
    assert _evaluate_min_params(res, params='expected_minimum',
                                random_state=1) == x_min
    assert _evaluate_min_params(res,
                                params='expected_minimum',
                                n_minimum_search=20,
                                random_state=1) == x_min
    assert _evaluate_min_params(res,
                                params='expected_minimum_random',
                                n_minimum_search=1000,
                                random_state=1) == x_min2
Example #6
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from skopt import expected_minimum, gp_minimize, plots
from skopt.space import Categorical, Integer
from skopt.utils import expected_minimum_random_sampling


def test_plots_work():
    """Basic smoke tests to make sure plotting doesn't crash."""
    SPACE = [
        Integer(1, 20, name='max_depth'),
        Integer(2, 100, name='min_samples_split'),
        Integer(5, 30, name='min_samples_leaf'),
        Integer(1, 30, name='max_features'),
        Categorical(['gini', 'entropy'], name='criterion'),
        Categorical(list('abcdefghij'), name='dummy'),
    ]

    def objective(params):
        clf = DecisionTreeClassifier(random_state=3,
                                     **{
                                         dim.name: val
                                         for dim, val in zip(SPACE, params)
                                         if dim.name != 'dummy'
                                     })
        return -np.mean(cross_val_score(
            clf, *load_breast_cancer(return_X_y=True)))

    res = gp_minimize(objective, SPACE, n_calls=10, random_state=3)

    x_min, f_min = expected_minimum_random_sampling(res, random_state=1)
    x_min2, f_min2 = expected_minimum(res, random_state=1)

    assert x_min == x_min2
    assert f_min == f_min2

    plots.plot_convergence(res)
    plots.plot_evaluations(res)
    plots.plot_objective(res)
    plots.plot_objective(res, minimum='expected_minimum_random')
    plots.plot_objective(res,
                         sample_source='expected_minimum_random',
                         n_minimum_search=10000)
    plots.plot_objective(res, sample_source='result')
    plots.plot_regret(res)
Example #7
from typing import List, Optional, Tuple

from scipy.optimize import OptimizeResult
from skopt import expected_minimum


def get_expected_minimum(result: OptimizeResult,
                         **kwargs) -> Optional[Tuple[List[float], float]]:
    if len(result.models) == 0:
        return None

    return expected_minimum(result, **kwargs)
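
A usage sketch (hypothetical: `res` is assumed to come from a gp_minimize run; for runs without fitted surrogate models the helper returns None):

# Hypothetical usage: `res` comes from a prior gp_minimize() run.
minimum = get_expected_minimum(res, random_state=1)
if minimum is not None:
    x_best, f_best = minimum
    print('expected minimum at', x_best, 'with value', f_best)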
Example #8
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from skopt import expected_minimum, gp_minimize, plots
from skopt.plots import partial_dependence_1D, partial_dependence_2D
from skopt.space import Categorical, Integer
from skopt.utils import expected_minimum_random_sampling


def test_plots_work():
    """Basic smoke tests to make sure plotting doesn't crash."""
    SPACE = [
        Integer(1, 20, name='max_depth'),
        Integer(2, 100, name='min_samples_split'),
        Integer(5, 30, name='min_samples_leaf'),
        Integer(1, 30, name='max_features'),
        Categorical(['gini', 'entropy'], name='criterion'),
        Categorical(list('abcdefghij'), name='dummy'),
    ]

    def objective(params):
        clf = DecisionTreeClassifier(random_state=3,
                                     **{
                                         dim.name: val
                                         for dim, val in zip(SPACE, params)
                                         if dim.name != 'dummy'
                                     })
        return -np.mean(cross_val_score(
            clf, *load_breast_cancer(return_X_y=True)))

    res = gp_minimize(objective, SPACE, n_calls=10, random_state=3)

    x = [[11, 52, 8, 14, 'entropy', 'f'], [14, 90, 10, 2, 'gini', 'a'],
         [7, 90, 6, 14, 'entropy', 'f']]
    samples = res.space.transform(x)
    xi_ = [1., 10.5, 20.]
    yi_ = [-0.9240883492576596, -0.9240745890422687, -0.9240586402439884]
    xi, yi = partial_dependence_1D(res.space,
                                   res.models[-1],
                                   0,
                                   samples,
                                   n_points=3)
    assert_array_almost_equal(xi, xi_)
    assert_array_almost_equal(yi, yi_, 1e-3)

    xi_ = [0, 1]
    yi_ = [-0.9241087603770617, -0.9240188905968352]
    xi, yi = partial_dependence_1D(res.space,
                                   res.models[-1],
                                   4,
                                   samples,
                                   n_points=3)
    assert_array_almost_equal(xi, xi_)
    assert_array_almost_equal(yi, yi_, 1e-3)

    xi_ = [0, 1]
    yi_ = [1., 10.5, 20.]
    zi_ = [[-0.92412562, -0.92403575], [-0.92411186, -0.92402199],
           [-0.92409591, -0.92400604]]
    xi, yi, zi = partial_dependence_2D(res.space,
                                       res.models[-1],
                                       0,
                                       4,
                                       samples,
                                       n_points=3)
    assert_array_almost_equal(xi, xi_)
    assert_array_almost_equal(yi, yi_)
    assert_array_almost_equal(zi, zi_, 1e-3)

    x_min, f_min = expected_minimum_random_sampling(res, random_state=1)
    x_min2, f_min2 = expected_minimum(res, random_state=1)

    assert x_min == x_min2
    assert f_min == f_min2

    plots.plot_convergence(res)
    plots.plot_evaluations(res)
    plots.plot_objective(res)
    plots.plot_objective(res, dimensions=["a", "b", "c", "d", "e", "f"])
    plots.plot_objective(res, minimum='expected_minimum_random')
    plots.plot_objective(res,
                         sample_source='expected_minimum_random',
                         n_minimum_search=10000)
    plots.plot_objective(res, sample_source='result')
    plots.plot_regret(res)
    plots.plot_objective_2D(res, 0, 4)
    plots.plot_histogram(res, 0, 4)
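
For reference, partial dependence along one dimension is just the surrogate model's prediction averaged over the sample points with that dimension held fixed. A rough sketch of the idea (hypothetical, simplified re-implementation, not skopt's actual code):

import numpy as np

def sketch_partial_dependence_1D(model, samples, column, values):
    # Hypothetical sketch: for each candidate value of the chosen
    # (already-transformed) column, overwrite that column in every
    # sample and average the surrogate's predictions. Categorical
    # dimensions need extra care (one-hot columns), which skopt handles.
    averages = []
    for value in values:
        patched = np.array(samples, dtype=float)
        patched[:, column] = value
        averages.append(float(np.mean(model.predict(patched))))
    return averages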
Example #9
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from skopt import expected_minimum, plots


# `tools`, `measure_bounds` and `measures_names` are project-specific
# globals assumed to be defined elsewhere in the original module.
def make_figure(datasets, opt, figure_path, n_levels=32):
    fig = plt.figure(figsize=(5.1, 1.9))
    gs = gridspec.GridSpec(2, 4, width_ratios=[8, 8, 10, 1])
    exp_axis = plt.subplot(gs[0, 0])
    err_axis = plt.subplot(gs[0, 1])
    com_axis = plt.subplot(gs[1, 0])
    cst_axis = plt.subplot(gs[1, 1])
    fit_axis = plt.subplot(gs[0:2, 2])
    leg_axis = plt.subplot(gs[0:2, 3])

    (dataset, label, color, color_conf, linestyle) = datasets

    for measure, axis in [('expressivity', exp_axis), ('error', err_axis),
                          ('complexity', com_axis), ('cost', cst_axis)]:
        mean, conf_pos, conf_neg, start_gen = dataset[measure]
        xvals = list(range(start_gen, start_gen + len(mean)))
        axis.plot(xvals,
                  mean,
                  c=color,
                  label=label,
                  linestyle=linestyle,
                  dash_capstyle="round",
                  linewidth=2)
        axis.fill_between(xvals,
                          conf_neg,
                          conf_pos,
                          facecolor=color_conf,
                          alpha=0.5)

        ylim = measure_bounds[measure]
        ypad = (ylim[1] - ylim[0]) * 0.05
        axis.set_ylim(ylim[0] - ypad, ylim[1] + ypad)
        axis.set_ylabel(measures_names[measure], fontsize=8)
        axis.set_yticklabels([])
        axis.set_yticks([])
        for tick in axis.xaxis.get_major_ticks():
            tick.label.set_fontsize(7)
        axis.set_xlim(0, 10)
        if measure == 'expressivity' or measure == 'error':
            axis.set_xticks([])
        else:
            axis.set_xticks([0, 2, 4, 6, 8, 10])
            axis.set_xlabel('Generation', fontsize=8)

    (w_star, e_star), neg_log_p_sim = expected_minimum(opt)
    print('Simplicity:', len(opt.func_vals), 'iterations')
    print('w* =', w_star)
    print('e* =', e_star)
    print('log(P) =', -neg_log_p_sim)

    space = opt.space
    samples = np.asarray(opt.x_iters)
    rvs_transformed = space.transform(space.rvs(n_samples=250))

    xi, yi, zi = plots.partial_dependence(space, opt.models[-1], 1, 0,
                                          rvs_transformed, 250)

    zmin = zi.min()
    zmax = zi.max()
    levels = np.geomspace(zmin, zmax, n_levels + 1)

    cs = fit_axis.contourf(xi, yi, zi, levels, cmap='viridis_r')

    fit_axis.plot([0, w_star, w_star], [e_star, e_star, 0],
                  c='k',
                  linestyle=':')
    fit_axis.scatter(w_star, e_star, c='k', s=100, lw=0, marker='*')

    fit_axis.set_xlim(0, 2)
    fit_axis.set_ylim(0, 1)

    fit_axis.set_xticks([0, 0.5, 1.0, 1.5, 2.0])
    for tick in fit_axis.xaxis.get_major_ticks():
        tick.label.set_fontsize(7)

    fit_axis.set_yticklabels([])
    fit_axis.set_yticks([])

    fit_axis.set_xlabel('Weight (w)', fontsize=8)
    fit_axis.set_ylabel('Noise (ε)', fontsize=8)

    cb = plt.colorbar(cs, cax=leg_axis)
    cb.set_ticks([])
    leg_axis.yaxis.set_label_position("left")
    leg_axis.set_ylabel('log likelihood', fontsize=8)
    leg_axis.invert_yaxis()

    fig.tight_layout(pad=0.1, h_pad=0.5, w_pad=0.5, rect=(0.01, 0, 1, 1))
    fig.savefig(figure_path, format='svg')
    tools.format_svg_labels(figure_path)
    if not figure_path.endswith('.svg'):
        tools.convert_svg(figure_path, figure_path)