Example #1
0
    def plot_prediction_1D(self, x0, bounds, idx_output=0, idx_input=0, resolution_prediction=1000):
        """
        Plot a 1-D slice of the model's prediction along one input dimension,
        keeping every other input fixed at the pivot point x0.

        :param x0: pivot point (unused when the input space is 1-D)
        :param bounds: prediction bounds; expected to expose get_n_dim(),
                       get_min(idx) and get_max(idx)
        :param idx_output: index of the output dimension to plot
        :param idx_input: index of the input dimension to sweep
        :param resolution_prediction: number of query points along the sweep
        :return: None (draws on the current matplotlib figure)
        """
        import matplotlib.pyplot as plt
        import scipyplot as spp

        # TODO: assert bounds is of type utils.bounds
        # NOTE(review): presumably still limited for multi-dimensional models — verify

        idx = idx_input
        if bounds.get_n_dim() == 1:
            # Build a column vector of query points, shape (resolution_prediction, 1),
            # so predict() and the column indexing below work the same in both branches.
            X = np.matrix(np.linspace(bounds.get_min(idx).flatten(),
                                      bounds.get_max(idx).flatten(),
                                      num=resolution_prediction)).T
        else:
            # Replicate the pivot point and sweep only the chosen input dimension.
            X = np.matrix(np.tile(x0, (resolution_prediction, 1)))
            X[:, idx] = np.linspace(bounds.get_min(idx).flatten(),
                                    bounds.get_max(idx).flatten(),
                                    num=resolution_prediction)
        prediction = self.predict(X)
        if self.isprobabilistic():
            # Probabilistic models return (mean, variance): draw a shaded band.
            # BUG FIX: the x-axis is the swept input column X[:, idx],
            # not the point row X[idx, :].
            h = spp.gauss_1D(x=X[:, idx], y=prediction[0][:, idx_output],
                             variance=prediction[1][:, idx_output], color='b')
        else:
            h = plt.plot(X[:, idx], prediction[:, idx_output], color='b',
                         linestyle='-', linewidth=2, label='Prediction')
Example #2
0
 def plotComparison(idx):
     # Overlay ground truth (green) and model prediction (blue) for output idx.
     handles = [None, None]
     handles[0] = plt.plot(dataset.output[idx, :].T, color='g', linestyle='-',
                           linewidth=2, marker='o', label='Groundtruth')
     if self.isprobabilistic():
         # (mean, variance) pair: draw the prediction as a shaded gaussian band.
         handles[1] = spp.gauss_1D(y=prediction[0][:, idx],
                                   variance=prediction[1][:, idx], color='b')
     else:
         handles[1] = plt.plot(prediction[:, idx], color='b', linestyle='-',
                               linewidth=2, label='Prediction')
     plt.legend()
     plt.title(dataset.name)
     plt.ylabel(dataset.get_label_input(idx))
Example #3
0
    def plot_optimization_curve(self, scale='log', plotDelta=True):
        """
        Plot the optimization history: evaluated objectives (red), the model's
        expected performance with a variance band (blue), and optionally the
        best expected performance (green).

        :param scale: 'log' for a logarithmic y-axis, anything else for linear
        :param plotDelta: when True and the task's known optimum
                          (self.task.opt_obj) is available, plot the
                          optimality gap instead of the raw objective
        :return: None (opens and shows a matplotlib figure)
        """
        import scipyplot as spp

        logs = self.get_logs()
        plt.figure()

        # Plot the optimality gap only when the true optimum is actually known
        # AND the caller asked for it; otherwise fall back to raw objectives.
        # BUG FIX: the previous condition routed opt_obj=None with
        # plotDelta=False into the gap branch, crashing on None subtraction,
        # and plotted the gap even when plotDelta was False.
        if (self.task.opt_obj is not None) and plotDelta:
            offset = self.task.opt_obj
            plt.ylabel('Optimality gap')
        else:
            offset = 0
            plt.ylabel('Obj.Func.')

        plt.plot(logs.get_objectives().T - offset, c='red', linewidth=2)
        n_evals = logs.data.m.shape[0]
        # Align the model's curve with the tail of the evaluation history.
        x = np.arange(start=logs.get_n_evals() - n_evals,
                      stop=logs.get_n_evals())
        spp.gauss_1D(y=logs.data.m - offset,
                     variance=logs.data.v,
                     x=x,
                     color='blue')
        if self.log_best_mean:
            spp.gauss_1D(y=logs.data.best_m - offset,
                         variance=logs.data.best_v,
                         x=x,
                         color='green')

        plt.xlabel('N. Evaluations')
        if scale == 'log':
            ax = plt.gca()
            ax.set_yscale('log')

        # TODO: add a legend once the "best performance expected" curve is final
        plt.show()
Example #4
0
 def plotComparison(idx):
     # Plot the model prediction swept along input dimension idx; for
     # multi-input models every other input stays fixed at the pivot x0.
     if self.n_inputs > 1:
         X = np.tile(x0, (resolution_prediction, 1))
         X[:, idx] = np.linspace(bounds.get_min(idx), bounds.get_max(idx), num=resolution_prediction)
         prediction = self.predict(X)
         if self.isprobabilistic():
             # (mean, variance) pair: draw a shaded uncertainty band.
             h = spp.gauss_1D(x=X[:, idx], y=prediction[0][:, idx_output], variance=prediction[1][:, idx_output],
                              color='b')
         else:
             h = plt.plot(X[:, idx], prediction[:, idx_output], color='b', linestyle='-', linewidth=2,
                          label='Prediction')
         # Mark where the pivot sits on the swept dimension.
         plt.axvline(x=x0[idx], linestyle='--', linewidth=2, color='r')
     else:
         # Single-input model: X is a 1-D vector (.T on a 1-D array is a no-op).
         X = np.linspace(bounds.get_min(), bounds.get_max(), num=resolution_prediction).T
         prediction = self.predict(X)
         if self.isprobabilistic():
             h = spp.gauss_1D(x=X, y=prediction[0], variance=prediction[1], color='b')
         else:
             # BUG FIX: X is 1-D here, so X[:, idx] raised IndexError;
             # plot directly against X.
             h = plt.plot(X, prediction, color='b', linestyle='-', linewidth=2, label='Prediction')
     plt.title('%d' % (idx_output))
     plt.xlabel('%d' % (idx))
Example #5
0
 def test_gauss_1D_vector(self):
     # Smoke test: gauss_1D should accept plain 1-D vectors for mean and variance.
     mean = np.random.rand(100)
     var = np.random.rand(100)
     handle = spp.gauss_1D(y=mean, variance=var)