def adapt_and_test():
    """Load pre-trained MAML weights, adapt a fresh FunctionalMLP to each
    sinusoid task with plain SGD, and log per-task plus amplitude-averaged
    learning curves via the module-level logger.

    NOTE(review): relies on module-level names not defined in this block:
    ``logger``, ``Args``, ``DefaultBear``, ``amp_tasks``, ``t`` (torch),
    ``np`` (numpy), and ``device``.
    """
    import os
    import dill
    from playground.maml.maml_torch.maml_multi_step import FunctionalMLP

    logger.configure(log_directory=Args.log_dir, prefix=Args.log_prefix)
    logger.log_params(Args=vars(Args))

    # load weights
    # NOTE(review): dill.load executes arbitrary code from the pickle stream —
    # only load weight files from trusted runs.
    with open(os.path.join(Args.log_dir, Args.log_prefix, Args.weight_path),
              'rb') as f:
        weights = dill.load(f)
    model = FunctionalMLP(1, 1)

    # Dict-like container; missing keys default to a new empty list.
    losses = DefaultBear(list)
    for amp, task in amp_tasks:
        # Reset to the meta-learned initialization (weights[0]) before each
        # task; new leaf tensors with requires_grad so SGD can update them.
        model.params.update({
            k: t.tensor(v, requires_grad=True, dtype=t.double).to(device)
            for k, v in weights[0].items()
        })
        sgd = t.optim.SGD(model.parameters(), lr=Args.learning_rate)
        # `proper` is the full ground-truth curve used only for evaluation;
        # `samples` is the k-shot support set used for adaptation.
        proper = t.tensor(task.proper()).to(device)
        samples = t.tensor(task.samples(Args.k_shot)).to(device)

        for grad_ind in range(Args.grad_steps):
            # Evaluate on the full curve BEFORE this gradient step (no graph),
            # so the logged curve includes the pre-adaptation loss at step 0.
            with t.no_grad():
                xs, labels = proper
                ys = model(xs.unsqueeze(-1))
                loss = model.criteria(ys, labels.unsqueeze(-1))
                # Only print the final step; earlier steps are logged silently.
                logger.log(grad_ind,
                           loss=loss.item(),
                           silent=grad_ind != Args.grad_steps - 1)
                losses[f"amp-{amp:.2f}-loss"].append(loss.item())

            # One SGD adaptation step on the k-shot samples.
            xs, labels = samples
            ys = model(xs.unsqueeze(-1))
            loss = model.criteria(ys, labels.unsqueeze(-1))
            sgd.zero_grad()
            loss.backward()
            sgd.step()
        # losses = np.array([v for k, v in losses.items()])

    # One figure with a learning curve per amplitude.
    import matplotlib.pyplot as plt
    fig = plt.figure()
    plt.title(f'Learning Curves')
    for amp, task in amp_tasks:
        plt.plot(losses[f"amp-{amp:.2f}-loss"], label=f"amp {amp:.2f}")
    plt.legend()
    logger.log_pyplot(None, key=f"losses/learning_curves_amp.png", fig=fig)
    plt.close()

    # Learning curve averaged over all amplitudes.
    average_losses = np.array(
        [losses[f"amp-{amp:.2f}-loss"] for amp, task in amp_tasks])
    fig = plt.figure()
    plt.title(f'Learning Curves Averaged amp ~ [5 - 10]')
    plt.plot(average_losses.mean(0))
    plt.ylim(0, 28)
    logger.log_pyplot(None, key=f"losses/learning_curves_amp_all.png", fig=fig)
    plt.close()
# ---- Exemple #2 (score: 0) — scraped-snippet separator, kept as a comment ----
    def fit_estimator(self, print_fit_result=True):  # TODO: default should become False
        """Fit the wrapped estimator on (self.X, self.Y) if not fitted yet.

        When a fit actually happens, ``self.time_to_fit`` is set to the fit
        duration normalized per 1000 observations; otherwise it stays ``None``.

        Args:
          print_fit_result: when True, log plots of the true and the fitted
            conditional distribution (only when ndim_x == ndim_y == 1).
        """
        self.time_to_fit = None

        # Fit only when the estimator has not been fitted before.
        if not self.estimator.fitted:
            t_start = time.time()
            self.estimator.fit(self.X, self.Y, verbose=False)
            elapsed = time.time() - t_start
            # Normalize wall-clock time to seconds per 1000 samples.
            self.time_to_fit = elapsed * self.n_observations / 1000

        # Plotting is skipped unless requested and the estimator is fitted.
        if not (print_fit_result and self.estimator.fitted):
            return

        # Ground-truth conditional pdf (1D-in / 1D-out models only).
        if self.probabilistic_model.ndim_x == 1 and self.probabilistic_model.ndim_y == 1:
            plt3d_true = self.probabilistic_model.plot(mode="pdf", numpyfig=False)
            logger.log_pyplot(key=self.task_name, fig=plt3d_true)
            plt.close(plt3d_true)

        # Fitted conditional distribution, in 2D and 3D views.
        if self.estimator.ndim_x == 1 and self.estimator.ndim_y == 1:
            plt2d = self.estimator.plot2d(show=False, numpyfig=False)
            plt3d = self.estimator.plot3d(show=False, numpyfig=False)
            logger.log_pyplot(key=self.task_name + "_fitted_cond_distr_2d", fig=plt2d)
            logger.log_pyplot(key=self.task_name + "_fitted_cond_distr_3d", fig=plt3d)
            plt.close(plt2d)
            plt.close(plt3d)

if __name__ == "__main__":
    import os

    from playground.maml.maml_torch.maml_multi_step import FunctionalMLP, FunctionalRNN

    # launch_training()
    logger.configure(log_directory=Args.log_dir, prefix=Args.log_prefix)
    # logger.log_params(Args=vars(Args))
    root_dir = "/Users/ge/machine_learning/berkeley-playground/ins-runs/2018-06-09/maml-baselines"
    # data_paths = {"mlp": "sinusoid-maml-mlp/debug/modules/28000_FunctionalMLP.pkl",
    #               "rnn": "sinusoid-maml-rnn/debug/modules/28000_FunctionalRNN.pkl"}
    data_paths = {
        "rnn": "sinusoid-maml-rnn/debug/modules/28000_FunctionalRNN.pkl"
    }

    for arch, rel_path in data_paths.items():
        Args.weight_path = os.path.join(root_dir, rel_path)

        # Pick the architecture matching the checkpoint being evaluated.
        if arch == 'mlp':
            model = FunctionalMLP(1, 1)
        elif arch == 'rnn':
            model = FunctionalRNN(1, 1, 10)

        # NOTE(review): this expects adapt_and_test to accept a model and
        # return two figures — confirm which version of adapt_and_test is
        # actually in scope here.
        fig_adaptation, fig_out = adapt_and_test(model)
        logger.log_pyplot(None, key=f"{arch}-adaptation.png", fig=fig_adaptation)
        logger.log_pyplot(None, key=f"{arch}-out-of-distribution.png", fig=fig_out)
# ---- Exemple #4 (score: 0) — scraped-snippet separator, kept as a comment ----
import os
import scipy.misc
import matplotlib

# Select the Tk backend; must run before pyplot is imported.
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import numpy as np
from ml_logger import logger

# Log into the current working directory.
logger.configure(os.path.realpath('.'))

# Log a sample image (the scipy raccoon face) at step 0.
# NOTE(review): scipy.misc.face is removed in scipy >= 1.12 — migrate to
# scipy.datasets.face when upgrading.
raccoon = scipy.misc.face()
logger.log_image(0, test_image=raccoon)

# Log a small cosine plot at step 0.
figure = plt.figure(figsize=(4, 2))
grid = np.linspace(0, 5, 1000)
plt.plot(grid, np.cos(grid))
logger.log_pyplot(0, figure)
plt.close()