Code example #1
def calc_eigs(**kwargs):
    """
    Calculates the inter-layer covariance and the corresponding eigenvalues and stores them under 'inter_layer_covariance'.
    Dependency: train_model

    :param kwargs:
    :return:
    """
    model_wrapper = odin.model_wrapper = load_model(kwargs.get("model"),
                                                    **kwargs)

    co.calc_inter_layer_covariance(model_wrapper=model_wrapper, **kwargs)

    datastore = model_wrapper.get_group("inter_layer_covariance")
    cov = datastore["cov"]
    eigen_values = datastore["eigen_values"]

    if kwargs.get("plot"):
        for i, eigs in enumerate(eigen_values):
            l = len(eigs)
            eigs = abs(eigs)
            oplt.plot_eigen_values(eigs,
                                   title="%s layer %d (%d)" %
                                   (model_wrapper.model_name, i, l))
            oplt.save("eigs(%d)" % i)
            oplt.plot_matrix(cov[i], title="Covariance (%d)" % i)
            oplt.save("cov(%d)" % i)

    return cov, eigen_values
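
The eigenvalues read back from the datastore are just the spectra of the stored covariance matrices. Below is a standalone NumPy sketch of that computation with synthetic activations; the array names are illustrative and not part of odin.

import numpy as np

# Synthetic layer activations: 1000 samples, 64 units (illustrative only)
activations = np.random.randn(1000, 64)

# Covariance across units and its eigenvalue spectrum, largest first
cov = np.cov(activations, rowvar=False)       # shape (64, 64)
eigen_values = np.linalg.eigvalsh(cov)[::-1]  # symmetric matrix, sorted descending

print(cov.shape, eigen_values[:5])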
Code example #2
def range_test_plot(**kwargs):
    """
    Plot the result of the range test.
    Dependency: range_test

    :param kwargs:
    :return:
    """
    model_wrapper = load_model(kwargs.get("model"), **kwargs)
    data = model_wrapper.get_group("range_test",
                                   experiment=kwargs.get("experiment"))
    rho_range = data["rho_range"]
    all_lambdas = data["all_lambdas"]
    all_layer_widths = data["all_layer_widths"]

    n_layers = len(all_lambdas)

    fig = oplt.figure()
    # oplt.suptitle(r"Different layer widths for $\rho \in [0.01,0.15]$")
    oplt.subplot(2, 1, 1)
    oplt.plot(rho_range, all_lambdas)
    oplt.labels(r"$\rho$", r"$\lambda_{\ell}$")
    oplt.legend([r"$\lambda_{%d}$" % l for l in range(n_layers)])
    oplt.subplot(2, 1, 2)
    oplt.plot(rho_range, all_layer_widths)
    oplt.labels(r"$\rho$", r"$\hat{m}_{\ell}$")
    oplt.legend([r"$\hat{m}_{%d}$" % l for l in range(n_layers)])
    oplt.save("range_test", experiment=kwargs.get("experiment"))
    oplt.show()

    return fig
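
A minimal usage sketch, assuming the odin package is installed and a range_test run has already been stored for the chosen model; the model and experiment names below are placeholders, not values taken from the repository.

# Hypothetical invocation; "mnist_mlp" and "rho_sweep" are placeholder names
fig = range_test_plot(model="mnist_mlp", experiment="rho_sweep")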
Code example #3
File: lambda_param.py  Project: Anderssorby/odin
def plot_lambdas(lambdas, bounds, layer_widths, prefix="line"):
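    """
    Plot the layer widths (top axis) and lambda values (bottom axis) against
    the bound values, one curve per layer, then save the figure as
    "<prefix>_lambda_dof".
    """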
    n = len(layer_widths[0])

    def l_m2(l): return np.sum(np.sqrt(l), axis=1)

    def l_1(l): return np.sum(np.abs(l), axis=1)

    def only_col(l, col): return np.transpose(l)[col]

    fig, (ax1, ax2) = oplt.subplots(2, 1, sharex=True)
    ax1.set_xlabel(r"$\sum_l\sqrt{\lambda_\ell}\leq D$")
    ax1.set_ylabel(r"$m_\ell$")

    color = 'tab:blue'
    ax2.set_ylabel(r'$\lambda_\ell$', color=color)  # we already handled the x-label with ax1
    ax2.tick_params(axis='y', labelcolor=color)

    # legends = []

    for i in range(n):
        reg_y = only_col(layer_widths, i)

        handle = ax1.plot(bounds, reg_y, '-', label="$m_%d$" % i)

        # legends.append(handle)

        # ax1.plot(np.repeat(optimal[i], layer_widths.shape[1]))
        # legends.append("optimal $m_%d$" % i)

        lamb_i = only_col(lambdas, i)
        handle = ax2.plot(bounds, lamb_i, '-', label=r'$\lambda_%d$' % i)
        # legends.append(handle)

    oplt.title("Optimization of DOF (%s)" % prefix)

    ax1.legend()
    ax2.legend()

    fig.tight_layout()

    oplt.save("%s_lambda_dof" % prefix)
    oplt.show()
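
A call sketch with synthetic inputs, assuming odin's plotting wrapper (oplt) is importable; from the function body, bounds is one-dimensional while lambdas and layer_widths carry one row per bound value and one column per layer.

import numpy as np

n_bounds, n_layers = 50, 3
bounds = np.linspace(1.0, 10.0, n_bounds)
lambdas = np.random.rand(n_bounds, n_layers)                          # placeholder values
layer_widths = np.random.randint(10, 100, size=(n_bounds, n_layers))

plot_lambdas(lambdas, bounds, layer_widths, prefix="demo")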
Code example #4
def plot():
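    """
    Plot the mini dataset: the per-sample mean of the features (top) and the
    labels (bottom) for the train and test splits, then save the figure as
    "mini_dataset".
    """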
    # allow_pickle is needed if the file stores a pickled object array of (X, y) pairs
    train, test = np.load("data/mini.npy", allow_pickle=True)
    n_train = len(train[1])
    n_test = len(test[1])
    trans = np.mean

    oplt.figure()
    oplt.subplot(2, 1, 1)
    oplt.plot(np.arange(n_train), trans(train[0], axis=1))
    oplt.plot(np.arange(n_test), trans(test[0], axis=1))
    oplt.legend(["train x", "test x"])
    oplt.labels("sample X", r"$\|X\|$")

    oplt.subplot(2, 1, 2)
    # "steps" is a drawstyle (not a linestyle) in current matplotlib
    oplt.plot(np.arange(n_train), train[1], drawstyle="steps")
    oplt.plot(np.arange(n_test), test[1], drawstyle="steps")
    oplt.legend(["train y", "test y"])
    oplt.labels("sample X", "$Y$")
    oplt.tight_layout()

    oplt.save("mini_dataset")
    oplt.show()
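
Judging from the plotting code, data/mini.npy appears to hold a (train, test) pair where each split is (X, y) with a two-dimensional X (the mean is taken over axis 1). The sketch below builds such a file from purely synthetic data; the shapes and the object-array layout are assumptions, not taken from the repository.

import os
import numpy as np

# Synthetic stand-in for data/mini.npy: a (train, test) pair of (X, y) splits
X_train, y_train = np.random.randn(200, 16), np.random.randint(0, 2, 200)
X_test, y_test = np.random.randn(50, 16), np.random.randint(0, 2, 50)

os.makedirs("data", exist_ok=True)
np.save("data/mini.npy",
        np.array([(X_train, y_train), (X_test, y_test)], dtype=object))

# Loading a pickled object array requires allow_pickle=True on recent NumPy
train, test = np.load("data/mini.npy", allow_pickle=True)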
Code example #5
File: keras_models.py  Project: Anderssorby/odin
    def sample_images(self, epoch):
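        """
        Sample an r x c grid of images from the generator at the given epoch
        and save the figure under the "sample_images" category.
        """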
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, ) + self.latent_dim)
        gen_imgs = self.generator.predict(noise)

        # Rescale images from [-1, 1] to [0, 1]
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = oplt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                if self.channels == 1:
                    axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap="gray")
                else:
                    axs[i, j].imshow(gen_imgs[cnt, :, :, :])
                axs[i, j].axis('off')
                cnt += 1
        oplt.save(category="sample_images",
                  name=self.dataset_name + "_%d" % epoch,
                  figure=fig)
        oplt.close()
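
The rescaling step assumes the generator ends in a tanh activation, so its outputs lie in [-1, 1]; mapping x to 0.5*x + 0.5 brings them into the [0, 1] range that imshow expects. A quick check of that arithmetic (the tanh output range is an assumption, common for GANs but not stated in this snippet):

import numpy as np

x = np.array([-1.0, 0.0, 1.0])  # assumed generator output range (tanh)
print(0.5 * x + 0.5)            # -> [0.  0.5 1. ], i.e. a valid imshow range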
Code example #6
def train_model(**kwargs):
    """
    Train the specified model. Command-line arguments are passed through to the training function.
    :param kwargs:
    :return:
    """
    # Remove None values
    kwargs = {a: b for a, b in kwargs.items() if b is not None}
    kwargs['new_model'] = True
    model_wrapper = load_model(kwargs.get("model"), **kwargs)

    model_wrapper.train(**kwargs)
    model_wrapper.save()

    if model_wrapper.history:
        model_wrapper.put_group("training_history",
                                {"history": model_wrapper.history.history})
        if kwargs.get("plot"):  # "plot" may have been stripped above if it was None
            oplt.plot_model_history(model_wrapper)
            oplt.save("loss")
            oplt.show()

    return model_wrapper
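
A hedged end-to-end sketch tying this to code example #1 (calc_eigs lists train_model as its dependency), assuming the odin package is installed and both functions are importable; the model name is a placeholder.

# Hypothetical run; "cnn_mnist" is a placeholder model name
wrapper = train_model(model="cnn_mnist", plot=False)
cov, eigen_values = calc_eigs(model="cnn_mnist", plot=True)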