def weight_mean_std(model_name, random=False):
    """Fit a normal distribution to each Conv2d layer's weights and plot
    the per-layer mean and std on a two-scale plot.

    Args:
        model_name: identifier forwarded to ``load_model``.
        random: if True, load the untrained (randomly initialized) variant.
    """
    model = load_model(model_name, random)
    norm_dists = {'mean': [], 'std': []}
    layers = []
    for name, m in model.named_modules():
        if type(m) == nn.Conv2d:
            layers.append(name)
            # Flatten every weight of the layer and fit a Gaussian to them.
            # (Removed a dead `name.split('.')` whose result was discarded.)
            flat = m.weight.data.cpu().numpy().flatten()
            mu, std = norm.fit(flat)
            norm_dists['mean'].append(mu)
            norm_dists['std'].append(std)
            print(f'Norm dist mean: {mu} and std: {std}')
    plot_two_scales(norm_dists,
                    model_name,
                    x_labels=layers,
                    x_name='layers',
                    y_name='Mean',
                    y_name2='Std',
                    scale_fix=[-0.01, 0.15],
                    rotate=True)
def mean_var_overview(model_name, random):
    """Plot, per Conv2d layer, the mean over kernels of each kernel's
    mean weight and of each kernel's weight std.

    Args:
        model_name: identifier forwarded to ``load_model``.
        random: if True, analyse the untrained (randomly initialized) model.
    """
    import torch.nn as nn
    model = load_model(model_name, random)
    means = []
    stds = []
    layers = []
    for name, m in model.named_modules():
        if type(m) == nn.Conv2d:
            layers.append(name)
            weights = m.weight.data.cpu().numpy()
            # One row per kernel; axis-1 statistics replace the former
            # per-kernel Python loop with a single vectorized pass.
            per_kernel = weights.reshape(weights.shape[0], -1)
            means.append(np.mean(per_kernel.mean(axis=1)))
            stds.append(np.mean(per_kernel.std(axis=1)))
    plot_data_base({
        'means': means,
        'stds': stds
    },
                   'Mean and Variance of kernels ' +
                   ('Trained' if not random else 'Untrained'),
                   layers,
                   x_name='Layer number',
                   y_name='value',
                   scale_fix=[-0.05, 0.2])
def weight_std_factor(model_name, random=False):
    """For every Conv2d layer, fit a Gaussian to each kernel's weights and
    plot ``std * n_l / 2`` per kernel, where ``n_l`` is the layer fan-in
    (in_channels * kernel_height * kernel_width).

    Args:
        model_name: identifier forwarded to ``load_model``.
        random: if True, analyse the untrained (randomly initialized) model.
    """
    # Bug fix: the `random` flag was previously ignored (True was hard-coded).
    model = load_model(model_name, random)
    for name, m in model.named_modules():
        if type(m) == nn.Conv2d:
            weights = m.weight.data.cpu().numpy()
            # Fan-in of the layer. Bug fix: the original multiplied
            # shape[2] by itself, which is wrong for non-square kernels.
            n_l = weights.shape[1] * weights.shape[2] * weights.shape[3]
            # Fresh value list per layer — one plot is emitted per layer.
            norm_dists = {'value': []}
            for i in range(weights.shape[0]):
                flat = weights[i].flatten()
                mu, std = norm.fit(flat)
                # NOTE(review): original expression was `std * 1 / 2 * n_l`,
                # i.e. (std / 2) * n_l by precedence; intent unverified.
                norm_dists['value'].append(std * n_l / 2)
                print(f'Std: {std}')
            plot_data_base(norm_dists, name, x_name='layers', y_name='Mean')
def mean_compared(model_name, random):
    """Plot per-layer mean absolute kernel weight for the trained and the
    untrained variant of the same model.

    Args:
        model_name: identifier forwarded to ``load_model``.
        random: accepted for signature compatibility; not used (both the
            trained and untrained model are always loaded).
    """
    import torch.nn as nn

    def layer_abs_means(model, layer_names=None):
        # Per Conv2d layer: mean over kernels of each kernel's mean |weight|.
        result = []
        for name, m in model.named_modules():
            if type(m) == nn.Conv2d:
                if layer_names is not None:
                    layer_names.append(name)
                weights = m.weight.data.cpu().numpy()
                kernel_means = [np.mean(np.abs(weights[k].flatten()))
                                for k in range(weights.shape[0])]
                result.append(np.mean(kernel_means))
        return result

    layers = []
    # Collect layer names only once; both models share the architecture.
    means_untrained = layer_abs_means(load_model(model_name, True), layers)
    means_trained = layer_abs_means(load_model(model_name, False))
    plot_data_base(
        {
            'means_untrained': means_untrained,
            'means_trained': means_trained
        },
        'Mean trained and untrained',
        layers,
        x_name='Layer number',
        y_name='value',
        rotate=True)
# Beispiel #5  (scrape artifact: "Example #5" header and vote count "0";
# 0            commented out so the file remains valid Python)
def apply_fit_std_function(model, function, config):
    """Return the number of Conv2d layers in *model*.

    NOTE: ``function`` and ``config`` are accepted but currently unused.
    """
    conv_layer_names = [module_name
                        for module_name, module in model.named_modules()
                        if type(module) == nn.Conv2d]
    return len(conv_layer_names)