Example #1
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        running_means = [
            RunningMeanWelford() for i in activations_iterator.layer_names()
        ]

        for x, transformation_activations in tqdm(
                activations_iterator.samples_first(), disable=not verbose):
            for x_transformed, activations in transformation_activations:
                for i, layer_activations in enumerate(activations):

                    if self.sign != 1:
                        layer_activations *= self.sign

                    # binarize: 1.0 where the unit fires above its threshold, 0.0 otherwise
                    activated: np.ndarray = (layer_activations >
                                             self.thresholds[i]) * 1.0
                    running_means[i].update_all(activated)

        layers_l = [m.mean() for m in running_means]

        return MeasureResult(layers_l, activations_iterator.layer_names(),
                             self)
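
The snippets above and below rely on a RunningMeanWelford helper that is not shown here. The following is a minimal sketch of the interface implied by the calls (update for a single observation, update_all for a batch stacked along axis 0, mean for the current estimate); the body is an assumption based on Welford-style incremental means, not the library's actual implementation.

import numpy as np

class RunningMeanWelford:
    def __init__(self):
        self.n = 0
        self._mean = None

    def update(self, x):
        # incorporate one observation (an array of per-unit values)
        x = np.asarray(x, dtype=np.float64)
        if self._mean is None:
            self._mean = np.zeros_like(x)
        self.n += 1
        self._mean += (x - self._mean) / self.n

    def update_all(self, batch):
        # incorporate a batch of observations stacked along axis 0
        batch = np.asarray(batch, dtype=np.float64)
        b = batch.shape[0]
        batch_mean = batch.mean(axis=0)
        if self._mean is None:
            self._mean = np.zeros_like(batch_mean)
        self.n += b
        self._mean += (batch_mean - self._mean) * (b / self.n)

    def mean(self):
        # current running mean (per unit)
        return self._mean
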
Example #2
    def eval(self, activations_iterator: ActivationsIterator):
        self.g = GlobalFiringRateNormalMeasure(self.alpha, self.sign)
        g_result = self.g.eval(activations_iterator)
        self.thresholds = g_result.extra_values["thresholds"]
        self.l = LocalFiringRateNormalMeasure(self.thresholds, self.sign)
        l_result = self.l.eval(activations_iterator)

        ratio = tm.divide_activations(l_result.layers, g_result.layers)
        return MeasureResult(ratio, activations_iterator.layer_names(), self)
Example #3
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        td_result = self.td.eval(activations_iterator, verbose)
        sd_result = self.sd.eval(activations_iterator, verbose)

        td_result = self.pre_normalization_transformation.apply(td_result)
        sd_result = self.pre_normalization_transformation.apply(sd_result)

        result = divide_activations(td_result.layers, sd_result.layers)
        return MeasureResult(result, activations_iterator.layer_names(), self)
Example #4
    def eval(self, activations_iterator: ActivationsIterator):
        print("calculating global")
        self.g = GlobalFiringRateMeasure(self.activations_percentage, self.sign)
        g_result = self.g.eval(activations_iterator)
        print("calculating thresholds")
        self.thresholds = self.g.get_thresholds()
        print("calculating local")
        self.l = LocalFiringRateMeasure(self.thresholds, self.sign)
        l_result = self.l.eval(activations_iterator)

        ratio = divide_activations(l_result.layers, g_result.layers)
        return MeasureResult(ratio, activations_iterator.layer_names(), self)
Example #5
    def eval(self, activations_iterator: ActivationsIterator, verbose=False):
        self.g = GoodfellowNormalGlobalInvariance(self.alpha, self.sign)
        g_result = self.g.eval(activations_iterator, verbose)
        thresholds = g_result.extra_values[
            GoodfellowNormalGlobalInvariance.thresholds_key]
        self.l = GoodfellowNormalLocalInvariance(thresholds, self.sign)
        l_result = self.l.eval(activations_iterator, verbose)

        ratio = divide_activations(l_result.layers, g_result.layers)
        extra = {self.g_key: g_result, self.l_key: l_result}

        return MeasureResult(ratio,
                             activations_iterator.layer_names(),
                             self,
                             extra_values=extra)
Example #6
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        running_means = [
            RunningMeanAndVarianceWelford()
            for i in activations_iterator.layer_names()
        ]

        for transformation, samples_activations_iterator in tqdm(
                activations_iterator.transformations_first(),
                disable=not verbose):
            for x, batch_activations in samples_activations_iterator:
                for j, activations in enumerate(batch_activations):
                    if self.sign != 1: activations *= self.sign
                    running_means[j].update_all(activations)

        stds = [running_mean.std() for running_mean in running_means]
        means = [running_mean.mean() for running_mean in running_means]
        original_shapes = [mean.shape for mean in means]
        means = [mean.reshape(mean.size) for mean in means]
        stds = [std.reshape(std.size) for std in stds]
        # calculate the threshold values (approximately)
        thresholds = [np.zeros(mean.size) for mean in means]

        for i, (mean, std) in enumerate(zip(means, stds)):
            for j, (mu, sigma) in enumerate(zip(mean, std)):
                if sigma > 0:
                    t = norm.ppf(self.alpha, loc=mu, scale=sigma)
                else:
                    t = mu
                thresholds[i][j] = t

        # thresholds = mean + 2*std
        thresholds = [
            threshold.reshape(original_shape)
            for threshold, original_shape in zip(thresholds, original_shapes)
        ]
        # set g(i), the global firing rate of each unit, to 1 - alpha
        layers_g = [
            np.zeros_like(threshold) + (1 - self.alpha)
            for threshold in thresholds
        ]

        return MeasureResult(layers_g,
                             activations_iterator.layer_names(),
                             self,
                             extra_values={self.thresholds_key: thresholds})
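
To make the threshold step concrete: with alpha = 0.99, a unit whose activations have mean 0.5 and standard deviation 0.1 gets its threshold at the 0.99 quantile of N(0.5, 0.1^2), so under the normality assumption it fires above the threshold roughly 1% of the time. A small illustration (the numbers are made up):

from scipy.stats import norm

# threshold for a hypothetical unit with mean 0.5 and std 0.1 at alpha = 0.99
t = norm.ppf(0.99, loc=0.5, scale=0.1)
print(t)  # ~0.733; P(activation > t) = 1 - alpha = 0.01 under N(0.5, 0.1^2)
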
Example #7
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:

        transformation_result = self.transformation_measure.eval(
            activations_iterator, verbose)
        sample_result = self.sample_measure.eval(activations_iterator, verbose)
        result = divide_activations(transformation_result.layers,
                                    sample_result.layers)

        extra_values = {
            self.transformation_key: transformation_result,
            self.sample_key: sample_result,
        }
        return MeasureResult(result,
                             transformation_result.layer_names,
                             self,
                             extra_values=extra_values)
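
Several of these measures combine a transformation result and a sample result through divide_activations, whose implementation is not included in the snippets. A minimal sketch consistent with how it is called (layer-wise, elementwise division of two lists of arrays) could look like this; the eps guard is an addition of this sketch, not necessarily part of the real helper:

import numpy as np

def divide_activations(numerators, denominators, eps=1e-16):
    # elementwise ratio per layer; eps only prevents division by zero here
    return [np.asarray(n) / (np.asarray(d) + eps)
            for n, d in zip(numerators, denominators)]
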
Example #8
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        layer_names = activations_iterator.layer_names()
        n_layers = len(layer_names)
        mean_running = [RunningMeanWelford() for i in range(n_layers)]

        for transformation, transformation_activations in activations_iterator.transformations_first(
        ):
            # aggregate the distances between the activations of each batch of samples for this transformation
            for x, batch_activations in transformation_activations:
                for j, layer_activations in enumerate(batch_activations):
                    layer_measure = self.distance_aggregation.apply(
                        layer_activations)
                    mean_running[j].update(layer_measure)

        # calculate the final mean over all transformations (and layers)
        mean_variances = [b.mean() for b in mean_running]
        return MeasureResult(mean_variances, layer_names, self)
Example #9
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        layer_names = activations_iterator.layer_names()
        n_intermediates = len(layer_names)
        mean_running = [RunningMeanWelford() for i in range(n_intermediates)]
        for x, transformation_activations_iterator in activations_iterator.samples_first(
        ):
            # transformation_activations_iterator can iterate over all transforms
            for x_transformed, activations in transformation_activations_iterator:
                for j, layer_activations in enumerate(activations):
                    # calculate the distance aggregation only for this batch
                    layer_measure = self.distance_aggregation.apply(
                        layer_activations)
                    # update the mean over all transformations
                    mean_running[j].update(layer_measure)

        # calculate the final mean over all samples (and layers)
        mean_variances = [b.mean() for b in mean_running]
        return MeasureResult(mean_variances, layer_names, self)
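
Examples #8, #9 and #14 reduce a batch of layer activations to a per-unit value with self.distance_aggregation.apply. The aggregation itself is not defined in these snippets; the class below is a hypothetical stand-in that computes the mean absolute pairwise difference between batch elements per unit, which matches how the result is consumed but is only an assumption about the real DistanceAggregation:

import numpy as np

class MeanPairwiseDistanceAggregation:
    def apply(self, layer_activations):
        a = np.asarray(layer_activations, dtype=np.float64)
        a = a.reshape(a.shape[0], -1)  # (batch, units)
        # mean absolute difference over all pairs of batch elements, per unit
        diffs = np.abs(a[:, None, :] - a[None, :, :])
        return diffs.mean(axis=(0, 1))
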
Example #10
    def eval(self, activations_iterator: ActivationsIterator) -> MeasureResult:
        layer_names = activations_iterator.layer_names()
        n_intermediates = len(layer_names)
        layer_measures = [
            self.layer_measure_generator(i, n)
            for i, n in enumerate(layer_names)
        ]

        for r in layer_measures:
            r.on_begin()

        for activations, x_transformed in activations_iterator.samples_first():
            for r in layer_measures:
                r.on_begin_iteration()
            # activations has the activations for all the transformations
            for j, layer_activations in enumerate(activations):
                layer_measures[j].update_layer(layer_activations)

        results = [r.get_final_result() for r in layer_measures]
        return MeasureResult(results, layer_names, self)
Example #11
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:

        sample_result = self.sv.eval(activations_iterator)
        transformation_result = self.tv.eval(activations_iterator)

        # TODO REFACTOR NEW layer_transformation.py
        transformation_result = self.pre_normalization_transformation.apply(
            transformation_result)
        sample_result = self.pre_normalization_transformation.apply(
            sample_result)

        extra_values = {
            self.transformation_key: transformation_result,
            self.sample_key: sample_result,
        }

        result = divide_activations(transformation_result.layers,
                                    sample_result.layers)
        return MeasureResult(result, transformation_result.layer_names, self,
                             extra_values)
Example #12
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        activations_iterator: ActivationsIterator = activations_iterator.get_inverted_activations_iterator(
        )

        mean_variances_running = None

        for transformation, samples_activations_iterator in activations_iterator.transformations_first(
        ):
            samples_variances_running = None
            # calculate the variance of all samples for this transformation
            for x, batch_activations in samples_activations_iterator:
                if mean_variances_running is None:
                    n_layers = len(batch_activations)
                    mean_variances_running = [
                        RunningMeanWelford() for i in range(n_layers)
                    ]
                if samples_variances_running is None:
                    n_layers = len(batch_activations)
                    samples_variances_running = [
                        RunningMeanAndVarianceWelford()
                        for i in range(n_layers)
                    ]
                for j, layer_activations in enumerate(batch_activations):
                    samples_variances_running[j].update_all(layer_activations)
            # update the per-layer mean over transformations with the std over samples for this transformation
            for layer_mean_variances_running, layer_samples_variance_running in zip(
                    mean_variances_running, samples_variances_running):
                layer_mean_variances_running.update(
                    layer_samples_variance_running.std())

        # calculate the final mean over all transformations (and layers)

        mean_variances = [b.mean() for b in mean_variances_running]
        return MeasureResult(mean_variances,
                             activations_iterator.layer_names(), self)
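
Examples #6, #12 and #15 also track a per-unit variance with RunningMeanAndVarianceWelford. A minimal sketch of that interface (update_all, mean, std), again an assumption based on Welford's online algorithm rather than the library's code:

import numpy as np

class RunningMeanAndVarianceWelford:
    def __init__(self):
        self.n = 0
        self._mean = None
        self._m2 = None  # running sum of squared deviations from the mean

    def update_all(self, batch):
        # one Welford step per sample; batch is stacked along axis 0
        for x in np.asarray(batch, dtype=np.float64):
            if self._mean is None:
                self._mean = np.zeros_like(x)
                self._m2 = np.zeros_like(x)
            self.n += 1
            delta = x - self._mean
            self._mean += delta / self.n
            self._m2 += delta * (x - self._mean)

    def mean(self):
        return self._mean

    def std(self):
        # population standard deviation of everything seen so far
        if self.n < 2:
            return np.zeros_like(self._mean)
        return np.sqrt(self._m2 / self.n)
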
Example #13
def plot_heatmap(m: MeasureResult, vmin=None, vmax=None):

    for i, l in enumerate(m.layers):
        d = len(l.shape)
        if d > 1:
            dims = tuple(range(1, d))
            m.layers[i] = np.nanmean(l, axis=dims)

    if vmax is None: vmax = get_limit(m, "max")
    if vmin is None:
        vmin = get_limit(m, "min")
        if vmin > 0:
            vmin = 0

    n = len(m.layer_names)

    f, axes = plt.subplots(1, n, dpi=150, squeeze=False)
    mappable = None
    for i, (activation, name) in enumerate(zip(m.layers, m.layer_names)):
        ax = axes[0, i]
        ax.axis("off")
        activation = activation[:, np.newaxis]
        mappable = ax.imshow(activation,
                             vmin=vmin,
                             vmax=vmax,
                             cmap='inferno',
                             aspect="auto")

        if n < 40:
            if len(name) > 7:
                name = name[:6] + "."
            ax.set_title(name, fontsize=4, rotation=45)
    f.subplots_adjust(right=0.8)
    cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
    cbar = f.colorbar(mappable, cax=cbar_ax, extend='max')
    cbar.cmap.set_over('green')
    cbar.cmap.set_bad(color='gray')
    return f
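
A hypothetical way to call plot_heatmap, assuming MeasureResult only needs a list of per-layer arrays, the matching layer names and an object identifying the measure (the string below is a placeholder for a real measure instance):

import numpy as np

layers = [np.random.rand(16), np.random.rand(32), np.random.rand(10)]
names = ["conv1", "conv2", "fc"]
result = MeasureResult(layers, names, "example_measure")  # placeholder measure
fig = plot_heatmap(result, vmin=0, vmax=1)  # fixed limits skip get_limit entirely
fig.savefig("heatmap.png")
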
Example #14
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        activations_iterator = activations_iterator.get_inverted_activations_iterator(
        )
        mean_running = None

        for x, transformation_activations_iterator in activations_iterator.samples_first(
        ):
            # transformation_activations_iterator can iterate over all transforms
            for x_transformed, activations in transformation_activations_iterator:
                if mean_running is None:
                    mean_running = [
                        RunningMeanWelford() for i in range(len(activations))
                    ]
                for j, layer_activations in enumerate(activations):
                    layer_measure = self.distance_aggregation.apply(
                        layer_activations)
                    # update the mean over all transformations
                    mean_running[j].update(layer_measure)
        # calculate the final mean over all samples (and layers)
        means = [b.mean() for b in mean_running]
        return MeasureResult(means, activations_iterator.layer_names(), self)
Example #15
    def eval(self,
             activations_iterator: ActivationsIterator,
             verbose=False) -> MeasureResult:
        activations_iterator = activations_iterator.get_inverted_activations_iterator(
        )

        mean_running = None
        for x, transformation_activations in activations_iterator.samples_first(
        ):
            transformation_variances_running = None
            # calculate the running mean/variance/std over all transformations of x
            for x_transformed, activations in transformation_activations:
                if mean_running is None:
                    n_layers = len(activations)
                    mean_running = [
                        RunningMeanWelford() for i in range(n_layers)
                    ]
                if transformation_variances_running is None:
                    n_layers = len(activations)
                    transformation_variances_running = [
                        RunningMeanAndVarianceWelford()
                        for i in range(n_layers)
                    ]
                for i, layer_activations in enumerate(activations):
                    # update the running mean/variance over all transformations for this sample
                    transformation_variances_running[i].update_all(
                        layer_activations)
            # update the running mean with the std over all transformations of x
            for i, layer_variance in enumerate(
                    transformation_variances_running):
                mean_running[i].update(layer_variance.std())

        # calculate the final mean over all samples (for each layer)
        mean_variances = [b.mean() for b in mean_running]
        return MeasureResult(mean_variances,
                             activations_iterator.layer_names(), self)
Example #16
    def generate_result_from_layer_results(self, results, names):
        return MeasureResult(results, names, self)
Example #17
    def generate_result_from_layer_results(self, results_thresholds, names):
        results, thresholds = zip(*results_thresholds)
        return MeasureResult(results,
                             names,
                             self,
                             extra_values={"thresholds": thresholds})